Octavia changes for supporting uni-dimensional scale testing
Change-Id: I9c1508c290fea4759bb577227bd35f77e4625311
parent 2fb1aa210c
commit b401a5bb9e
@@ -145,6 +145,12 @@ class FeatureManager(traffic_manager.IperfManager,
                                        net_client.region,
                                        net_client.endpoint_type,
                                        **_params)
+        cls.octavia_hm_client = openstack_network_clients.\
+            OctaviaHealthMonitorClient(net_client.auth_provider,
+                                       net_client.service,
+                                       net_client.region,
+                                       net_client.endpoint_type,
+                                       **_params)
         cls.octavia_admin_members_client = openstack_network_clients.\
             OctaviaMembersClient(net_client.auth_provider,
                                  net_client.service,
@@ -1024,14 +1030,15 @@ class FeatureManager(traffic_manager.IperfManager,
                 count += 1
             else:
                 break
-        if barbican or external_subnet:
-            self.cmgr_adm.ports_client.update_port(
-                self.loadbalancer['vip_port_id'],
-                security_groups=[self.sg['id']])
-        else:
-            self.ports_client.update_port(
-                self.loadbalancer['vip_port_id'],
-                security_groups=[self.sg['id']])
+        if not CONF.nsxv3.ens:
+            if barbican or external_subnet:
+                self.cmgr_adm.ports_client.update_port(
+                    self.loadbalancer['vip_port_id'],
+                    security_groups=[self.sg['id']])
+            else:
+                self.ports_client.update_port(
+                    self.loadbalancer['vip_port_id'],
+                    security_groups=[self.sg['id']])
         # create lbaas public interface
         if barbican or external_subnet:
             if not hasattr(self, 'vip_ip_address'):
@@ -1493,15 +1500,17 @@ class FeatureManager(traffic_manager.IperfManager,
                 listener.get('id'))
             self.wait_for_octavia_loadbalancer_status(lb_id)
         # delete pools not attached to listener, but loadbalancer
         for pool in lb.get('pools', []):
             self.delete_lb_pool_resources(lb_id, pool)
+        lb_pools = self.octavia_admin_pools_client.\
+            list_octavia_pools()['pools']
+        for i in lb_pools:
+            pool_id = i['id']
+            self.octavia_admin_pools_client.delete_octavia_pool(pool_id)
         self.wait_for_octavia_loadbalancer_status(lb_id)
         test_utils.call_and_ignore_notfound_exc(
             oc_client.delete_octavia_load_balancer, lb_id)
         self.octavia_admin_client.\
             wait_for_load_balancer_status(lb_id,
                                           is_delete_op=True)
         lbs = oc_client.list_octavia_load_balancers()['loadbalancers']
         self.assertEqual(0, len(lbs))

     def delete_octavia_lb_pool_resources(self, lb_id, pool):
         """Deletion of lbaas pool resources.
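Review note: the hunk above makes teardown also remove pools that were created directly on the loadbalancer (via loadbalancer_id=) and are therefore not reachable through the listener-based cleanup. A minimal sketch of the resulting deletion order, using only calls that appear in this diff and assuming oc_client is the admin loadbalancer client (illustrative, not part of the commit):

    # Hypothetical helper illustrating the cleanup order implemented above.
    def _drain_pools_then_delete_lb(self, lb_id):
        # pools attached straight to the LB are invisible to listener
        # teardown, so list them and delete each one explicitly
        for pool in self.octavia_admin_pools_client.\
                list_octavia_pools()['pools']:
            self.octavia_admin_pools_client.delete_octavia_pool(pool['id'])
            self.wait_for_octavia_loadbalancer_status(lb_id)
        # the loadbalancer itself can only be removed once it is empty
        test_utils.call_and_ignore_notfound_exc(
            self.octavia_admin_client.delete_octavia_load_balancer, lb_id)
        self.octavia_admin_client.wait_for_load_balancer_status(
            lb_id, is_delete_op=True)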
@@ -1511,6 +1520,7 @@ class FeatureManager(traffic_manager.IperfManager,

         """
         pool_id = pool.get('id')
+        self.wait_for_octavia_loadbalancer_status(lb_id)
         test_utils.call_and_ignore_notfound_exc(
             self.octavia_admin_pools_client.delete_octavia_pool,
             pool_id)
@@ -1527,7 +1537,10 @@ class FeatureManager(traffic_manager.IperfManager,
                                      pool_protocol=None, pool_port=None,
                                      vip_subnet_id=None,
                                      lb_id=None, count=None,
-                                     clean_up=None):
+                                     clean_up=None, vip_net_id=None,
+                                     delay=None, max_retries=None,
+                                     timeout=None, default_pool=False,
+                                     vip_port_id=None):
         count = 0
         lb_name = None
         if lb_id is None:
@@ -1535,7 +1548,9 @@ class FeatureManager(traffic_manager.IperfManager,
             self.loadbalancer = self.\
                 octavia_admin_client.\
                 create_octavia_load_balancer(name=lb_name,
-                                             vip_subnet_id=vip_subnet_id
+                                             vip_subnet_id=vip_subnet_id,
+                                             vip_network_id=vip_net_id,
+                                             vip_port_id=vip_port_id
                                              )['loadbalancer']
             lb_id = self.loadbalancer['id']
             self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
@@ -1545,13 +1560,31 @@ class FeatureManager(traffic_manager.IperfManager,
                                     protocol_port=protocol_port,
                                     name=lb_name)['listener']
         self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
-        self.pool = self.octavia_admin_pools_client.\
-            create_octavia_pool(listener_id=self.listener['id'],
-                                lb_algorithm=lb_algorithm,
-                                protocol=protocol_type,
-                                name=lb_name)
+        if default_pool:
+            self.pool = self.octavia_admin_pools_client.\
+                create_octavia_pool(loadbalancer_id=lb_id,
+                                    lb_algorithm=lb_algorithm,
+                                    protocol=protocol_type,
+                                    name=lb_name)
+            pool_id = self.pool['pool']['id']
+            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            self.octavia_admin_listener_client.\
+                update_octavia_listener(default_pool_id=pool_id,
+                                        listener_id=self.listener['id'])
+            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        else:
+            self.pool = self.octavia_admin_pools_client.\
+                create_octavia_pool(listener_id=self.listener['id'],
+                                    lb_algorithm=lb_algorithm,
+                                    protocol=protocol_type,
+                                    name=lb_name)
+            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        pool_id = self.pool['pool']['id']
         if hm_type:
             self.healthmonitor = self.octavia_hm_client.\
                 create_octavia_hm(pool_id=pool_id, type=hm_type, delay=2,
                                   timeout=2, max_retries=2, name=lb_name)
             self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
         self.members = []
         for server_name in self.topology_servers.keys():
             if count < member_count:
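The three hunks above thread new knobs (vip_net_id, vip_port_id, delay/max_retries/timeout, default_pool) through the existing project-LBaaS helper; with default_pool=True the pool is created against the loadbalancer and then wired to the listener through update_octavia_listener. A hedged example call, assuming the method modified here is the feature manager's create_project_octavia_lbaas helper (its def line falls outside the hunk) and using a hypothetical subnet id:

    # Illustrative only: exercises the new default_pool path.
    self.create_project_octavia_lbaas(protocol_type="HTTP",
                                      protocol_port="80",
                                      lb_algorithm="ROUND_ROBIN",
                                      vip_subnet_id=subnet_id,  # hypothetical
                                      default_pool=True)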
@@ -1621,3 +1654,121 @@ class FeatureManager(traffic_manager.IperfManager,
         if present:
             dr_present = True
         return[{'dr_present': dr_present}, {'sr_present': sr_present}]
+
+    def create_project_octavia_scale(self, protocol_type,
+                                     protocol_port, lb_algorithm,
+                                     hm_type=None, member_count=2,
+                                     max_vms=None, weight=None,
+                                     fip_disassociate=None,
+                                     pool_protocol=None, pool_port=None,
+                                     vip_subnet_id=None,
+                                     lb_id=None, count=None,
+                                     clean_up=None, vip_net_id=None,
+                                     delay=None, max_retries=None,
+                                     timeout=None, default_pool=False,
+                                     vip_port_id=None, scale=None,
+                                     listener_count=None, pool_count=None,
+                                     lb_pool=False):
+        count = 0
+        lb_name = None
+        lb_name = data_utils.rand_name(self.namestart)
+        if not listener_count:
+            listener_count = 1
+        if not pool_count:
+            pool_count = 1
+        self.pools = []
+        for i in range(scale):
+            self.loadbalancer = self.\
+                octavia_admin_client.\
+                create_octavia_load_balancer(name=lb_name,
+                                             vip_subnet_id=vip_subnet_id,
+                                             vip_network_id=vip_net_id,
+                                             )['loadbalancer']
+            lb_id = self.loadbalancer['id']
+            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            for lc in range(listener_count):
+                protocol_port = str(int(protocol_port) + 1)
+                self.listener = self.octavia_admin_listener_client.\
+                    create_octavia_listener(loadbalancer_id=lb_id,
+                                            protocol=protocol_type,
+                                            protocol_port=protocol_port,
+                                            name=lb_name)['listener']
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+                l_id = self.listener['id']
+                for x in range(pool_count):
+                    if not lb_pool:
+                        self.pool = self.octavia_admin_pools_client.\
+                            create_octavia_pool(listener_id=l_id,
+                                                lb_algorithm=lb_algorithm,
+                                                protocol=protocol_type,
+                                                name=lb_name)
+                    else:
+                        self.pool = self.octavia_admin_pools_client.\
+                            create_octavia_pool(loadbalancer_id=lb_id,
+                                                lb_algorithm=lb_algorithm,
+                                                protocol=protocol_type,
+                                                name=lb_name)
+                    self.octavia_admin_client.\
+                        wait_for_load_balancer_status(lb_id)
+                    pool_id = self.pool['pool']['id']
+                    self.octavia_admin_listener_client.\
+                        update_octavia_listener(listener_id=l_id,
+                                                default_pool_id=pool_id)
+                    self.octavia_admin_client.\
+                        wait_for_load_balancer_status(lb_id)
+                    if hm_type:
+                        self.healthmonitor = self.octavia_hm_client.\
+                            create_octavia_hm(pool_id=pool_id,
+                                              type=hm_type, delay=2,
+                                              timeout=2, max_retries=2,
+                                              name=lb_name)
+                        self.octavia_admin_client.\
+                            wait_for_load_balancer_status(lb_id)
+            self.members = []
+            count = 0
+            for server_name in self.topology_servers.keys():
+                if count < member_count:
+                    fip_data = self.servers_details[server_name].\
+                        floating_ips[0]
+                    fixed_ip_address = fip_data['fixed_ip_address']
+                    if fip_disassociate is None:
+                        kwargs = dict(port_id=None)
+                        self.cmgr_adm.floating_ips_client.\
+                            update_floatingip(fip_data['id'],
+                                              **kwargs)['floatingip']
+
+                    if weight:
+                        weight += count
+                        member = self.octavia_admin_members_client.\
+                            create_octavia_member(
+                                pool_id, subnet_id=vip_subnet_id,
+                                address=fixed_ip_address,
+                                protocol_port=protocol_port,
+                                weight=weight)
+                    else:
+                        member = self.octavia_admin_members_client.\
+                            create_octavia_member(
+                                pool_id, subnet_id=vip_subnet_id,
+                                address=fixed_ip_address,
+                                protocol_port=protocol_port)
+                    self.octavia_admin_client.\
+                        wait_for_load_balancer_status(lb_id)
+                    self.members.append(member)
+                    self.server_names.append(server_name)
+                    count += 1
+                else:
+                    break
+            self.cmgr_adm.ports_client.\
+                update_port(self.loadbalancer['vip_port_id'],
+                            security_groups=[self.sg['id']])
+        # create floatingip for public network
+        self.cmgr_adm.ports_client.update_port(
+            self.loadbalancer['vip_port_id'],
+            security_groups=[self.sg['id']])
+        vip_fip = self.create_floatingip(
+            self.loadbalancer,
+            client=self.cmgr_adm.floating_ips_client,
+            port_id=self.loadbalancer['vip_port_id'])
+        self.vip_ip_address = vip_fip['floating_ip_address']
+        return 1
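The new helper scales one dimension at a time: scale sets the number of loadbalancers, listener_count the listeners per loadbalancer, and pool_count the pools per listener (attached to the loadbalancer itself when lb_pool=True). Usage sketches mirroring the tests added below, with a hypothetical subnet_id:

    # One LB scaled to 20 listeners (uni-dimensional on listeners).
    self.create_project_octavia_scale(protocol_type="HTTP",
                                      protocol_port="80",
                                      lb_algorithm="ROUND_ROBIN",
                                      vip_subnet_id=subnet_id,
                                      scale=1, listener_count=20)

    # One LB, one listener, 60 LB-attached pools (uni-dimensional on pools).
    self.create_project_octavia_scale(protocol_type="HTTP",
                                      protocol_port="80",
                                      lb_algorithm="ROUND_ROBIN",
                                      vip_subnet_id=subnet_id,
                                      scale=1, listener_count=1,
                                      pool_count=60, lb_pool=True)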
@@ -736,6 +736,11 @@ class OctaviaListenersClient(base.BaseNetworkClient):
         uri = self.resource_object_path % listener_id
         return self.delete_resource(uri)

+    def update_octavia_listener(self, listener_id, default_pool_id):
+        post_data = {"listener": {"default_pool_id": default_pool_id}}
+        uri = self.resource_object_path % listener_id
+        return self.update_resource(uri, post_data)
+

 class OctaviaPoolsClient(base.BaseNetworkClient):
     """
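update_octavia_listener issues an update on the listener resource with a {"listener": {"default_pool_id": ...}} body. A short usage sketch with hypothetical ids:

    # Illustrative only; listener_id and pool_id are hypothetical.
    self.octavia_admin_listener_client.update_octavia_listener(
        listener_id=listener_id, default_pool_id=pool_id)
    # Because the body always carries default_pool_id, passing None here
    # would amount to a request to clear the listener's default pool.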
@@ -759,6 +764,33 @@ class OctaviaPoolsClient(base.BaseNetworkClient):
         uri = self.resource_object_path % pool_id
         return self.delete_resource(uri)

+    def list_octavia_pools(self, **filters):
+        uri = self.resource_base_path
+        return self.list_resources(uri, **filters)
+
+
+class OctaviaHealthMonitorClient(base.BaseNetworkClient):
+    """
+    The client is responsible for
+    Creating health monitor
+    Deleting health monitor
+    """
+    resource = 'healthmonitor'
+    resource_plural = 'healthmonitors'
+    path = 'lbaas/healthmonitors'
+    resource_base_path = '/%s' % path
+    resource_object_path = '/%s/%%s' % path
+
+    def create_octavia_hm(self, **kwargs):
+        uri = self.resource_base_path
+        post_data = {self.resource: kwargs}
+        return self.create_resource(uri, post_data)
+
+    def delete_octavia_hm(self, hm_id):
+        time.sleep(constants.NSX_BACKEND_TIME_INTERVAL)
+        uri = self.resource_object_path % hm_id
+        return self.delete_resource(uri)
+

 class OctaviaMembersClient(base.BaseNetworkClient):
     """
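The new health-monitor client follows the same create/delete shape as the other Octavia clients in this module. A hedged sketch of how the scale tests below drive it, with a hypothetical pool_id (the create response is expected to be keyed by 'healthmonitor'):

    # Illustrative only: PING monitor matching the parameters used in tests.
    hm = self.octavia_hm_client.create_octavia_hm(
        pool_id=pool_id, type='PING', delay=2, timeout=2,
        max_retries=2, name='lbaas-hm')['healthmonitor']
    self.octavia_hm_client.delete_octavia_hm(hm['id'])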
@@ -0,0 +1,216 @@
+# Copyright 2019 VMware Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest import test
+
+from vmware_nsx_tempest_plugin.common import constants
+from vmware_nsx_tempest_plugin.lib import feature_manager
+
+LOG = constants.log.getLogger(__name__)
+CONF = config.CONF
+
+
+class OctaviaRoundRobin(feature_manager.FeatureManager):
+
+    """Base class to support LBaaS ROUND-ROBIN test.
+
+    It provides the methods to create loadbalancer network, and
+    start web servers.
+
+    Default lb_algorithm is ROUND_ROBIN.
+    """
+    @classmethod
+    def setup_clients(cls):
+        super(OctaviaRoundRobin, cls).setup_clients()
+        cls.cmgr_adm = cls.get_client_manager('admin')
+        cls.cmgr_alt = cls.get_client_manager('alt')
+
+    @classmethod
+    def skip_checks(cls):
+        super(OctaviaRoundRobin, cls).skip_checks()
+        cfg = CONF.network
+        if not test.is_extension_enabled('lbaasv2', 'network'):
+            msg = 'lbaasv2 extension is not enabled.'
+            raise cls.skipException(msg)
+        if not (cfg.project_networks_reachable or cfg.public_network_id):
+            msg = ('Either project_networks_reachable must be "true", or '
+                   'public_network_id must be defined.')
+            raise cls.skipException(msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(OctaviaRoundRobin, cls).resource_setup()
+
+    @classmethod
+    def setup_credentials(cls):
+        # Ask framework to not create network resources for these tests.
+        cls.set_network_resources()
+        super(OctaviaRoundRobin, cls).setup_credentials()
+
+    def setUp(self):
+        super(OctaviaRoundRobin, self).setUp()
+        CONF.validation.ssh_shell_prologue = ''
+        self.vip_ip_address = ''
+        self.namestart = 'lbaas-ops'
+        self.poke_counters = 12
+        self.hm_delay = 4
+        self.hm_max_retries = 3
+        self.hm_timeout = 10
+        self.server_names = []
+        self.loadbalancer = None
+        self.vip_fip = None
+        self.web_service_start_delay = 2.5
+
+    def tearDown(self):
+        if self.vip_fip:
+            LOG.debug("tearDown lbaas vip fip")
+            self.disassociate_floatingip(self.vip_fip, and_delete=True)
+        if self.loadbalancer:
+            LOG.debug("tearDown lbaas")
+            lb_id = self.loadbalancer['id']
+            self.delete_octavia_lb_resources(lb_id)
+        LOG.debug("tearDown lbaas exiting...")
+        super(OctaviaRoundRobin, self).tearDown()
+    def deploy_octavia_topology(self, no_of_servers=2, image_id=None):
+        kwargs = {'name': "router_lbaas",
+                  'external_gateway_info':
+                  {"network_id": CONF.network.public_network_id}}
+        router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.routers_client.delete_router,
+                        router_lbaas['router']['id'])
+        networks_client = self.cmgr_adm.networks_client
+        name = "network_lbaas_1"
+        network_lbaas_1 = self.\
+            create_topology_network(name,
+                                    networks_client=networks_client)
+        sec_rule_client = self.cmgr_adm.security_group_rules_client
+        sec_client = self.cmgr_adm.security_groups_client
+        kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
+                      security_group_rules_client=sec_rule_client,
+                      security_groups_client=sec_client)
+        self.sg = self.create_topology_security_group(**kwargs)
+        lbaas_rules = [dict(direction='ingress', protocol='tcp',
+                            port_range_min=constants.HTTP_PORT,
+                            port_range_max=constants.HTTP_PORT, ),
+                       dict(direction='ingress', protocol='tcp',
+                            port_range_min=443, port_range_max=443, )]
+        t_id = network_lbaas_1['tenant_id']
+        for rule in lbaas_rules:
+            self.add_security_group_rule(self.sg, rule,
+                                         secclient=sec_client,
+                                         ruleclient=sec_rule_client,
+                                         tenant_id=t_id)
+        body = {"network_id": network_lbaas_1['id'],
+                "allocation_pools": [{"start": "2.0.0.2", "end": "2.0.0.254"}],
+                "ip_version": 4, "cidr": "2.0.0.0/24"}
+        subnet_client = self.cmgr_adm.subnets_client
+        subnet_lbaas = subnet_client.create_subnet(**body)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        subnet_client.delete_subnet,
+                        subnet_lbaas['subnet']['id'])
+        self.cmgr_adm.routers_client.\
+            add_router_interface(router_lbaas['router']['id'],
+                                 subnet_id=subnet_lbaas['subnet']['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.cmgr_adm.routers_client.remove_router_interface,
+                        router_lbaas['router']['id'],
+                        subnet_id=subnet_lbaas['subnet']['id'])
+        for instance in range(0, no_of_servers):
+            self.create_topology_instance(
+                "server_lbaas_%s" % instance, [network_lbaas_1],
+                security_groups=[{'name': self.sg['name']}],
+                image_id=image_id, clients=self.cmgr_adm)
+        return dict(router=router_lbaas, subnet=subnet_lbaas,
+                    network=network_lbaas_1)
+    @decorators.attr(type='nsxv3')
+    @decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
+    def test_create_verify_octavia_lb_with_vip_subnet_id_rr_scale(self):
+        """
+        This testcase creates an octavia loadbalancer with the
+        vip-subnet-id option and verifies traffic on the loadbalancer vip.
+        """
+        diction = self.deploy_octavia_topology()
+        if not CONF.nsxv3.ens:
+            self.start_web_servers(constants.HTTP_PORT)
+        subnet_id = diction['subnet']['subnet']['id']
+        self.create_project_octavia_scale(protocol_type="HTTP",
+                                          protocol_port="80",
+                                          lb_algorithm="ROUND_ROBIN",
+                                          vip_subnet_id=subnet_id,
+                                          scale=1,
+                                          hm_type='PING', delay=self.hm_delay,
+                                          max_retries=self.hm_max_retries,
+                                          timeout=self.hm_timeout,
+                                          listener_count=20)
+        self.check_project_lbaas()
+
+    @decorators.attr(type='nsxv3')
+    @decorators.idempotent_id('c5ac8546-6668-4b7a-8704-3844b11b1a34')
+    def test_create_verify_octavia_lb_1listener_3pools(self):
+        """
+        Create an octavia LB with 1 listener, 3 pools, no session
+        persistence, and the round robin algorithm.
+        This testcase creates an octavia loadbalancer with the
+        vip-subnet-id option and verifies traffic on the loadbalancer vip.
+        """
+        diction = self.deploy_octavia_topology()
+        if not CONF.nsxv3.ens:
+            self.start_web_servers(constants.HTTP_PORT)
+        subnet_id = diction['subnet']['subnet']['id']
+        self.create_project_octavia_scale(protocol_type="HTTP",
+                                          protocol_port="80",
+                                          lb_algorithm="ROUND_ROBIN",
+                                          vip_subnet_id=subnet_id, scale=1,
+                                          hm_type='PING',
+                                          delay=self.hm_delay,
+                                          max_retries=self.hm_max_retries,
+                                          timeout=self.hm_timeout,
+                                          listener_count=1,
+                                          pool_count=3, lb_pool=True)
+        self.check_project_lbaas()
+
+    @decorators.attr(type='nsxv3')
+    @decorators.idempotent_id('c5ac8546-6568-4b7a-8704-3844b11b1a34')
+    def test_create_verify_octavia_lb_1listener_60pools(self):
+        """
+        Create an octavia LB with 1 listener, 60 pools, no session
+        persistence, and the round robin algorithm.
+        This testcase creates an octavia loadbalancer with the
+        vip-subnet-id option and verifies traffic on the loadbalancer vip.
+        """
+        diction = self.deploy_octavia_topology()
+        if not CONF.nsxv3.ens:
+            self.start_web_servers(constants.HTTP_PORT)
+        subnet_id = diction['subnet']['subnet']['id']
+        self.create_project_octavia_scale(protocol_type="HTTP",
+                                          protocol_port="80",
+                                          lb_algorithm="ROUND_ROBIN",
+                                          vip_subnet_id=subnet_id, scale=1,
+                                          hm_type='PING', delay=self.hm_delay,
+                                          max_retries=self.hm_max_retries,
+                                          timeout=self.hm_timeout,
+                                          listener_count=1,
+                                          pool_count=60, lb_pool=True)
+        self.check_project_lbaas()