Gating cases addition with lb mdproxy + lb external case fix
{0} tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group [5.296258s] ... ok
{0} vmware_nsx_tempest_plugin.tests.api.test_v2_fwaas.TestFwaasV2Ops.test_delete_fw_group_when_port_in_use [84.232465s] ... ok
{0} vmware_nsx_tempest_plugin.tests.api.test_v2_fwaas.TestFwaasV2Ops.test_delete_fw_policy_when_in_use [1.892236s] ... ok
{0} vmware_nsx_tempest_plugin.tests.api.test_v2_fwaas.TestFwaasV2Ops.test_delete_fw_rule_when_in_use [1.701605s] ... ok
{0} vmware_nsx_tempest_plugin.tests.api.test_v2_fwaas.TestFwaasV2Ops.test_fwaas_basic_icmp [1.624340s] ... ok
{0} vmware_nsx_tempest_plugin.tests.api.test_v2_fwaas.TestFwaasV2Ops.test_fwaas_basic_tcp [1.773771s] ... ok
{0} vmware_nsx_tempest_plugin.tests.api.test_v2_fwaas.TestFwaasV2Ops.test_fwaas_basic_udp [2.053368s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_ipv6_security_groups.IPv6SecurityGroupsTest.test_create_security_group_with_ipv6_port [136.341919s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_create_enable_mac_learning_port_delete [132.376333s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_create_mac_learning_port [65.277257s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_create_mac_learning_port_enable_port_security_negative [34.565833s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_create_toggle_mac_learning_port_delete [131.790629s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_create_update_delete_mac_learning_port [69.979989s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_delete_mac_learning_port [65.450064s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_mac_learning.NSXv3MacLearningTest.test_show_mac_learning_port [36.873404s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_routers.NSXv3RoutersTest.test_create_update_nsx_router [65.353485s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_routers.NSXv3RoutersTest.test_delete_nsx_router [66.594197s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_routers.NSXv3RoutersTest.test_deploy_router_ha_with_relocation_enable_disable [109.863283s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_routers.NSXv3RoutersTest.test_deploy_router_ha_with_relocation_enabled [41.153165s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_security_groups.NSXv3SecGroupTest.test_check_nsx_security_group_rule_tag_at_backend [70.050503s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_security_groups.NSXv3SecGroupTest.test_create_nsx_security_group_rule [131.097687s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_security_groups.NSXv3SecGroupTest.test_create_sec_group_with_0_0_0_0_remote_ip_prefix [100.059438s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_security_groups.NSXv3SecGroupTest.test_create_update_nsx_security_group [76.878271s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_security_groups.NSXv3SecGroupTest.test_delete_nsx_security_group [358.910515s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.api.test_nsx_security_groups.NSXv3SecGroupTest.test_delete_nsx_security_group_rule [100.178939s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.scenario.test_mdproxy_policy.TestMDProxyPolicy.test_mdproxy_with_multiple_ports_on_network [257.182416s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.scenario.test_nsx_port_security.TestNSXv3PortSecurityScenario.test_create_servers_with_port_security_and_check_traffic [391.070025s] ... ok
{0} vmware_nsx_tempest_plugin.tests.nsxv3.scenario.test_octavia_loadbalancers.OctaviaRoundRobin.test_create_update_verify_backend_octavia_lb [1002.545886s] ... ok

====== Totals ======
Ran: 28 tests in 3712.8594 sec.
 - Passed: 28
 - Skipped: 0
 - Expected Fail: 0
 - Unexpected Success: 0
 - Failed: 0
Sum of execute time for each test: 3542.1673 sec.

Change-Id: I232936a11af87149084ee5f47b3335f191cdd0f5
parent 1a3988e67c
commit dcd90b8b66
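The diff below makes two kinds of changes: FeatureManager.create_project_octavia() learns an external load-balancer path (wait for operating_status 'OFFLINE' since no members are reachable through an external VIP yet, take the member address from the floating IP, and reuse the LB's own vip_address instead of attaching a floating IP), and the nsxv3 tests gain NSX Policy realization-state checks backed by a reworked NSXPClient.verify_realized_state(). For orientation, a minimal sketch of the status-wait pattern follows, written as a standalone helper; the helper name is illustrative, while the client method and its operating_status keyword are the ones used in the hunks:

    # Illustrative helper only; condenses the branch repeated in
    # create_project_octavia() below.
    def wait_for_lb(octavia_admin_client, lb_id, external=False):
        if external:
            # An LB whose VIP lives on an external subnet has no reachable
            # members yet, so it is expected to settle in OFFLINE.
            octavia_admin_client.wait_for_load_balancer_status(
                lb_id, operating_status='OFFLINE')
        else:
            octavia_admin_client.wait_for_load_balancer_status(lb_id)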
@@ -1613,10 +1613,10 @@ class FeatureManager(traffic_manager.IperfManager,
             session_persistence["type"] = persistence_type
             if persistence_cookie_name:
                 session_persistence["cookie_name"] = persistence_cookie_name
 
         if lb_id is None:
             lb_name = data_utils.rand_name(self.namestart)
             if external:
+                vip_subnet_id = external_subnet
             self.loadbalancer = self.\
                 octavia_admin_client.\
                 create_octavia_load_balancer(
@@ -1633,7 +1633,12 @@ class FeatureManager(traffic_manager.IperfManager,
                     vip_qos_policy_id=qos_policy_id,
                     admin_state_up=True)['loadbalancer']
             lb_id = self.loadbalancer['id']
-            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            if external:
+                self.octavia_admin_client.\
+                    wait_for_load_balancer_status(lb_id,
+                                                  operating_status='OFFLINE')
+            else:
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
             if not no_cleanup:
                 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                 self.octavia_admin_client.
@@ -1655,7 +1660,12 @@ class FeatureManager(traffic_manager.IperfManager,
                                 self.octavia_admin_listener_client.
                                 delete_octavia_listener,
                                 self.listener['id'])
-            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            if external:
+                self.octavia_admin_client.\
+                    wait_for_load_balancer_status(lb_id,
+                                                  operating_status='OFFLINE')
+            else:
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
             if l7policy and action != 'REDIRECT_TO_POOL':
                 l7p = self.octavia_admin_l7policies_client.\
                     create_octavia_l7policies(listener_id=self.listener['id'],
@@ -1675,11 +1685,21 @@ class FeatureManager(traffic_manager.IperfManager,
                                       protocol=protocol_type,
                                       name=lb_name)
                 pool_id = self.pool['pool']['id']
-            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            if external:
+                self.octavia_admin_client.\
+                    wait_for_load_balancer_status(lb_id,
+                                                  operating_status='OFFLINE')
+            else:
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
             self.octavia_admin_listener_client.\
                 update_octavia_listener(default_pool_id=pool_id,
                                         listener_id=self.listener['id'])
-            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            if external:
+                self.octavia_admin_client.\
+                    wait_for_load_balancer_status(lb_id,
+                                                  operating_status='OFFLINE')
+            else:
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
         else:
             if barbican:
                 protocol_type = pool_protocol
@@ -1689,7 +1709,12 @@ class FeatureManager(traffic_manager.IperfManager,
                     protocol=protocol_type,
                     name=lb_name,
                     session_persistence=session_persistence)
-            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            if external:
+                self.octavia_admin_client.\
+                    wait_for_load_balancer_status(lb_id,
+                                                  operating_status='OFFLINE')
+            else:
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
             pool_id = self.pool['pool']['id']
             if not no_cleanup:
                 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -1701,7 +1726,12 @@ class FeatureManager(traffic_manager.IperfManager,
                 create_octavia_hm(pool_id=pool_id, type=hm_type, delay=delay,
                                   timeout=timeout, max_retries=max_retries,
                                   name=lb_name)
-            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            if external:
+                self.octavia_admin_client.\
+                    wait_for_load_balancer_status(lb_id,
+                                                  operating_status='OFFLINE')
+            else:
+                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
             if not no_cleanup:
                 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                 self.octavia_hm_client.
@@ -1720,7 +1750,10 @@ class FeatureManager(traffic_manager.IperfManager,
                 m = self.topology_servers[server_name]
                 fixed_ip_address = m['addresses'][x][0]['addr']
             else:
-                fixed_ip_address = fip_data['fixed_ip_address']
+                if external:
+                    fixed_ip_address = fip_data['floating_ip_address']
+                else:
+                    fixed_ip_address = fip_data['fixed_ip_address']
             if fip_disassociate is None:
                 kwargs = dict(port_id=None)
                 self.cmgr_adm.floating_ips_client.\
@@ -1783,6 +1816,9 @@ class FeatureManager(traffic_manager.IperfManager,
                 client=self.cmgr_adm.floating_ips_client,
                 port_id=self.loadbalancer['vip_port_id'])
             self.vip_ip_address = vip_fip['floating_ip_address']
+        else:
+            self.vip_ip_address = self.loadbalancer['vip_address']
+            vip_fip = []
         return dict(lb_id=lb_id,
                     vip_address=self.vip_ip_address,
                     pool_id=pool_id,
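The last two hunks above also adjust address selection for the external case: the pool member is reached through its floating IP rather than its fixed IP, and the VIP keeps its own address (vip_fip is left empty instead of attaching a floating IP to vip_port_id). A small sketch of the member-address choice, with fip_data shaped as in the diff:

    # Illustrative only; mirrors the branch added around fixed_ip_address.
    def pick_member_address(fip_data, external=False):
        if external:
            return fip_data['floating_ip_address']
        return fip_data['fixed_ip_address']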
@@ -1975,6 +2011,11 @@ class FeatureManager(traffic_manager.IperfManager,
         network_lbaas_1 = self.\
             create_topology_network(name,
                                     networks_client=networks_client)
+        # verify realization state of network
+        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
+        nsx_network = self.nsxp.get_logical_switch(network_lbaas_1['name'],
+                                                   network_lbaas_1['id'])
+        self.assertTrue(self.nsxp.verify_realized_state(nsx_network))
         sec_rule_client = self.cmgr_adm.security_group_rules_client
         sec_client = self.cmgr_adm.security_groups_client
         kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
@@ -2011,6 +2052,11 @@ class FeatureManager(traffic_manager.IperfManager,
                     [{"start": "2.0.0.2", "end": "2.0.0.254"}],
                 "ip_version": 4, "cidr": "2.0.0.0/24"}
             create_floating_ip = True
+        # Verify realization state of sg after rule add
+        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
+        sg = self.nsxp.get_firewall_section(self.sg['name'], self.sg['id'],
+                                            os_tenant_id='default')
+        self.assertTrue(self.nsxp.verify_realized_state(sg))
         subnet_client = self.cmgr_adm.subnets_client
         subnet_lbaas = subnet_client.create_subnet(**body)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -2029,6 +2075,11 @@ class FeatureManager(traffic_manager.IperfManager,
             security_groups=[{'name': self.sg['name']}],
             image_id=image_id, clients=self.cmgr_adm,
             create_floating_ip=create_floating_ip)
+        # Verify realization status of network on vm creation
+        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
+        nsx_network = self.nsxp.get_logical_switch(network_lbaas_1['name'],
+                                                   network_lbaas_1['id'])
+        self.assertTrue(self.nsxp.verify_realized_state(nsx_network))
         return dict(router=router_lbaas, subnet=subnet_lbaas,
                     network=network_lbaas_1)
 
@@ -165,7 +165,7 @@ class NSXPClient(object):
                             data=jsonutils.dumps(body))
         return response
 
-    def get_logical_resources(self, endpoint):
+    def get_logical_resources(self, endpoint, realize_state=False):
         """
         Get logical resources based on the endpoint
 
@@ -177,6 +177,8 @@ class NSXPClient(object):
         response = self.get(endpoint=endpoint)
         res_json = response.json()
         cursor = res_json.get("cursor")
+        if realize_state:
+            return res_json
         if res_json.get("results"):
             results.extend(res_json["results"])
         while cursor:
@@ -199,16 +201,19 @@ class NSXPClient(object):
         Verify realized state of nsx resource
         """
         if nsx_resources['path']:
-            endpoint = ("realized-state/realized-entities?intent_path=%s"
+            endpoint = ("realized-state/status?intent_path=%s"
                         % (nsx_resources['path']))
-            realize_state = self.get_logical_resources(endpoint)
-            if realize_state is not None:
-                com_list = [c['state'] for c in realize_state]
-            if com_list.count("REALIZED") == len(realize_state):
-                LOG.info("%s nsx_resources is realized" % nsx_resources)
-                state = True
+            realize_state = self.get_logical_resources(endpoint,
+                                                       realize_state=True)
+            s1 = realize_state['consolidated_status']['consolidated_status']
+            publish_status = realize_state['publish_status']
+            if s1 == "SUCCESS" and publish_status == 'REALIZED':
+                LOG.info("%s entity is realized" % nsx_resources['path'])
+                state = True
             else:
-                LOG.error(" %s nsx_resources is not realized" % nsx_resources)
+                LOG.error("%s entity is not realized" % nsx_resources['path'])
+                LOG.error("consolidated state: %s" % s1)
+                LOG.error("Publish status: %s" % publish_status)
                 state = False
         else:
             LOG.warning("Path fo nsx resource is not set!")
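With this change verify_realized_state() asks the Policy realized-state/status endpoint for the intent path and trusts its consolidated status plus publish_status, instead of counting REALIZED entities; get_logical_resources(endpoint, realize_state=True) simply hands back the raw JSON without paging. A rough sketch of the check against such a response (field names as used in the hunk, sample payload abridged and illustrative):

    # Illustrative check against a realized-state/status response body.
    def is_realized(status_json):
        consolidated = status_json['consolidated_status']['consolidated_status']
        publish_status = status_json['publish_status']
        return consolidated == "SUCCESS" and publish_status == 'REALIZED'

    # Example shape of the payload this expects (abridged, illustrative).
    sample = {"consolidated_status": {"consolidated_status": "SUCCESS"},
              "publish_status": "REALIZED"}
    assert is_realized(sample)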
@@ -11,11 +11,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import time
+
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions
 
+from vmware_nsx_tempest_plugin.common import constants
 from vmware_nsx_tempest_plugin.lib import feature_manager
+from vmware_nsx_tempest_plugin.services import nsxp_client
 CONF = config.CONF
 
 
@@ -44,6 +48,9 @@ class IPv6SecurityGroupsTest(feature_manager.FeatureManager):
     def setup_clients(cls):
         super(IPv6SecurityGroupsTest, cls).setup_clients()
         cls.cmgr_adm = cls.get_client_manager('admin')
+        cls.nsxp = nsxp_client.NSXPClient(CONF.nsxv3.nsx_manager,
+                                          CONF.nsxv3.nsx_user,
+                                          CONF.nsxv3.nsx_password)
 
     @classmethod
     def resource_setup(cls):
@@ -152,6 +159,10 @@ class IPv6SecurityGroupsTest(feature_manager.FeatureManager):
         port = body['port']
         for sg in port["security_groups"]:
             self.assertEqual(sg, sec_group['id'])
+        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
+        nsx_network = self.nsxp.get_logical_switch(network['name'],
+                                                   network['id'])
+        self.assertTrue(self.nsxp.verify_realized_state(nsx_network))
 
     @decorators.attr(type=['nsxv3', 'positive'])
     @decorators.idempotent_id('0604fee9-011e-4b5e-886a-620669a8c2f5')
@@ -172,8 +172,21 @@ class NSXv3MacLearningTest(base.BaseNetworkTest):
     def test_create_mac_learning_port(self):
         """
         Test creation of MAC Learning enabled port
         """
+        nsxp_network = self.nsxp.get_logical_switch(self.network['name'],
+                                                    self.network['id'])
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
         port = self._create_mac_learn_enabled_port(self.network)
+        # Verify realization state of segment
+        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
+        # Verify realization of segment port
+        nsxp_port = self.nsxp.get_logical_port(port['name'],
+                                               nsxp_network)
+        if 'realization_id' in nsxp_port.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_port))
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self._delete_port, port)
         if CONF.network.backend == 'nsxp':
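The same guard recurs for every segment and segment port checked in the MAC-learning tests: fetch the Policy object and only assert realization when it already exposes a realization_id. A compact sketch of that pattern as a helper (the helper name is hypothetical; nsxp is the NSXPClient instance these tests build in setup_clients):

    # Hypothetical helper condensing the repeated realization guard.
    def assert_realized_if_tracked(testcase, nsxp, nsxp_resource):
        # Resources without a realization_id are skipped rather than failed.
        if 'realization_id' in nsxp_resource:
            testcase.assertTrue(nsxp.verify_realized_state(nsxp_resource))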
@@ -192,6 +205,17 @@ class NSXv3MacLearningTest(base.BaseNetworkTest):
         vanilla_port = self.create_port(self.network, name=vanilla_name)
         if CONF.network.backend == 'nsxp':
             time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
+        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
+        # Verify realization of segment
+        nsxp_network = self.nsxp.get_logical_switch(self.network['name'],
+                                                    self.network['id'])
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
+        # Verify realization of segment port
+        nsxp_port = self.nsxp.get_logical_port(vanilla_port['name'],
+                                               nsxp_network)
+        if 'realization_id' in nsxp_port.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_port))
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self._delete_port, mac_lrn_port)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -331,6 +355,17 @@ class NSXv3MacLearningTest(base.BaseNetworkTest):
         """
         test_port_name = data_utils.rand_name('port-')
         test_port = self.create_port(self.network, name=test_port_name)
+        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
+        # Verify realization state of segment
+        nsxp_network = self.nsxp.get_logical_switch(self.network['name'],
+                                                    self.network['id'])
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
+        # Verify realization of segment port
+        nsxp_port = self.nsxp.get_logical_port(test_port['name'],
+                                               nsxp_network)
+        if 'realization_id' in nsxp_port.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_port))
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self._delete_port, test_port)
         if CONF.network.backend == 'nsxp':
@@ -436,9 +471,26 @@ class NSXv3MacLearningTest(base.BaseNetworkTest):
         Update port - enable port security(should fail)
         """
         test_port = self._create_mac_learn_enabled_port(self.network)
+        # Verify realization state of segment
+        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
+        nsxp_network = self.nsxp.get_logical_switch(self.network['name'],
+                                                    self.network['id'])
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
+        # Verify realization of segment port
+        nsxp_port = self.nsxp.get_logical_port(test_port['name'],
+                                               nsxp_network)
+        if 'realization_id' in nsxp_port.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_port))
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self._delete_port, test_port)
         port_opts = {}
         port_opts['port_security_enabled'] = True
         self.assertRaises(ex.BadRequest, self.update_port, test_port,
                           **port_opts)
+        # Verify realization state of segment
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
+        # Verify realization of segment port
+        if 'realization_id' in nsxp_port.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_port))
@@ -316,6 +316,9 @@ class ProviderNetworks(feature_manager.FeatureManager):
         provider_network = self.provider_networks_topoloy(
             constants.VLAN_TYPE,
             tz_id=self.vlan_id)
+        nsx_network = self.nsx.get_logical_switch(provider_network['name'],
+                                                  provider_network['id'])
+        self.assertEqual(4050, nsx_network['vlan'])
         subnet_client = self.cmgr_adm.subnets_client
         router = self.create_topology_router("rtr-provider")
         subnet_name = provider_network['name'] + '_subnet'
@@ -341,6 +344,9 @@ class ProviderNetworks(feature_manager.FeatureManager):
             constants.VLAN_TYPE,
             tz_id=self.vlan_id,
             vlan_id_unique=1004)
+        nsx_network1 = self.nsx.get_logical_switch(provider_network1['name'],
+                                                   provider_network1['id'])
+        self.assertEqual(1004, nsx_network1['vlan'])
         subnet_name = provider_network1['name'] + '_subnet1'
         kwargs = {"enable_dhcp": "True"}
         self.create_topology_subnet(
@@ -367,21 +373,7 @@ class ProviderNetworks(feature_manager.FeatureManager):
         kwargs = dict(tenant_id=provider_network['tenant_id'],
                       security_group_rules_client=sec_rule_client,
                       security_groups_client=sec_client)
-        sg = self.create_topology_security_group(**kwargs)
-        self.create_topology_instance(
-            "provider-server1", [provider_network],
-            security_groups=[{'name': sg['name']}],
-            clients=self.cmgr_adm)
-        self.create_topology_instance(
-            "provider-server2", [provider_network1],
-            security_groups=[{'name': sg['name']}],
-            clients=self.cmgr_adm)
-        for server_name in self.topology_servers.keys():
-            server = self.servers_details[server_name].server
-            fip_data = server.get('floating_ips')[0]
-            fip = fip_data['floating_ip_address']
-            self.verify_server_ssh(
-                server=server, floating_ip=fip)
+        self.create_topology_security_group(**kwargs)
 
     @decorators.attr(type='nsxv3')
     @decorators.idempotent_id('7708b7f2-94c1-4d9e-9bab-c1a929c54ab9')
@@ -268,6 +268,10 @@ class TestMDProxyPolicy(feature_manager.FeatureManager):
         self.deploy_mdproxy_topology()
         # Boot 2nd vm on same network
         network = self.topology_networks["network_mdproxy"]
+        nsxp_network = self.nsxp_client.get_logical_switch(network['name'],
+                                                           network['id'])
+        if 'realization_id' in nsxp_network.keys():
+            self.assertTrue(self.nsxp.verify_realized_state(nsxp_network))
         self.create_topology_instance(
             "server_mdproxy_2", [network])
         # Verify Metadata from vm1
@@ -1302,6 +1302,8 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
                                     max_retries=self.hm_max_retries,
                                     delay=self.hm_delay, default_pool=True)
 
+    @decorators.attr(type='nsxv3')
+    @decorators.idempotent_id('ca5c4368-6768-4b7a-8704-3844b11b1b35')
     def test_delete_octavia_lb_with_cascade(self):
         """
         Create octavia LB and delete it with --cascade options
@@ -1335,6 +1337,8 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
             noerr = 1
         self.assertTrue(noerr, list_lb)
 
+    @decorators.attr(type='nsxv3')
+    @decorators.idempotent_id('ca5c4368-6768-4b7a-8704-3844b11b1b34')
     def test_create_update_verify_backend_octavia_lb(self):
         """
         Create octavia LB and delete it with --cascade options
@@ -1346,12 +1350,13 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
             network_id=net_id)['port']['id']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.cmgr_adm.ports_client.delete_port, port_id)
-        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
+        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                     lb_algorithm="ROUND_ROBIN",
                                     vip_port_id=port_id, hm_type='PING',
                                     timeout=self.hm_timeout,
                                     max_retries=self.hm_max_retries,
                                     delay=self.hm_delay, default_pool=True)
+        self.check_project_lbaas()
         time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
         backend_name = diction['router']['router']['id'][:5]
         backend_status = self.nsxp.\
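In the last hunk the backend-verification test now creates the listener with HTTP instead of HTTPS and validates the data path with check_project_lbaas() before querying the NSX Policy backend. A condensed sketch of that ordering, assuming the OctaviaRoundRobin fixture (topology setup and the backend status query are elided):

    # Sketch of the verification order used by the updated test.
    self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                lb_algorithm="ROUND_ROBIN",
                                vip_port_id=port_id, hm_type='PING',
                                timeout=self.hm_timeout,
                                max_retries=self.hm_max_retries,
                                delay=self.hm_delay, default_pool=True)
    self.check_project_lbaas()   # traffic check first
    # ...then the LB service status is read from the NSX Policy backend.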