
Fixes some new PEP8 errors that appear with jobs running on the new
Ubuntu version, and temporarily filters out the widespread I202 error
("Additional newline in a group of imports"). This patch updates the
hacking and flake8-import-order versions.

Copied from: https://review.opendev.org/c/openstack/ovn-octavia-provider/+/936855

Change-Id: Ice4513eedc4fd6f054c19d1854eff00aeb5c35a1

# Copyright 2024 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random
import time

import testtools

import netaddr
from neutron_lib import constants
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.scenario import constants as neutron_constants
from oslo_log import log
from pyroute2 import IPRoute
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators

from whitebox_neutron_tempest_plugin.common import constants as local_constants
from whitebox_neutron_tempest_plugin.common import utils as local_utils
from whitebox_neutron_tempest_plugin.tests.scenario import base

CONF = config.CONF
WB_CONF = config.CONF.whitebox_neutron_plugin_options
LOG = log.getLogger(__name__)


class OvnDvrBase(base.TrafficFlowTest, base.BaseTempestTestCaseOvn):

    @classmethod
    def resource_setup(cls):
        super(OvnDvrBase, cls).resource_setup()
        cls.setup_api_microversion_fixture(
            compute_microversion='2.74')
        msg = "DVR is not enabled"
        if len(cls.nodes) < 2:
            raise cls.skipException(
                "The tests require environment with at least 2 nodes")
        cls.bgp_expose_tenant_networks = False
        for node in cls.nodes:
            if WB_CONF.openstack_type == 'devstack':
                if not node['is_controller']:
                    continue
                cls.check_service_setting(
                    host=node, service='',
                    config_files=[cls.neutron_conf],
                    param='enable_dvr')
                cls.check_service_setting(
                    host=node, service='',
                    config_files=[cls.neutron_conf],
                    param='router_distributed')
                cls.check_service_setting(
                    host=node, service='',
                    config_files=[WB_CONF.ml2_plugin_config],
                    section='ovn', param='enable_distributed_floating_ip')
                # TODO(rsafrono) add code that defines
                # cls.bgp_expose_tenant_networks on devstack
                # in case such a bgp environment is ever created
        if WB_CONF.openstack_type == 'podified':
            config_files = cls.get_configs_of_service()
            cls.check_service_setting(
                {'client': cls.proxy_host_client},
                config_files=config_files, section='ovn',
                param='enable_distributed_floating_ip', msg=msg)
            if WB_CONF.bgp:
                for node in cls.nodes:
                    if node['is_networker'] and not node['is_controller']:
                        output = node['client'].exec_command(
                            "crudini --get {} DEFAULT "
                            "expose_tenant_networks || true".format(
                                WB_CONF.bgp_agent_config)).strip()
                        if output:
                            cls.bgp_expose_tenant_networks = eval(
                                output.capitalize())
                        break
                    else:
                        continue

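    # Note on the crudini call in resource_setup above: crudini prints the
    # raw option value (e.g. "true"), or, thanks to the "|| true" fallback,
    # nothing when the option is absent. A minimal sketch of the conversion,
    # assuming those outputs:
    #     eval("true".capitalize())   # -> True
    #     eval("false".capitalize())  # -> False
    # An empty output leaves cls.bgp_expose_tenant_networks at False.
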
    def _setup(self, router=None):
        router = self.create_router_by_client()
        self.router_port = self.os_admin.network_client.list_ports(
            device_id=router['id'],
            device_owner=constants.DEVICE_OWNER_ROUTER_GW)['ports'][0]
        self.chassis_list = self.get_router_gateway_chassis_list(
            self.router_port['id'])
        self.chassis_name = self.get_router_gateway_chassis_by_id(
            self.chassis_list[0])
        LOG.debug("router chassis name = %s", self.chassis_name)

        # Since we are going to spawn VMs with the 'host' option, which
        # is available only for admin users, we create the security group
        # and keypair as admin too
        secgroup = self.os_admin.network_client.create_security_group(
            name=data_utils.rand_name('secgroup'))
        self.security_groups.append(secgroup['security_group'])
        self.os_admin.network_client.create_security_group_rule(
            security_group_id=secgroup['security_group']['id'],
            protocol=constants.PROTO_NAME_ICMP,
            direction=constants.INGRESS_DIRECTION)
        self.os_admin.network_client.create_security_group_rule(
            security_group_id=secgroup['security_group']['id'],
            protocol=constants.PROTO_NAME_TCP,
            direction=constants.INGRESS_DIRECTION,
            port_range_min=22,
            port_range_max=22)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.os_admin.network_client.delete_security_group,
            secgroup['security_group']['id'])
        self.keypair = self.os_admin.keypairs_client.create_keypair(
            name=data_utils.rand_name('keypair'))['keypair']
        self.network = self.create_network()
        self.subnet = self.create_subnet(self.network)
        self.create_router_interface(router['id'], self.subnet['id'])

        # We create VMs on compute hosts that are not on the same host
        # as the router gateway port, i.e. the test works even on
        # environments that schedule ovn routers on compute nodes
        self.exclude_hosts = [self.chassis_name]

        if self.get_network_type(self.network['id']) == 'vlan':
            # This helps to avoid false positives with vlan+dvr,
            # see BZ2192633
            self.ignore_outbound = True
        else:
            self.ignore_outbound = False

        self.server = self._create_server(
            exclude_hosts=self.exclude_hosts)

        self.compute = self.get_host_shortname_for_server(
            self.server['server']['id'])
        self.exclude_hosts.append(self.compute)

        self.server_ssh_client = ssh.Client(
            self.server['fip']['floating_ip_address'],
            CONF.validation.image_ssh_user,
            pkey=self.keypair['private_key'])
        self.fip_port_mac = self.get_fip_port_details(
            self.server['fip'])['mac_address']
        LOG.debug("FIP port MAC: %s", self.fip_port_mac)


class OvnDvrTest(OvnDvrBase):

    @decorators.idempotent_id('1561819a-b19f-45a7-8131-d001b2d7c945')
    def test_validate_floatingip_compute_egress(self):
        """Check that VM with a floating ip goes out through compute node.

        The aim of the test is to verify egress DVR functionality.
        Currently only OVN DVR environments are supported.

        Topology: Any topology with separate controller and compute
        nodes is valid for running the test.
        Recommended topology: 3 controller nodes with networking services and
        2 compute nodes with configured access to external network.

        Scenario:
        1. Create network, subnet, pingable and loginable security groups,
           keypair, run a vm instance (server) and create a fip for it.
        2. Check on which compute node the server runs.
        3. Capture traffic on all nodes and ping an IP address in the external
           network from the server (see details in _check_north_south_flow())
        4. Search for the server fip mac address in all capture files.
        5. Verify that we found the server fip mac address in the capture
           file on the compute node where the server runs and not found in
           captures on other nodes.

        """
        self._setup()
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[self.compute],
            expected_mac=self.fip_port_mac,
            ssh_client=self.server_ssh_client)

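    # For illustration only: the capture check above effectively looks for
    # the FIP port MAC in per-node traffic captures, roughly equivalent to
    # (hypothetical MAC and interface):
    #     tcpdump -i <external-iface> ether host fa:16:3e:aa:bb:cc and icmp
    # With DVR, only the compute node hosting the VM should match.
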
    @decorators.idempotent_id('682167ba-6250-4f3c-8fdf-1c768825cb8c')
    def test_validate_floatingip_compute_ingress_delete_fip_restart_instance(
            self):
        """Check that traffic to a VM with a floating ip enters through
        compute node.

        The aim of the test is to verify ingress DVR functionality.
        Currently only OVN DVR environments are supported.
        The test also removes the first floating ip from the server and adds
        a new one, verifying that it does not affect routing of ingress
        traffic.
        Finally, it verifies that an instance reboot does not affect the
        connectivity and/or the routing.

        Topology: Any topology with separate controller and compute
        nodes is valid for running the test.
        Recommended topology: 3 controller nodes with networking services and
        2 compute nodes with configured access to external network.

        Scenario:
        1. Create network, subnet, pingable and loginable security groups,
           keypair, run a vm instance (server) and create a fip for it.
        2. Check on which compute node the server runs.
        3. Capture traffic on all nodes and ping the server from an IP address
           in the external network (see details in _check_north_south_flow())
        4. Search for the server fip mac address in all capture files.
        5. Verify that we found the server fip mac address in the capture
           file on the compute node where the server runs and not found in
           captures on other nodes.
        6. Remove the FIP from the test server and add a new one to it;
           verify that traffic again passes through the compute node where
           the server was spawned.
        7. Restart the server and verify that routing has not changed.

        """
        def _get_extra_parameters():
            ssh_client = None
            expected_routing_nodes = [self.compute]
            try:
                self.check_remote_connectivity(
                    self.proxy_host_client,
                    self.server['fip']['floating_ip_address'],
                    timeout=60)
                ssh_client = self.proxy_host_client
            except AssertionError:
                # In case the VM under test is not responding from the proxy
                # host, this means that there is a limitation of the
                # environment, and as a fallback scenario we create an
                # additional VM (on the external network) that we'll use for
                # pinging the VM under test.
                LOG.debug(
                    "VM is not pingable from the proxy host. "
                    "Creating a VM on external_network to complete "
                    "the required setup.")
                self.external_network = self.client.show_network(
                    CONF.network.public_network_id)['network']
                ext_vm = self._create_server(
                    network=self.external_network,
                    create_floating_ip=False,
                    use_admin_client=True)
                self.ext_vm_ssh_client = ssh.Client(
                    ext_vm['port']['fixed_ips'][0]['ip_address'],
                    self.username, pkey=self.keypair['private_key'])
                ssh_client = self.ext_vm_ssh_client
                ext_vm_host = self.get_host_shortname_for_server(
                    ext_vm['server']['id'])
                # expected_routing_nodes should not have duplicates
                expected_routing_nodes = list(set([self.compute, ext_vm_host]))
            return ssh_client, expected_routing_nodes

        self._setup()
        ssh_client, expected_routing_nodes = _get_extra_parameters()

        self.check_north_south_icmp_flow(
            dst_ip=self.server['fip']['floating_ip_address'],
            expected_routing_nodes=expected_routing_nodes,
            expected_mac=self.fip_port_mac,
            ssh_client=ssh_client)

        # Delete fip
        LOG.debug('Deleting floating ip')
        self.os_admin.network_client.delete_floatingip(
            self.server['fip']['id'])
        # Add a new fip to the test server and make sure that routing is
        # via compute again.
        LOG.debug('Adding new floating ip to vm')
        fip = self.os_admin.network_client.create_floatingip(
            port_id=self.server['port']['id'],
            floating_network_id=CONF.network.public_network_id)['floatingip']
        fip_port_mac = self.get_fip_port_details(fip)['mac_address']
        self.check_north_south_icmp_flow(
            dst_ip=fip['floating_ip_address'],
            expected_routing_nodes=expected_routing_nodes,
            expected_mac=fip_port_mac,
            ssh_client=ssh_client)

        # Reboot the server and make sure that routing is still via compute.
        LOG.debug('Rebooting vm')
        self.os_admin.servers_client.reboot_server(
            self.server['server']['id'], type='SOFT')
        waiters.wait_for_server_status(self.os_admin.servers_client,
                                       self.server['server']['id'],
                                       neutron_constants.SERVER_STATUS_ACTIVE)
        self.check_north_south_icmp_flow(
            dst_ip=fip['floating_ip_address'],
            expected_routing_nodes=expected_routing_nodes,
            expected_mac=fip_port_mac,
            ssh_client=ssh_client)

    @decorators.idempotent_id('0fcf9f97-6368-4c5d-a5f5-ff8a7643e3b6')
    def test_validate_fip2fip_compute(self):
        """Check that traffic between VMs with a floating ip running on
        different compute nodes passes only through the compute nodes.

        The aim of the test is to verify fip to fip DVR functionality.
        Currently only OVN DVR environments are supported.

        Topology: Any topology with separate controller and compute
        nodes is valid for running the test.
        Recommended topology: 3 controller nodes with networking services and
        2 compute nodes with configured access to external network.

        Scenario:
        1. Create network, subnet, pingable and loginable security groups,
           keypair, run 2 vm instances (servers) and create a fip for each.
        2. Check on which compute node each server runs.
        3. Capture traffic on all nodes and ping one server from the other
           in the external network (see details in _check_north_south_flow())
        4. Search for the server fip mac address in all capture files.
        5. Verify that we found the server fip mac address in the capture
           file on the compute nodes where both servers run and not found in
           captures on other nodes.

        """
        self._setup()
        server2 = self._create_server(exclude_hosts=self.exclude_hosts)
        compute2 = self.get_host_shortname_for_server(
            server2['server']['id'])
        LOG.debug("compute = %s, compute2 = %s", self.compute, compute2)
        if self.compute == compute2:
            self.skipTest(
                "Servers are running on same compute - the test can provide "
                "wrong results, skipping")

        # with FIP to FIP it is expected to see packets on both computes
        self.check_north_south_icmp_flow(
            dst_ip=server2['fip']['floating_ip_address'],
            expected_routing_nodes=[self.compute, compute2],
            expected_mac=self.fip_port_mac,
            ssh_client=self.server_ssh_client)

    @decorators.idempotent_id('f8fd0fbd-4ad3-4b0b-b805-6c59228bc5d8')
    @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
                          'Live migration is not available.')
    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
                          'Less than 2 compute nodes, skipping multinode '
                          'tests.')
    @decorators.attr(type='slow')
    @utils.services('compute', 'network')
    def test_validate_dvr_connectivity_live_migration_basic(self):
        """Check that after VM migration to a new compute node,
        traffic is correctly routed through that new node.

        The aim of the test is to verify egress DVR functionality
        after a VM live migration.
        Currently only OVN DVR environments are supported.

        Topology: Two compute nodes are required for this test.
        Recommended topology: 3 controller nodes with networking services and
        2 compute nodes with configured access to external network.

        Scenario:
        1. Create network, subnet, pingable and loginable security groups,
           keypair, run two vm instances (self.server and server2) and create
           a fip for each of them.
        2. Check on which compute node each server runs.
        3. North-South Traffic verification:
           a. Capture traffic on all nodes and ping an IP address in the
              external network from self.server (see details in
              _check_north_south_flow()).
           b. Search for the server fip mac address in all capture files.
           c. Verify that we found the server fip mac address in the capture
              file on the compute node where the server runs and not found in
              captures on other nodes.
           d. Repeat steps a to c using server2.
        4. East-West Traffic verification:
           a. Capture traffic on all nodes and ping server2 internal IP
              address from self.server.
           b. Search for both self.server and server2 internal mac addresses
              in all capture files.
           c. Verify that we found both server mac addresses in the
              capture file on both compute nodes where the servers run and
              not found in captures on other nodes.
        5. Apply live migration on both self.server and server2.
        6. Verify that each server is migrated to a different compute node.
        7. Repeat steps 3 and 4.

        """
        self._setup()
        server2 = self._create_server(
            exclude_hosts=self.exclude_hosts)

        server2_ip = server2['port']['fixed_ips'][0]['ip_address']
        server2_mac = server2['port']['mac_address']
        server2_fip_ip = server2['fip']['floating_ip_address']
        server2_fip_mac = self.get_fip_port_details(
            server2['fip'])['mac_address']
        server2_ssh_client = ssh.Client(server2_fip_ip,
                                        CONF.validation.image_ssh_user,
                                        pkey=self.keypair['private_key'],
                                        proxy_client=self.server_ssh_client)
        server2_host = self.get_host_shortname_for_server(
            server2['server']['id'])

        # verify N/S connection with self.server
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[self.compute],
            expected_mac=self.fip_port_mac,
            ssh_client=self.server_ssh_client)
        # verify N/S connection with server2
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[server2_host],
            expected_mac=server2_fip_mac,
            ssh_client=server2_ssh_client)

        # verify E/W connection between self.server and server2
        # remove duplicates
        expected_routing_nodes = list(set([self.compute, server2_host]))
        if len(expected_routing_nodes) == 1:
            self.skipTest(
                "Servers are running on same compute - Please check if "
                "DifferentHostFilter is configured within the "
                "NovaSchedulerDefaultFilters list")
        self.check_east_west_icmp_flow(
            dst_ip=server2_ip,
            expected_routing_nodes=expected_routing_nodes,
            expected_macs=(self.server['port']['mac_address'], server2_mac),
            ssh_client=self.server_ssh_client)

        block_migration = (CONF.compute_feature_enabled.
                           block_migration_for_live_migration)
        # migrate self.server
        self.os_admin.servers_client.live_migrate_server(
            self.server['server']['id'], host=None,
            block_migration=block_migration)
        self.wait_for_server_active(
            self.server['server'], client=self.os_admin.servers_client)
        new_host = self.get_host_shortname_for_server(
            self.server['server']['id'])
        self.assertNotEqual(self.compute, new_host, 'Server1 did not migrate')
        # migrate server2
        compute_names = [
            node['name'] for node in self.nodes
            if node['is_compute'] and node['short_name'] not in (
                new_host, server2_host)]
        host = random.choice(compute_names)
        self.os_admin.servers_client.live_migrate_server(
            server2['server']['id'], host=host,
            block_migration=block_migration)
        self.wait_for_server_active(
            server2['server'], client=self.os_admin.servers_client)

        new_server2_host = self.get_host_shortname_for_server(
            server2['server']['id'])
        self.assertNotEqual(server2_host, new_server2_host,
                            'Server2 did not migrate')

        # verify N/S connection with self.server
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[new_host],
            expected_mac=self.fip_port_mac,
            ssh_client=self.server_ssh_client)
        # verify N/S connection with server2
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[new_server2_host],
            expected_mac=server2_fip_mac,
            ssh_client=server2_ssh_client)

        # verify E/W connection between self.server and server2
        # remove duplicates
        expected_routing_nodes = list(set([new_host, new_server2_host]))
        if len(expected_routing_nodes) == 1:
            self.skipTest(
                "Servers are running on same compute - Please check if "
                "DifferentHostFilter is configured within the "
                "NovaSchedulerDefaultFilters list")
        self.check_east_west_icmp_flow(
            dst_ip=server2_ip,
            expected_routing_nodes=expected_routing_nodes,
            expected_macs=(self.server['port']['mac_address'], server2_mac),
            ssh_client=self.server_ssh_client)

    @decorators.idempotent_id('609997ab-bffc-40e5-a858-635099df4db9')
    @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
                          'Live migration is not available.')
    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
                          'Less than 2 compute nodes, skipping multinode '
                          'tests.')
    @decorators.attr(type='slow')
    @utils.services('compute', 'network')
    def test_validate_dvr_connectivity_live_migration_different_networks(
            self):
        """This test is like test_validate_dvr_connectivity_live_migration
        but VM instances are created in different tenant networks

        Please see the previous test description for more details
        """
        self.create_security_group(
            name=data_utils.rand_name('secgroup'))
        self.create_loginable_secgroup_rule(
            secgroup_id=self.security_groups[-1]['id'])
        self.create_pingable_secgroup_rule(
            secgroup_id=self.security_groups[-1]['id'])
        self.keypair = self.create_keypair()
        router = self.create_router_by_client()

        networks = []
        subnets = []
        servers = []
        servers_host = []
        servers_fip_mac = []
        servers_ssh_client = []
        router_port_subnet_macs = []
        for i in range(2):
            networks.append(self.create_network())
            subnets.append(self.create_subnet(network=networks[i]))
            self.create_router_interface(router['id'], subnets[i]['id'])
            scheduler_hints = (
                {'different_host': servers[0]['server']['id']}
                if i > 0
                else {})
            servers.append(self._create_server(
                network=networks[i], scheduler_hints=scheduler_hints))
            servers_host.append(self.get_host_shortname_for_server(
                servers[i]['server']['id']))
            servers_fip_mac.append(self.get_fip_port_details(
                servers[i]['fip'])['mac_address'])
            servers_ssh_client.append(ssh.Client(
                servers[i]['fip']['floating_ip_address'],
                CONF.validation.image_ssh_user,
                pkey=self.keypair['private_key']))
            # verify N/S connection with servers
            self.check_north_south_icmp_flow(
                dst_ip=self.gateway_external_ip,
                expected_routing_nodes=[servers_host[i]],
                expected_mac=servers_fip_mac[i],
                ssh_client=servers_ssh_client[i])
            # obtain router mac for each network
            router_port_subnet_macs.append(
                self.os_admin.network_client.list_ports(
                    device_id=router['id'],
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
                    network_id=networks[i]['id'])['ports'][0]['mac_address'])

        def get_mac_mapping_for_vm(vm):
            return self.get_mac_mappings(
                self.find_node_client(
                    self.get_host_shortname_for_server(vm['server']['id'])),
                'datacentre')

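        # Background note (an assumption documented for clarity): with vlan
        # tenant networks, OVN rewrites the source MAC of routed east-west
        # traffic to a per-chassis MAC mapping (ovn-chassis-mac-mappings,
        # here for the 'datacentre' physnet) instead of using the router
        # port MAC, so the expected MACs depend on the hosting chassis.
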
        def get_expected_macs_for_vlan_tenant():
            return [
                (servers[0]['port']['mac_address'],
                 get_mac_mapping_for_vm(servers[1])),
                (get_mac_mapping_for_vm(servers[0]),
                 servers[1]['port']['mac_address'])]

        # verify E/W connection between servers
        if self.get_network_type(networks[0]['id']) == 'vlan' and \
                self.get_network_type(networks[1]['id']) == 'vlan':
            ew_expected_macs = get_expected_macs_for_vlan_tenant()
        else:
            ew_expected_macs = [
                (servers[0]['port']['mac_address'],
                 router_port_subnet_macs[0]),
                (router_port_subnet_macs[1],
                 servers[1]['port']['mac_address'])]
        # remove duplicates
        expected_routing_nodes = list(set([host for host in servers_host]))
        if len(expected_routing_nodes) == 1:
            self.skipTest(
                "Servers are running on same compute - Please check if "
                "DifferentHostFilter is configured within the "
                "NovaSchedulerDefaultFilters list")
        self.check_east_west_icmp_flow(
            dst_ip=servers[1]['port']['fixed_ips'][0]['ip_address'],
            expected_routing_nodes=expected_routing_nodes,
            expected_macs=ew_expected_macs,
            ssh_client=servers_ssh_client[0])

        block_migration = (CONF.compute_feature_enabled.
                           block_migration_for_live_migration)
        # migrate the servers
        new_servers_host = []
        for i, server in enumerate(servers):
            if i < 1:
                host = None
            else:
                compute_names = [
                    node['name'] for node in self.nodes
                    if node['is_compute'] and node['short_name'] not in (
                        new_servers_host[0], servers_host[1])]
                host = random.choice(compute_names)
            self.os_admin.servers_client.live_migrate_server(
                server['server']['id'], host=host,
                block_migration=block_migration)
            self.wait_for_server_active(server['server'])
            new_servers_host.append(self.get_host_shortname_for_server(
                server['server']['id']))
            self.assertNotEqual(servers_host[i], new_servers_host[i],
                                'Server%d did not migrate' % i)

            # verify N/S connection with servers
            self.check_north_south_icmp_flow(
                dst_ip=self.gateway_external_ip,
                expected_routing_nodes=[new_servers_host[i]],
                expected_mac=servers_fip_mac[i],
                ssh_client=servers_ssh_client[i])

        # verify E/W connection between servers
        # Since with vlan tenant we have MAC mapping on compute nodes
        # and the VMs migrated, we need to retrieve the MAC address
        # combination for filtering again
        if self.get_network_type(networks[0]['id']) == 'vlan' and \
                self.get_network_type(networks[1]['id']) == 'vlan':
            ew_expected_macs = get_expected_macs_for_vlan_tenant()

        # remove duplicates
        expected_routing_nodes = list(
            set([host for host in new_servers_host]))
        if len(expected_routing_nodes) == 1:
            self.skipTest(
                "Servers are running on same compute - Please check if "
                "DifferentHostFilter is configured within the "
                "NovaSchedulerDefaultFilters list")
        self.check_east_west_icmp_flow(
            dst_ip=servers[1]['port']['fixed_ips'][0]['ip_address'],
            expected_routing_nodes=expected_routing_nodes,
            expected_macs=ew_expected_macs,
            ssh_client=servers_ssh_client[0])

    @decorators.idempotent_id('0423e5b5-ac6a-4d4a-ad98-b0465e3ad71d')
    def test_dvr_create_delete_fip_restart_instance(self):
        """Check that traffic from a VM with a FIP passes through the compute
        node where the VM is running and traffic from a VM without a FIP
        passes through the controller node where the router is scheduled.

        The aim of the test is to verify that routing is distributed only
        when the VM has a FIP and centralized when the VM does not have one.
        Currently only OVN DVR environments are supported.
        The test also verifies that an instance reboot does not affect the
        connectivity and/or the routing.

        Topology: Any topology with separate controller and compute
        nodes is valid for running the test.
        Recommended topology: 3 controller nodes with networking services and
        2 compute nodes with configured access to external network.

        Scenario:
        1. Create network, subnet, router, pingable and loginable security
           groups, keypair, run 2 VM instances (servers). One of the VMs is a
           test server without a FIP and the second one is a 'proxy' (with a
           FIP) in order to be able to access the test server even when it
           does not have a FIP.
        2. Check on which controller node the router was scheduled.
        3. Capture traffic on all nodes on the interface connected to the
           external network and ping an external address from the
           test server.
        4. Search for the icmp packets with the router gateway port mac
           address in all capture files.
        5. Verify that icmp traffic with the router gateway port mac address
           was found on the controller node where the router was scheduled.
        6. Create a FIP for the test server, ping an external address and
           this time verify that traffic is passing through the compute node
           where the test server is running. This time we are looking for the
           test server FIP port mac.
        7. Delete the FIP from the test server and verify that traffic is
           again passing through the controller node where the router is
           scheduled.
        8. Add a new FIP to the test server and verify that traffic is again
           passing through the compute node where the test server is running.
        9. Restart the server and verify that routing has not changed.

        """
        self._setup()
        test_server = self._create_server(exclude_hosts=self.exclude_hosts,
                                          create_floating_ip=False)
        test_server_ip = test_server['port']['fixed_ips'][0]['ip_address']
        test_server_client = ssh.Client(
            test_server_ip,
            CONF.validation.image_ssh_user,
            pkey=self.keypair['private_key'],
            proxy_client=self.server_ssh_client)

        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[self.chassis_name],
            expected_mac=self.router_port['mac_address'],
            ssh_client=test_server_client,
            ignore_outbound=self.ignore_outbound)

        # Now add a fip to the test server and make sure that routing is now
        # via compute.
        LOG.debug('Adding floating ip to source vm')

        fip = self.os_admin.network_client.create_floatingip(
            port_id=test_server['port']['id'],
            floating_network_id=CONF.network.public_network_id)['floatingip']
        fip_port_mac = self.get_fip_port_details(fip)['mac_address']
        test_server_compute = self.get_host_shortname_for_server(
            test_server['server']['id'])
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[test_server_compute],
            expected_mac=fip_port_mac,
            ssh_client=test_server_client)

        # Delete fip and make sure that traffic goes via router chassis.
        LOG.debug('Deleting floating ip from source vm')
        self.os_admin.network_client.delete_floatingip(fip['id'])

        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[self.chassis_name],
            expected_mac=self.router_port['mac_address'],
            ssh_client=test_server_client,
            ignore_outbound=self.ignore_outbound)

        # Add a new fip to the test server and make sure that routing is
        # via compute again.
        LOG.debug('Adding new floating ip to source vm')

        fip = self.os_admin.network_client.create_floatingip(
            port_id=test_server['port']['id'],
            floating_network_id=CONF.network.public_network_id)['floatingip']
        fip_port_mac = self.get_fip_port_details(fip)['mac_address']
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[test_server_compute],
            expected_mac=fip_port_mac,
            ssh_client=test_server_client)

        # Reboot the server and make sure that routing is still via compute.
        LOG.debug('Rebooting vm')
        self.os_admin.servers_client.reboot_server(
            test_server['server']['id'], type='SOFT')
        waiters.wait_for_server_status(self.os_admin.servers_client,
                                       test_server['server']['id'],
                                       neutron_constants.SERVER_STATUS_ACTIVE)
        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[test_server_compute],
            expected_mac=fip_port_mac,
            ssh_client=test_server_client)


class OvnDvrAdvancedTest(base.BaseTempestTestCaseAdvanced,
                         OvnDvrBase):

    @classmethod
    def create_loginable_secgroup_rule(cls, secgroup_id=None, client=None):
        super(OvnDvrAdvancedTest, cls).create_loginable_secgroup_rule(
            secgroup_id=secgroup_id, client=client)
        # the parent method only creates an ssh rule for IPv4 traffic and
        # IPv6 is needed too
        cls.create_security_group_rule(
            security_group_id=secgroup_id,
            client=client,
            protocol='tcp',
            direction='ingress',
            ip_version=6,
            port_range_min=22,
            port_range_max=22)

    @classmethod
    def resource_setup(cls):
        super(OvnDvrAdvancedTest, cls).resource_setup()
        cls.keypair = cls.create_keypair()
        cls.secgroup = cls.os_primary.network_client.create_security_group(
            name=data_utils.rand_name('secgroup'))
        cls.security_groups.append(cls.secgroup['security_group'])
        cls.create_loginable_secgroup_rule(
            secgroup_id=cls.secgroup['security_group']['id'])
        cls.router = cls.create_router_by_client()

    @staticmethod
    def _configure_ip_address_in_vm(ssh_client, nic, ip_address):
        ssh_client.execute_script(
            'ip addr add %s dev %s' % (ip_address, nic),
            become_root=True)
        if netaddr.valid_ipv4(ip_address):
            # We need this delay in order to simulate behavior on a busy
            # environment, i.e. cause a race condition between garp and tcp
            # traffic, see rhbz#2035079.
            # Without the delay the race condition does not occur and the
            # test passes successfully even with broken core OVN.
            time.sleep(5)
            ssh_client.execute_script(
                'arping -c 1 -A -I %s %s' % (nic, ip_address),
                become_root=True)

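    # The arping invocation above announces the failover: with iputils
    # arping, '-A' sends unsolicited (gratuitous) ARP replies and '-c 1'
    # limits it to a single packet, e.g.:
    #     arping -c 1 -A -I eth0 10.100.0.50
    # so peers update their ARP caches to the new VIP holder.
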
    @staticmethod
    def _remove_ip_address_from_vm(ssh_client, nic, ip_address):
        ssh_client.execute_script(
            'ip addr del {ip} dev {nic}; ip addr'.format(
                ip=ip_address, nic=nic),
            become_root=True)

    @staticmethod
    def _start_tcp_connection(ssh_client):
        ssh_process = ssh_client.open_session()
        cmd = ('ping {}'.format(local_constants.GLOBAL_IP))
        ssh_process.exec_command(cmd)

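    # Despite the name, the command sent is a ping; the point, as far as can
    # be inferred from the comments around BZ#2035079, is that the ssh
    # session opened above stays up, so an established TCP (ssh) connection
    # plus steady ICMP traffic both cross the VIP during the failover window.
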
    def _failover_vip(
            self, master_vm, new_master_vm, vip_ssh, nic, ip_address):
        self._start_tcp_connection(vip_ssh)
        self._remove_ip_address_from_vm(
            master_vm['ssh_client'], nic, ip_address)
        self._configure_ip_address_in_vm(
            new_master_vm['ssh_client'], nic, ip_address)

    @staticmethod
    def _get_src_ip_from_route(dst):
        # if there is no local IP from the external subnet's CIDR (this
        # happens with BGP setups), the src IP from the route to the
        # vip(fip) is used instead
        ipr = IPRoute()
        routes = ipr.get_routes(dst=dst)
        ipr.close()
        for route in routes:
            for attr in route.get('attrs', []):
                if attr[0] == 'RTA_PREFSRC':
                    return attr[1]

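    # A sketch of the pyroute2 data this walks, from a hypothetical host:
    #     >>> IPRoute().get_routes(dst='203.0.113.50')[0]['attrs']
    #     [('RTA_TABLE', 254), ('RTA_PREFSRC', '203.0.113.10'), ...]
    # RTA_PREFSRC is the preferred source address the kernel would pick for
    # traffic to dst, which is what the capture filters are built around.
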
    def _get_filters(self, src_ip, vip_mac, vip_ip):
        if self.external_network['provider:network_type'] == 'vlan':
            filters = 'vlan {} and '.format(
                self.external_network['provider:segmentation_id'])
        else:
            filters = ''

        if not WB_CONF.bgp and vip_mac is not None:
            filters += 'ether host {} and dst host {}'.format(
                vip_mac, src_ip)
        else:
            filters += 'src host {} and dst host {}'.format(
                vip_ip, src_ip)

        filters += ' and tcp src port 22'
        return filters

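    # Example of a resulting capture filter, with hypothetical values
    # (vlan segmentation id 123, VIP FIP MAC fa:16:3e:aa:bb:cc, traffic
    # source 203.0.113.10):
    #     vlan 123 and ether host fa:16:3e:aa:bb:cc and
    #     dst host 203.0.113.10 and tcp src port 22
    # i.e. only ssh replies from the VIP towards the traffic source match.
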
    def _capture_and_test_failover_vip(
            self, filters, vm1, vm2, nic, vip_ip, vip_ssh_client):
        self._start_captures(filters)

        self._failover_vip(vm1, vm2, vip_ssh_client, nic, vip_ip)
        # BZ#2035079 reproduced reliably when 2 failovers happen
        self._failover_vip(vm2, vm1, vip_ssh_client, nic, vip_ip)
        self._stop_captures()

        LOG.debug('Expected routing nodes: %s',
                  ','.join(self.expected_routing_nodes))
        actual_routing_nodes = [node['short_name']
                                for node in self.nodes if
                                (node.get('capture') and
                                 not node['capture'].is_empty())]
        LOG.debug('Actual routing nodes: %s',
                  ','.join(actual_routing_nodes))
        self.assertCountEqual(
            self.expected_routing_nodes, actual_routing_nodes)

    def create_ext_vm(self, ip):
        # On podified environments sending traffic from a local IP address
        # affects test results since tempest is running on a node that
        # can be a gateway node. Therefore an additional VM on the external
        # network is used as a proxy host.
        # Note: we cannot use the ansible controller as a proxy host here
        # since on some environments the ansible controller does not have
        # access to the external network.
        ext_vm = self._create_server(
            network=self.external_network,
            create_floating_ip=False)
        ext_vm_ip = ext_vm['port']['fixed_ips'][0]['ip_address']
        ext_vm_ssh_client = ssh.Client(
            ext_vm_ip,
            self.username, pkey=self.keypair['private_key'])
        vip_ssh_client = ssh.Client(
            ip, self.username,
            pkey=self.keypair['private_key'],
            proxy_client=ext_vm_ssh_client)
        ext_vm_host = self.get_host_shortname_for_server(
            ext_vm['server']['id'])
        if ext_vm_host not in self.expected_routing_nodes:
            self.expected_routing_nodes.append(ext_vm_host)
        return vip_ssh_client, ext_vm_ip

    @decorators.idempotent_id('509d1432-3879-40d4-9378-e6a0d972a292')
    def test_dvr_vip_failover_basic(self):
        """Test DVR during VIP failover using a tenant network and FIPs

        The test checks that during VIP failover on a DVR environment traffic
        remains distributed.
        Scenario:
        1. Create private network, router, connect the network to the
           router and the router to the public network.
        2. Spawn 2 VMs connected to the private network.
        3. Create a port on the private network that will work as
           a virtual ip address (VIP) for the VMs. Make sure that VM ports
           have this ip address configured in the allowed address pairs list.
        4. Create FIPs on the public network for the VMs and the VIP.
        5. Configure the VIP address on one of the VMs, initiate a TCP
           connection to the VIP FIP.
        6. Start traffic captures on all overcloud nodes filtering on the
           VIP FIP MAC address.
        7. Failover the VIP from one VM to another with a delay before
           sending GARP in order to simulate behaviour on a busy system.
           See the relevant customer issue for details, BZ#2035079.
        8. Check traffic captures on all nodes to see where the VIP FIP
           traffic really passed. Expected behavior is to see traffic on
           compute nodes rather than any networker node.
        9. In case BGP is configured with expose_tenant_networks, the test
           proceeds to remove the FIPs, disable snat on the router and check
           connectivity by repeating steps 6, 7 and 8 using tenant IPs
           instead of FIPs. The only expected routing node is the controller
           hosting the router gateway port.

        """
        vm1, vm2 = self._create_vms_by_topology()
        vm1_port = self.client.list_ports(device_id=vm1['id'])['ports'][0]
        vip_port = self.client.create_port(
            network_id=vm1_port['network_id'])['port']
        self.ports.append(vip_port)
        vip_ip = vip_port['fixed_ips'][0]['ip_address']
        self.client.update_port(
            vm1_port['id'], allowed_address_pairs=[{"ip_address": vip_ip}])
        vm2_port = self.client.list_ports(device_id=vm2['id'])['ports'][0]
        self.client.update_port(
            vm2_port['id'], allowed_address_pairs=[{"ip_address": vip_ip}])
        vip_fip = self.create_floatingip(port=vip_port)
        vip_fip_ip = vip_fip['floating_ip_address']

        self.expected_routing_nodes = []
        for vm in [vm1, vm2]:
            nic = local_utils.get_default_interface(vm['ssh_client'])
            self.expected_routing_nodes.append(
                self.get_host_shortname_for_server(vm['id']))

        vip_ssh_client = ssh.Client(
            vip_fip['floating_ip_address'], self.username,
            pkey=self.keypair['private_key'])
        vip_fip_mac = self.get_fip_port_details(vip_fip)['mac_address']

        # Set the vip on vm1 first; the failover then moves it to vm2
        self._configure_ip_address_in_vm(vm1['ssh_client'], nic, vip_ip)
        self.ensure_external_network_is_shared()
        vip_ssh_client, src_ip = self.create_ext_vm(vip_fip_ip)
        filters = self._get_filters(src_ip, vip_fip_mac, vip_fip_ip)
        self._capture_and_test_failover_vip(
            filters, vm1, vm2, nic, vip_ip, vip_ssh_client)

        # Test connectivity to the tenant VIP if BGP is configured with
        # expose_tenant_networks
        if self.bgp_expose_tenant_networks:
            # remove FIPs
            vm1_fip = self.client.list_floatingips(
                port_id=vm1_port['id'])['floatingips'][0]
            vm2_fip = self.client.list_floatingips(
                port_id=vm2_port['id'])['floatingips'][0]
            self.delete_floatingip(vm1_fip)
            self.delete_floatingip(vm2_fip)
            self.delete_floatingip(vip_fip)

            # update ssh clients with tenant IPs
            vm1_ip = vm1_port['fixed_ips'][0]['ip_address']
            vm2_ip = vm2_port['fixed_ips'][0]['ip_address']
            vm1['ssh_client'].host = vm1_ip
            vm2['ssh_client'].host = vm2_ip
            vip_ssh_client.host = vip_ip

            # disable snat on the router
            self.os_admin.network_client.update_router_with_snat_gw_info(
                self.router['id'],
                external_gateway_info={
                    'network_id': CONF.network.public_network_id,
                    'enable_snat': False})

            # generate updated tcpdump filters
            src_ip = self._get_src_ip_from_route(vip_ip)
            filters = self._get_filters(
                src_ip, vip_port['mac_address'], vip_ip)

            # calculate the new expected_routing_nodes: the chassis hosting
            # the router gateway is the only expected match (traffic from
            # that chassis/controller to the computes hosting the two VMs
            # goes through a geneve tunnel)
            router_port = self.os_admin.network_client.list_ports(
                device_id=self.router['id'],
                device_owner=constants.DEVICE_OWNER_ROUTER_GW)['ports'][0]
            router_gateway_chassis = self.get_router_gateway_chassis(
                router_port['id'])
            self.expected_routing_nodes = [router_gateway_chassis]

            self._capture_and_test_failover_vip(
                filters, vm1, vm2, nic, vip_ip, vip_ssh_client)

    @decorators.idempotent_id('5d812adb-ba7c-4ce3-a589-4cc3426d1578')
    def test_dvr_vip_failover_external_network(self):
        """Test DVR during VIP failover using an external network with IPv4
        and IPv6 subnets

        Repeat test test_dvr_vip_failover using VMs connected directly to the
        external network. The test checks that during VIP failover on a DVR
        environment traffic remains distributed.
        Scenario:
        1. Spawn 2 VMs connected to the external network.
        2. Create a port on the external network that will work as
           a virtual ip address (VIP) for the VMs. Make sure that VM ports
           have this ip address configured in the allowed address pairs list.
        3. Configure the VIP address on one of the VMs, initiate a TCP
           connection to the VIP.
        4. Start traffic captures on all overcloud nodes filtering on the
           VIP MAC address.
        5. Failover the VIP from one VM to another with a delay before
           sending GARP in order to simulate behaviour on a busy system.
           See the relevant customer issue for details, BZ#2035079.
        6. Check traffic captures on all nodes to see where the VIP
           traffic really passed. Expected behavior is to see traffic on
           compute nodes rather than any networker node.
        7. If the external network has an IPv6 subnet too, repeat steps
           2 to 6 with an IPv6 VIP port.

        """
        vm1, vm2 = self._create_vms_by_topology(topology='external')
        vm1_port = self.client.list_ports(device_id=vm1['id'])['ports'][0]
        vm2_port = self.client.list_ports(device_id=vm2['id'])['ports'][0]
        vip_port = self.client.create_port(
            network_id=vm1_port['network_id'])['port']
        self.ports.append(vip_port)
        vip_ip = vip_port['fixed_ips'][0]['ip_address']
        aap = [{"ip_address": vip_ip}]

        self.expected_routing_nodes = []
        for vm in [vm1, vm2]:
            nic = local_utils.get_default_interface(vm['ssh_client'])
            self.expected_routing_nodes.append(
                self.get_host_shortname_for_server(vm['id']))

        # checking whether an external IPv6 subnet exists or not
        ip_versions = [
            subnet['ip_version']
            for subnet in self.os_admin.network_client.list_subnets(
                network_id=CONF.network.public_network_id)['subnets']]

        if len(ip_versions) > 1 and ip_versions[1] == 6:
            skip_ipv6 = False
            vip_portv6 = self.client.create_port(
                network_id=vm1_port['network_id'])['port']
            self.ports.append(vip_portv6)
            vip_ipv6 = vip_portv6['fixed_ips'][1]['ip_address']
            vip_ssh_clientv6 = ssh.Client(
                vip_ipv6, self.username, pkey=self.keypair['private_key'])
            # adding the vip_ipv6 address to the allowed-address-pairs list
            aap.append({"ip_address": vip_ipv6})
        else:
            skip_ipv6 = True
            LOG.info('The test cannot be executed with an IPv6 address')

        self.os_admin.network_client.update_port(
            vm1_port['id'], allowed_address_pairs=aap)
        self.os_admin.network_client.update_port(
            vm2_port['id'], allowed_address_pairs=aap)

        # Set the vip on vm1 first; the failover then moves it to vm2
        self._configure_ip_address_in_vm(vm1['ssh_client'], nic, vip_ip)
        vip_ssh_client, src_ip = self.create_ext_vm(vip_ip)
        # vip_mac is set to None because the filter should be based on the
        # vip_ip instead in this case, where no FIPs are used
        filters = self._get_filters(src_ip, None, vip_ip)

        self._capture_and_test_failover_vip(
            filters, vm1, vm2, nic, vip_ip, vip_ssh_client)

        # removing the IPv4 VIP address
        self._remove_ip_address_from_vm(vm1['ssh_client'], nic, vip_ip)

        # now, with the external ipv6 address:
        # set the vip on vm2 first, then fail it over to vm1
        if skip_ipv6 is False:
            self._configure_ip_address_in_vm(vm2['ssh_client'], nic, vip_ipv6)
            src_ip = self._get_src_ip_from_route(vip_ipv6)
            # vip_mac is set to None because the filter should be based on
            # the vip_ip instead in this case, where no FIPs are used
            filters = self._get_filters(src_ip, None, vip_ipv6)
            self._capture_and_test_failover_vip(
                filters, vm2, vm1, nic, vip_ipv6, vip_ssh_clientv6)