
Fixes some new PEP8 errors that appear with jobs running on new ubuntu version, and temporarily filters out the larger I202 error ("Additional newline in a group of imports"). This patch updates the hacking and flake8-import-order versions. Copied from: https://review.opendev.org/c/openstack/ovn-octavia-provider/+/936855 Change-Id: Ice4513eedc4fd6f054c19d1854eff00aeb5c35a1
313 lines
14 KiB
Python
313 lines
14 KiB
Python
# Copyright 2024 Red Hat, Inc.
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
import testtools
|
|
|
|
from neutron_lib import constants as lib_constants
|
|
from neutron_tempest_plugin.common import ssh
|
|
from neutron_tempest_plugin.common import utils as common_utils
|
|
from oslo_log import log
|
|
from tempest import config
|
|
from tempest.lib.common.utils import data_utils
|
|
from tempest.lib.common.utils import test_utils
|
|
from tempest.lib import decorators
|
|
from tempest.lib import exceptions
|
|
|
|
from whitebox_neutron_tempest_plugin.common import constants
|
|
from whitebox_neutron_tempest_plugin.common import utils
|
|
from whitebox_neutron_tempest_plugin.tests.scenario import base
|
|
|
|
|
|
# Tempest configuration root, used for generic options (e.g. validation).
CONF = config.CONF
# Shortcut to the options registered by the whitebox neutron tempest plugin.
WB_CONF = config.CONF.whitebox_neutron_plugin_options
# Module-level logger, per oslo.log convention.
LOG = log.getLogger(__name__)
|
|
|
|
|
|
class L3haOvnCommon(base.TrafficFlowTest, base.BaseTempestTestCaseOvn):
    """Shared setup and verification logic for the OVN L3 HA scenarios.

    Builds a router/network/VM topology and provides helpers to assert that
    north-south traffic flows through the expected OVN gateway chassis.
    """

    credentials = ['primary', 'admin']

    @classmethod
    def resource_setup(cls):
        super().resource_setup()
        # Compute microversion 2.74 is required so VMs can be created with
        # the 'host' scheduling option used in _setup().
        cls.setup_api_microversion_fixture(
            compute_microversion='2.74')

    def verify_routing_via_chassis(self, chassis_id):
        """Assert north-south ICMP traffic is routed via ``chassis_id``.

        Waits until the router's reported gateway chassis matches the node
        backing ``chassis_id``, then checks the ICMP flow actually traverses
        that node. Fails the test if the gateway chassis does not converge
        within the timeout.
        """
        self.expected_gateway_chassis = None

        def _get_router_gateway_chassis_by_id(chassis_id):
            # Resolving the chassis name can fail right after a disruption
            # while the OVN DBs are not ready yet, so treat SSH command
            # failures as retryable.
            try:
                self.expected_gateway_chassis = \
                    self.get_router_gateway_chassis_by_id(chassis_id)
            except exceptions.SSHExecCommandFailed as err:
                LOG.exception(err)
                LOG.warning("Retrying to obtain router gateway chassis in "
                            "case the OVN DBs are not ready yet")
                return False
            return True

        common_utils.wait_until_true(
            lambda: _get_router_gateway_chassis_by_id(chassis_id),
            timeout=60, sleep=5)

        LOG.debug("Waiting until router gateway chassis is updated")
        self.router_gateway_chassis = None

        def _router_gateway_chassis_updated():
            self.router_gateway_chassis = self.get_router_gateway_chassis(
                self.router_port['id'])
            LOG.debug("chassis = '%s', expected = %s ",
                      self.router_gateway_chassis,
                      self.expected_gateway_chassis)
            return self.router_gateway_chassis == self.expected_gateway_chassis

        try:
            # The predicate takes no arguments, so it can be passed directly
            # instead of being wrapped in a lambda.
            common_utils.wait_until_true(
                _router_gateway_chassis_updated,
                timeout=60, sleep=5)
        except common_utils.WaitTimeout:
            self.fail("Gateway chassis was not updated as expected")

        self.check_north_south_icmp_flow(
            dst_ip=self.gateway_external_ip,
            expected_routing_nodes=[self.expected_gateway_chassis],
            expected_mac=self.router_port['mac_address'],
            ssh_client=self.test_server_client,
            ignore_outbound=self.ignore_outbound)

    def _validate_gateway_chassis(self, chassis_id):
        # Skip the test when the gateway chassis landed on a controller
        # node: the disruptive actions exercised by the tests are not
        # supported there.
        node_name = self.get_router_gateway_chassis_by_id(chassis_id)
        if self.get_node_setting(node_name, 'is_controller'):
            raise self.skipException(
                "The test currently does not support the required action "
                "when gateway chassis is on controller.")

    def _setup(self):
        """Create the router/network/security-group/VM topology for a test.

        Sets ``self.router_port``, ``self.chassis_list``,
        ``self.test_server_client``, ``self.ssh_proxy_server_client`` and
        ``self.ignore_outbound`` for later verification helpers.
        """
        def create_router_candidate():
            # Create a router and report which chassis its gateway port was
            # scheduled on.
            router = self.create_router_by_client()
            self.router_port = self.os_admin.network_client.list_ports(
                device_id=router['id'],
                device_owner=lib_constants.DEVICE_OWNER_ROUTER_GW)['ports'][0]
            self.chassis_list = self.get_router_gateway_chassis_list(
                self.router_port['id'])
            chassis_name = self.get_router_gateway_chassis_by_id(
                self.chassis_list[0])
            LOG.debug("router chassis name = %s", chassis_name)
            return router, chassis_name

        if (WB_CONF.avoid_disrupting_controllers and
                self.get_standalone_networkers()):
            # Retry router creation until its gateway chassis is scheduled
            # on a non-controller node (best effort, bounded attempts).
            attempts = 5
            controller_nodes = [node['name'] for node in self.nodes
                                if node['is_controller']]
            # NOTE: range upper bound is attempts + 1 so we really try
            # 'attempts' times; the previous range(1, attempts) performed
            # one attempt fewer than configured.
            for i in range(1, attempts + 1):
                LOG.debug("Router creation attempt %s", i)
                router, chassis_name = create_router_candidate()
                if chassis_name not in controller_nodes:
                    break

        else:
            router, chassis_name = create_router_candidate()

        self._validate_gateway_chassis(self.chassis_list[0])
        # Since we are going to spawn VMs with 'host' option which
        # is available only for admin user, we create security group
        # and keypair also as admin
        secgroup = self.os_admin.network_client.create_security_group(
            name=data_utils.rand_name('secgroup'))
        self.security_groups.append(secgroup['security_group'])
        self.os_admin.network_client.create_security_group_rule(
            security_group_id=secgroup['security_group']['id'],
            protocol=lib_constants.PROTO_NAME_ICMP,
            direction=lib_constants.INGRESS_DIRECTION)
        self.os_admin.network_client.create_security_group_rule(
            security_group_id=secgroup['security_group']['id'],
            protocol=lib_constants.PROTO_NAME_TCP,
            direction=lib_constants.INGRESS_DIRECTION,
            port_range_min=22,
            port_range_max=22)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.os_admin.network_client.delete_security_group,
            secgroup['security_group']['id'])
        self.keypair = self.os_admin.keypairs_client.create_keypair(
            name=data_utils.rand_name('keypair'))['keypair']
        self.network = self.create_network()
        self.subnet = self.create_subnet(self.network)
        self.create_router_interface(router['id'], self.subnet['id'])

        # We create VMs on compute hosts that are not on the same host
        # as router gateway port, i.e. the test is capable to work even
        # on environments that schedule ovn routers on compute nodes
        self.exclude_hosts = [chassis_name]
        ssh_proxy_server = self._create_server(
            exclude_hosts=self.exclude_hosts)
        test_server = self._create_server(exclude_hosts=self.exclude_hosts,
                                          create_floating_ip=False)
        self.ssh_proxy_server_client = ssh.Client(
            ssh_proxy_server['fip']['floating_ip_address'],
            CONF.validation.image_ssh_user,
            pkey=self.keypair['private_key'])
        test_server_ip = test_server['port']['fixed_ips'][0]['ip_address']
        # The test server has no floating IP; reach it through the proxy VM.
        self.test_server_client = ssh.Client(
            test_server_ip,
            CONF.validation.image_ssh_user,
            pkey=self.keypair['private_key'],
            proxy_client=self.ssh_proxy_server_client)

        network_details = self.os_admin.network_client.show_network(
            self.network['id'])
        if network_details['network']['provider:network_type'] == 'vlan':
            # This helps to avoid false positives with vlan+dvr,see BZ2192633
            self.ignore_outbound = True
        else:
            self.ignore_outbound = False
        self.verify_routing_via_chassis(self.chassis_list[0])

    def refresh_nodes_data(self):
        """Re-discover nodes after a disruption (e.g. host power cycle)."""
        self.discover_nodes()
        if WB_CONF.openstack_type == 'podified':
            self.set_ovs_pods_for_nodes()
|
|
|
|
|
|
@testtools.skipUnless(WB_CONF.run_power_operations_tests,
                      "run_power_operations_tests conf value is not "
                      "enabled.")
class L3haOvnDisruptiveTest(L3haOvnCommon, base.BaseDisruptiveTempestTestCase):
    # Disruptive variant: powers overcloud nodes off and on, so it only runs
    # when run_power_operations_tests is enabled in the plugin config.

    @decorators.idempotent_id('cf47a5e3-35cb-423c-84af-4cc6d389cfbd')
    @decorators.attr(type='slow')
    def test_l3ha_reboot_node(self):
        """Check that traffic from a VM connected to an internal network
        passes through a networker node which is the highest priority
        chassis for a router the internal network is connected to.

        The test is intended for OVN environments.

        Topology: Any topology with at least 2 nodes acting as networker nodes
        (controller nodes with networking services also valid) and at least
        one compute node.

        Scenario:
        1. Create network, subnet, router, pingable and loginable security
           group rules, keypair, run a VM instance (server).
        2. Find which node is the highest priority chassis for the router.
        3. Ping an external address from the VM and make sure that traffic
           is passing through the interface connected to the external network
           on the highest priority chassis.
        4. Shutdown the node where the higher priority chassis was scheduled
           and repeat steps 2-3. Make sure that now traffic is passing through
           the other node(chassis).
        5. Start up the turned off host, wait until it is up and repeat steps
           2-3. Make sure that highest priority chassis is back and traffic
           is passing through it.

        """
        # ensures overcloud nodes are up for next tests
        self.addCleanup(self.ensure_overcloud_nodes_active)
        self._setup()
        # _setup() verified routing via the highest priority chassis and
        # recorded the node currently hosting the gateway.
        gateway_node = self.router_gateway_chassis

        # Power off the gateway node; routing should fail over to the
        # second chassis in the priority list.
        self.power_off_host(gateway_node)
        self.refresh_nodes_data()
        self.verify_routing_via_chassis(self.chassis_list[1])

        # Power it back on; routing should return to the highest priority
        # chassis.
        self.power_on_host(gateway_node)
        self.refresh_nodes_data()
        self.verify_routing_via_chassis(self.chassis_list[0])
|
|
|
|
|
|
class L3haOvnTest(L3haOvnCommon):
    """Non-disruptive L3 HA failover scenarios for OVN gateway chassis."""

    @decorators.idempotent_id('f8fe1f69-a87f-41d8-ac6e-ed7905438338')
    @decorators.attr(type='slow')
    def test_l3ha_bring_down_interface(self):
        """Verify gateway chassis failover when its tenant NIC goes down.

        Intended for OVN environments with at least 2 dedicated networker
        nodes.

        Scenario:
        1. Create network, subnet, router, pingable and loginable security
           group rules, keypair, run a VM instance (server).
        2. Find which node is the highest priority chassis for the router.
        3. Ping an external address from the VM and make sure that traffic
           is passing through the interface connected to the external network
           on the highest priority chassis.
        4. Bring down the interface which is passing tenant traffic on that
           node and repeat steps 2-3; traffic must now pass through the
           other chassis.
        5. Bring the interface back up, wait until port mappings are updated
           and repeat steps 2-3; traffic must pass through the highest
           priority chassis again.
        """
        self._setup()
        iface = WB_CONF.node_tenant_interface
        gw_node_client = self.find_node_client(self.router_gateway_chassis)
        # Ensure the interface is restored even if the test fails midway.
        self.addCleanup(
            utils.interface_state_set, gw_node_client, iface,
            constants.STATE_UP)

        # Take the tenant interface down: traffic fails over to the
        # second-priority chassis.
        utils.interface_state_set(gw_node_client, iface, constants.STATE_DOWN)
        self.verify_routing_via_chassis(self.chassis_list[1])

        # Bring it back up: traffic returns to the highest priority chassis.
        utils.interface_state_set(gw_node_client, iface, constants.STATE_UP)
        self.verify_routing_via_chassis(self.chassis_list[0])

    @decorators.idempotent_id('c662477b-6871-4c19-ae87-a2ece859d7f4')
    @decorators.attr(type='slow')
    def test_l3ha_stop_ovs_service(self):
        """Verify gateway chassis failover when openvswitch is stopped.

        Intended for OVN environments with at least 2 dedicated networker
        nodes.

        Scenario:
        1. Create network, subnet, router, pingable and loginable security
           group rules, keypair, run a VM instance (server).
        2. Find which node is the highest priority chassis for the router.
        3. Ping an external address from the VM and make sure that traffic
           is passing through the interface connected to the external network
           on the highest priority chassis.
        4. Stop the openvswitch service on that node and repeat steps 2-3;
           traffic must now pass through the other chassis.
        5. Start the openvswitch service, wait until port mappings are
           updated and repeat steps 2-3; traffic must pass through the
           highest priority chassis again.
        """
        self._setup()
        svc = 'ovs-vswitchd.service'
        gw_node_client = self.find_node_client(self.router_gateway_chassis)
        # Ensure the service is started again even if the test fails midway.
        self.addCleanup(
            utils.remote_service_action, gw_node_client,
            svc, constants.ACTION_START, 'active')

        # Stop openvswitch: traffic fails over to the second-priority
        # chassis.
        utils.remote_service_action(
            gw_node_client, svc, constants.ACTION_STOP,
            target_state='inactive')
        self.verify_routing_via_chassis(self.chassis_list[1])

        # Start it again: traffic returns to the highest priority chassis.
        utils.remote_service_action(
            gw_node_client, svc, constants.ACTION_START,
            target_state='active')
        self.verify_routing_via_chassis(self.chassis_list[0])
|