
In the tests that use tcpdump to check whether traffic goes through the right node(s), there was previously only a check of whether anything was captured at all. In case of a failure we did not know what was actually captured, which made the cause hard to identify. This patch adds logging of the packets captured on all of the nodes when the assertion in a test fails. Hopefully that will help with debugging issues like the one in the related bug. Related-bug: #OSPRH-11312 Change-Id: I1025ae0c9dbb50d187b2827a8a7c4de864e35875
1740 lines
75 KiB
Python
1740 lines
75 KiB
Python
# Copyright 2024 Red Hat, Inc.
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
import base64
|
|
import collections
|
|
from functools import partial
|
|
from multiprocessing import Process
|
|
import os
|
|
import random
|
|
import re
|
|
|
|
import time
|
|
|
|
import yaml
|
|
|
|
import netaddr
|
|
from netifaces import AF_INET
|
|
from netifaces import ifaddresses
|
|
from netifaces import interfaces
|
|
from neutron_lib import constants
|
|
from neutron_tempest_plugin.common import shell
|
|
from neutron_tempest_plugin.common import ssh
|
|
from neutron_tempest_plugin.common import utils as common_utils
|
|
from neutron_tempest_plugin import exceptions
|
|
from neutron_tempest_plugin.scenario import base
|
|
from oslo_log import log
|
|
from tempest.common import utils
|
|
from tempest.common import waiters
|
|
from tempest import config
|
|
from tempest.lib.common import fixed_network
|
|
from tempest.lib.common.utils import data_utils
|
|
from tempest.lib.common.utils import test_utils
|
|
from tempest.lib import exceptions as lib_exceptions
|
|
|
|
from whitebox_neutron_tempest_plugin.common import constants as local_constants
|
|
from whitebox_neutron_tempest_plugin.common import tcpdump_capture as capture
|
|
from whitebox_neutron_tempest_plugin.common import utils as local_utils
|
|
|
|
CONF = config.CONF
|
|
LOG = log.getLogger(__name__)
|
|
WB_CONF = CONF.whitebox_neutron_plugin_options
|
|
ConfigOption = collections.namedtuple(
|
|
'ConfigOption', ('section', 'parameter', 'value'))
|
|
|
|
|
|
class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
|
|
credentials = ['primary', 'admin']
|
|
|
|
@classmethod
def resource_setup(cls):
    """Resolve deployment-wide facts once per test class.

    Detects IPv6 keystone endpoints, default image/flavor/ssh user,
    agent-based capabilities (OVN, SR-IOV) and deployer-specific
    settings (proxy host, neutron API prefix, external network).
    """
    super(BaseTempestWhiteboxTestCase, cls).resource_setup()
    uri = (
        CONF.identity.uri_v3 if CONF.identity.uri_v3
        else CONF.identity.uri)
    # An IPv6 endpoint looks like 'http://[fd00::1]:5000'; when no
    # brackets are present the extracted slice is not a valid IPv6.
    # bool(...) replaces the redundant 'True if ... else False' idiom.
    cls.is_ipv6 = bool(netaddr.valid_ipv6(
        uri[uri.find("[") + 1:uri.find("]")]))
    cls.image_ref = CONF.compute.image_ref
    cls.flavor_ref = CONF.compute.flavor_ref
    cls.username = CONF.validation.image_ssh_user
    agents = cls.os_admin.network.AgentsClient().list_agents()['agents']
    ovn_agents = [agent for agent in agents if 'ovn' in agent['binary']]
    cls.has_ovn_support = bool(ovn_agents)
    sriov_agents = [
        agent for agent in agents if 'sriov' in agent['binary']]
    cls.has_sriov_support = bool(sriov_agents)
    cls.neutron_conf = local_constants.NEUTRON_CONF[WB_CONF.openstack_type]
    # deployer tool dependent variables
    cls.setup_proxy_host()
    if WB_CONF.openstack_type == 'podified':
        # neutron API calls are proxied through the first neutron pod
        cls.neutron_api_prefix = '{} rsh {} '.format(
            cls.OC, cls.get_pods_of_service()[0])
    cls.external_network = cls.os_admin.network_client.show_network(
        CONF.network.public_network_id)['network']
|
|
|
|
@classmethod
def setup_proxy_host(cls):
    """Prepare the client used to proxy 'oc' / 'openstack' commands.

    devstack: commands run locally on this host.
    podified: commands run over ssh on the configured proxy host; an
    optional kubeconfig symlink is created so 'oc' works there.
    """
    # proxy host commonly used for commands such as oc or openstack
    if WB_CONF.openstack_type == 'devstack':
        cls.master_node_client = cls.get_node_client(
            'localhost')
        cls.master_cont_cmd_executor = \
            cls.run_on_master_controller
        cls.neutron_api_prefix = ''
    elif WB_CONF.openstack_type == 'podified':
        cls.OC = "oc -n openstack "
        cls.proxy_host_client = cls.get_node_client(
            host=WB_CONF.proxy_host_address,
            username=WB_CONF.proxy_host_user,
            key_filename=WB_CONF.proxy_host_key_file)
        if WB_CONF.kubeconfig_path:
            # '|| true' keeps the command idempotent across test classes
            cls.proxy_host_client.exec_command(
                "ln -s {} /home/{}/.kube/config || true".format(
                    WB_CONF.kubeconfig_path, WB_CONF.proxy_host_user))
        cls.master_node_client = cls.proxy_host_client
        cls.master_cont_cmd_executor = \
            cls.proxy_host_client.exec_command
    else:
        # BUG FIX: the message and its argument were wrapped in a single
        # tuple, so the '%s' placeholder was never interpolated by the
        # logger; pass them as separate arguments instead.
        LOG.warning("Unrecognized deployer tool '%s', plugin supports "
                    "openstack_type as devstack/podified.",
                    WB_CONF.openstack_type)
|
|
|
|
@classmethod
def run_on_master_controller(cls, cmd):
    """Run a shell command on the master controller, return stdout.

    podified: executed over ssh on the proxy host.
    devstack: executed locally.

    :raises RuntimeError: for an unsupported openstack_type (previously
        this surfaced as an UnboundLocalError on ``output``).
    """
    if WB_CONF.openstack_type == 'podified':
        output = cls.proxy_host_client.exec_command(cmd)
    elif WB_CONF.openstack_type == 'devstack':
        output, errors = local_utils.run_local_cmd(cmd)
        LOG.debug("Stderr: %s", errors.decode())
        output = output.decode()
    else:
        raise RuntimeError(
            "Unsupported openstack_type '{}'".format(
                WB_CONF.openstack_type))
    LOG.debug("Output: %s", output)
    return output.strip()
|
|
|
|
def get_host_for_server(self, server_id):
    """Return the hypervisor hostname (FQDN) a server runs on."""
    details = self.os_admin.servers_client.show_server(server_id)['server']
    return details['OS-EXT-SRV-ATTR:host']
|
|
|
|
def get_host_shortname_for_server(self, server_id):
    """Return the short (unqualified) hostname hosting the server."""
    fqdn = self.get_host_for_server(server_id)
    shortname, _, _ = fqdn.partition('.')
    return shortname
|
|
|
|
@classmethod
def get_external_gateway(cls):
    """Return the IPv4 gateway IP of the public network, if any.

    Returns None when no public network is configured or no IPv4
    subnet with a gateway exists on it.
    """
    if not CONF.network.public_network_id:
        return None
    subnets = cls.os_admin.network_client.list_subnets(
        network_id=CONF.network.public_network_id)
    for subnet in subnets['subnets']:
        is_v4 = subnet['ip_version'] == constants.IP_VERSION_4
        if is_v4 and subnet['gateway_ip']:
            return subnet['gateway_ip']
|
|
|
|
@staticmethod
def get_node_client(
        host, username=WB_CONF.overcloud_ssh_user, pkey=None,
        key_filename=WB_CONF.overcloud_key_file):
    """Build an ssh.Client for a node.

    An in-memory private key (``pkey``) takes precedence over the
    on-disk ``key_filename``.
    """
    if pkey:
        return ssh.Client(host=host, username=username, pkey=pkey)
    return ssh.Client(host=host, username=username,
                      key_filename=key_filename)
|
|
|
|
def get_local_ssh_client(self, network=None):
    """Return an ssh.Client connected back to this host.

    When ``network`` is given, the local address that belongs to that
    network's IPv4 subnet is used instead of loopback.
    """
    if network is None:
        local_ip = '127.0.0.1'
    else:
        local_ip = self._get_local_ip_from_network(
            self.get_subnet_cidr(network, 4))
    local_user = shell.execute_local_command('whoami').stdout.rstrip()
    return ssh.Client(
        host=local_ip,
        username=local_user,
        key_filename=WB_CONF.overcloud_key_file)
|
|
|
|
def get_subnet_cidr(self, network, ip_version):
    """Return the CIDR of the network's first subnet of ``ip_version``.

    Returns None when no subnet of that IP version exists.
    """
    for subnet_id in network['subnets']:
        details = self.os_admin.network_client.show_subnet(
            subnet_id)['subnet']
        if details['ip_version'] == ip_version:
            return details['cidr']
|
|
|
|
def find_node_client(self, node_name):
    """Return the ssh client of the node with the given name.

    Accepts either a short hostname or an FQDN; comparison is done on
    short names. Returns None when no node matches.
    """
    wanted = node_name.split('.')[0]
    for node in self.nodes:
        if node['short_name'] == wanted:
            return node['client']
|
|
|
|
def find_different_compute_host(self, exclude_hosts):
    """Return the name of a compute node not in ``exclude_hosts``.

    :param exclude_hosts: iterable of short hostnames to avoid.
    :raises skipException: when every compute is excluded.
    """
    for node in self.nodes:
        # The original body tested node['is_compute'] twice (a guard
        # 'continue' followed by the same check) and used the
        # non-idiomatic 'not x in y'; collapsed into one condition.
        if node['is_compute'] and node['short_name'] not in exclude_hosts:
            return node['name']
    raise self.skipException(
        "Not able to find a different compute than: {}".format(
            exclude_hosts))
|
|
|
|
def get_network_type(self, network_id):
    """Return the provider network type (e.g. vlan, vxlan, geneve)."""
    network = self.os_admin.network_client.show_network(
        network_id)['network']
    return network['provider:network_type']
|
|
|
|
@staticmethod
def _get_local_ip_from_network(network):
    """Return the local IPv4 address that belongs to ``network`` (CIDR).

    Scans all local interfaces; returns None when no address matches.
    """
    target = netaddr.IPNetwork(network)
    for iface in interfaces():
        addrs = ifaddresses(iface)
        if AF_INET not in addrs:
            continue
        candidate = addrs[AF_INET][0]['addr']
        if netaddr.IPAddress(candidate) in target:
            return candidate
|
|
|
|
def get_fip_port_details(self, fip):
    """Return the neutron port backing the given floating IP.

    Matches on the port's first fixed IP; returns None if not found.
    """
    candidate_ports = self.os_admin.network_client.list_ports(
        network_id=CONF.network.public_network_id,
        device_owner=constants.DEVICE_OWNER_FLOATINGIP)['ports']
    wanted_ip = fip['floating_ip_address']
    for port in candidate_ports:
        fixed_ips = port.get('fixed_ips')
        if fixed_ips and fixed_ips[0]['ip_address'] == wanted_ip:
            return port
|
|
|
|
def ensure_external_network_is_shared(self):
    """Make the public network shared, restoring the flag on cleanup."""
    if self.external_network['shared']:
        return
    # register the revert first so it runs even if the update fails
    self.addClassResourceCleanup(
        self.os_admin.network_client.update_network,
        self.external_network['id'], shared=False)
    self.os_admin.network_client.update_network(
        self.external_network['id'], shared=True)
|
|
|
|
@classmethod
def get_podified_nodes_data(cls):
    """Parse the ci-framework ansible inventory into a list of nodes.

    Reads the inventory file on the proxy host and, for non-CRC
    environments, the 'oc get nodes' YAML to resolve OCP node IPs.

    :returns: list of dicts with keys 'ip', 'user', 'key' (name of the
        ssh key to use, resolved later via cls.keys_data) and
        'is_controller'.
    """

    def get_ocp_main_ip(host):
        # Match the host's short name against each OCP node's
        # 'Hostname' address and return its 'InternalIP'; falls back
        # to the original host string if nothing matches.
        LOG.debug('Searching for OCP node main IP corresponding to %s',
                  host)
        for ocp_node_yaml in ocp_node_yaml_list:
            ocp_main_ip = None
            ocp_hostname = None
            for address in ocp_node_yaml['status']['addresses']:
                if address['type'] == 'InternalIP':
                    ocp_main_ip = address['address']
                if address['type'] == 'Hostname':
                    ocp_hostname = address['address'].split('.')[0]
            if ocp_main_ip and ocp_hostname == host.split('.')[0]:
                LOG.debug('IP address found for %s: %s', host, ocp_main_ip)
                return ocp_main_ip

        LOG.warning('No IP address found for %s', host)
        return host

    def append_node_data(node, is_crc):
        # Here we mean ansible controller node used by the ci-framework
        # This controller is not a part of OSP and should be skipped
        if node.startswith('controller'):
            return
        if node.startswith('ocp') and not is_crc:
            # a specific ssh key is used for accessing ocp nodes
            key = 'ansible_ssh_private_key_file'  # meaning dict key here
            # save path of ocp nodes key (if not yet), we'll need it later
            if not hasattr(cls, 'ocp_nodes_key_path'):
                cls.ocp_nodes_key_path = (
                    hosts_data[node][key].replace(
                        '~', '/home/{}'.format(WB_CONF.proxy_host_user)))
            node_key = hosts_data[node][key].split('/')[-1]
            node_ip = get_ocp_main_ip(hosts_data[node]['ansible_host'])
            is_controller = True
        else:
            node_key = 'id_cifw_key'
            node_ip = hosts_data[node]['ansible_host']
            # Here we mean a node with running OSP control plane services
            is_controller = (node.startswith('ocp') or
                             node.startswith('crc'))
        node_data = {
            'ip': node_ip,
            'user': hosts_data[node]['ansible_user'],
            'key': node_key,
            'is_controller': is_controller}
        nodes.append(node_data)

    nodes = []
    inventory_data = yaml.safe_load(
        cls.proxy_host_client.exec_command(
            'cat ' + WB_CONF.proxy_host_inventory_path))
    is_crc = False
    ocps = inventory_data['all']['children']['ocps']
    # a 'crc*' host under the ocps group marks a CRC-based environment
    if 'hosts' in ocps and any(ocp_key.startswith('crc')
                               for ocp_key in ocps['hosts'].keys()):
        is_crc = True
    else:
        # create ocp_node_yaml_list
        ocp_node_list = cls.proxy_host_client.exec_command(
            "{} get nodes -o name".format(cls.OC)).splitlines()
        ocp_node_yaml_list = []
        for ocp_node in ocp_node_list:
            output = cls.proxy_host_client.exec_command(
                "{} get {} -o yaml".format(cls.OC, ocp_node))
            ocp_node_yaml_list.append(yaml.safe_load(output))

    LOG.debug("Environment is %s based on CRC",
              "" if is_crc else "n't")
    items = inventory_data['all']['children']
    hosts_data = {}
    host_names = []
    for host_type in items.keys():
        if is_crc:
            # CRC inventories keep host details under all.hosts; collect
            # the names per group and look each one up there
            host_names.extend(
                list(items[host_type]['hosts'].keys()))
            for host_name in host_names:
                hosts_data[host_name] = (
                    inventory_data['all']['hosts'][host_name])
        else:
            # NOTE(review): indexes inventory_data (not items) by group
            # name — assumes groups are mirrored at the top level of the
            # inventory; confirm against a real inventory file
            hosts_data.update(inventory_data[host_type]['hosts'])
    for host in hosts_data:
        append_node_data(host, is_crc)
    return nodes
|
|
|
|
@classmethod
def discover_nodes(cls):
    """Populate ``cls.nodes`` with one dict per reachable node.

    Each entry carries 'ip', 'client' (ssh), 'name', 'short_name' and
    role flags 'is_compute'/'is_networker' (plus 'is_controller' on
    devstack, and on podified via get_podified_nodes_data()).
    Unreachable nodes are silently skipped.
    """
    agents = cls.os_admin.network.AgentsClient().list_agents()['agents']
    if cls.has_ovn_support:
        # with OVN, L3 is served by gateway chassis agents rather than
        # the neutron-l3-agent binary
        l3_agent_hosts = [
            agent['host'] for agent in agents
            if agent['agent_type'] == 'OVN Controller Gateway agent']
    else:
        l3_agent_hosts = [
            agent['host'] for agent in agents
            if agent['binary'] == 'neutron-l3-agent']
    compute_hosts = [
        host['hypervisor_hostname'] for host
        in cls.os_admin.hv_client.list_hypervisors()['hypervisors']]
    if WB_CONF.openstack_type == 'podified':
        cls.nodes_data = cls.get_podified_nodes_data()
        with open(WB_CONF.proxy_host_key_file, 'r') as file:
            id_cifw_key = file.read()
        cls.keys_data = {
            'id_cifw_key': id_cifw_key}
        # ocp_nodes_key_path is only set by get_podified_nodes_data()
        # when dedicated (non-CRC) ocp nodes were found
        if hasattr(cls, 'ocp_nodes_key_path'):
            devscripts_key = cls.proxy_host_client.exec_command(
                'cat ' + cls.ocp_nodes_key_path)
            cls.keys_data['devscripts_key'] = devscripts_key
        for host in cls.nodes_data:
            # host['key'] names which entry of keys_data to use
            client = cls.get_node_client(
                host=host['ip'], username=host['user'],
                pkey=f"{cls.keys_data[host['key']]}")
            host['client'] = client
    else:
        cls.nodes_data = []
        # union of L3 and compute hosts, de-duplicated
        for host in set([*l3_agent_hosts, *compute_hosts]):
            cls.nodes_data.append(
                {'ip': host, 'client': cls.get_node_client(host)})
    cls.nodes = []
    for host in cls.nodes_data:
        # skip nodes that are not reachable from the test runner
        if not local_utils.host_responds_to_ping(host['ip']):
            continue
        host['name'] = host['client'].exec_command('hostname -f').strip()
        host['short_name'] = host['name'].split('.')[0]
        host['is_compute'] = (host['name'] in compute_hosts)
        host['is_networker'] = (host['name'] in l3_agent_hosts)
        if WB_CONF.openstack_type == 'devstack':
            # Here we are checking if there are controller-specific
            # processes running on the node
            output = host['client'].exec_command(
                r"ps ax | grep 'rabbit\|galera' | grep -v grep || true")
            host['is_controller'] = (output.strip() != "")
        cls.nodes.append(host)
|
|
|
|
@classmethod
def get_standalone_networkers(cls):
    """Return names of nodes acting as networkers only.

    A node qualifies when it is a networker and neither a controller
    nor a compute.
    """
    standalone = []
    for node in cls.nodes:
        if not node['is_networker']:
            continue
        if node['is_controller'] or node['is_compute']:
            continue
        standalone.append(node['name'])
    return standalone
|
|
|
|
@classmethod
def is_setup_single_node(cls):
    """Tell whether the deployment consists of exactly one node.

    Triggers node discovery on first use so ``cls.nodes`` exists.
    """
    if not hasattr(cls, 'nodes'):
        cls.discover_nodes()
    node_count = len(cls.nodes)
    return node_count == 1
|
|
|
|
def get_node_setting(self, node_name, setting):
    """Look up a per-node attribute by full or short node name.

    Returns None when no node matches ``node_name``.
    """
    for node in self.nodes:
        if node_name in (node['name'], node['short_name']):
            return node[setting]
|
|
|
|
@classmethod
def get_pods_of_service(cls, service='neutron', pod_state='Running'):
    """Return names of pods belonging to a service.

    :param service: service name used to grep the pod list.
    :param pod_state: phase to filter on; an empty string selects pods
        in any state.
    """
    state_selector = pod_state
    if state_selector:
        state_selector = "--field-selector=status.phase={}".format(
            state_selector)
    list_cmd = "{} get pods -o=name {}".format(cls.OC, state_selector)
    if service == 'neutron':
        # exclude the neutron metadata agent pods
        name_filter = "cut -d'/' -f 2 | grep ^neutron | grep -v meta"
    else:
        name_filter = "cut -d'/' -f 2 | grep {}".format(service)

    raw = cls.proxy_host_client.exec_command(
        "{} | {}; true".format(list_cmd, name_filter))
    pods = []
    for line in raw.splitlines():
        pods.append(line.strip())
    return pods
|
|
|
|
@classmethod
def get_configs_of_service(cls, service='neutron'):
    """List config file paths of a service inside its pod (podified).

    :param service: service name; only 'neutron' is handled for now.
    :returns: list of file paths, or None for unsupported services.
    """
    # (rsafrono) at this moment only neutron configs were handled
    # since it's the only service that existing tests are using
    if service == 'neutron':
        pod = cls.get_pods_of_service(service)[0]
        # list every regular file in the directory containing
        # cls.neutron_conf (os.path.split()[0] yields its directory)
        return cls.proxy_host_client.exec_command(
            '{} rsh {} find {} -type f'.format(
                cls.OC, pod, os.path.split(
                    cls.neutron_conf)[0])).strip().split('\n')
|
|
|
|
# TODO(mblue): next gen computes configuration set should be done too,
|
|
# 'oc patch' for data plane would need more steps and triggers deployment
|
|
@classmethod
def set_service_setting(
        cls,
        node_type: str = 'controller',
        file: str = '',
        service: str = 'neutron',
        config_list: list[ConfigOption] = None,
        cfg_change: bool = True
) -> None:
    """Set configuration for service

    :param node_type: Node type for change, ex: controller/compute
        (currently only controllers).
    :param file: File for configuration change (except in podified).
        All configuration parameters must be defined in the same file.
    :param service: Podified service name (only podified).
    :param config_list: List with the multiple configuration parameters,
        defined in a namedtuple ConfigOption(section, parameter, value).
    :param cfg_change: by default, it is always expected that the
        configuration will change; in a podified environment, that implies a
        pod replacement.
    """
    assert config_list, ("At least one configuration parameter must be "
                         "supplied")
    if WB_CONF.openstack_type == 'podified' and node_type != 'compute':
        service_pod = cls.get_pods_of_service(service)[0]
        # TODO(mblue): process ini in python instead of crudini command,
        # without depending on hardcoded conf filenames, crudini bin in pod
        custom_file = '02-neutron-custom.conf'
        # combine configuration to stdout using mutable copy in service pod
        # NOTE(mblue): 'bash -c' needed for 'oc rsh' to execute a few
        # commands in pod shell session (instead of outside session).
        copy_config = (
            "{0} rsh {1} bash -c "
            "'cp /etc/neutron/neutron.conf.d/{2} /tmp/ && "
            "chmod g+w /tmp/{2}'").format(
                cls.OC, service_pod, custom_file)
        cls.proxy_host_client.exec_command(copy_config)
        # apply every requested option onto the temporary copy
        for config_option in config_list:
            combine_conf_cmd = (
                "{0} rsh {1} bash -c '"
                "crudini --set /tmp/{2} {3} {4} {5}'").format(
                    cls.OC, service_pod, custom_file, config_option.section,
                    config_option.parameter, config_option.value)
            cls.proxy_host_client.exec_command(combine_conf_cmd)
        read_conf_cmd = "{0} rsh {1} bash -c 'cat /tmp/{2}'".format(
            cls.OC, service_pod, custom_file)
        combined_conf = cls.proxy_host_client.exec_command(read_conf_cmd)
        # re-indent the conf so it nests under customServiceConfig in
        # the YAML patch below
        combined_conf_ind = combined_conf.replace('\n', '\n' + 8 * ' ')
        patch_buffer = (
            'spec:\n'
            ' {}:\n'
            ' template:\n'
            ' customServiceConfig: |\n'
            ' {}\n'
        ).format(
            service,
            combined_conf_ind)
        cmd = ("{0} patch $({0} get oscp -o name) --type=merge "
               "--patch '".format(cls.OC) + patch_buffer + "'")
        LOG.debug("Set configuration command:\n%s", cmd)
        output = cls.proxy_host_client.exec_command(cmd)
        LOG.debug("Output:\n%s", output)
        if not cfg_change and '(no change)' in output:
            # No config change done, no pod replacement.
            return

        # TODO(mblue): Add another check using network agent list
        # status with neutron api (as was in downstream code).

        # wait until old service pod is fully replaced
        def _service_pod_replaced():
            _service_pods = cls.get_pods_of_service(
                service=service,
                pod_state='')
            term_service_pods = cls.get_pods_of_service(
                service=service,
                pod_state='Terminating')
            # conditions:
            # 1) any service pod listed
            # 2) old service pod removed (in case replacement didn't start)
            # 3) no terminating service pods (replacement finished)
            return len(_service_pods) > 0 and \
                service_pod not in _service_pods and \
                len(term_service_pods) == 0
        _timeout = 120
        common_utils.wait_until_true(
            _service_pod_replaced,
            timeout=_timeout,
            sleep=10,
            exception=RuntimeError(
                "'{}' pod not replaced in {} seconds:\n{}".format(
                    service_pod,
                    _timeout,
                    cls.get_pods_of_service(
                        service=service,
                        pod_state=''))))
    else:
        # non-podified (or compute): edit the file in place on each
        # node of the requested type
        for config_option in config_list:
            cls.run_group_cmd(
                'sudo crudini --set {} {} {} {} && sudo sync'.format(
                    file, config_option.section, config_option.parameter,
                    config_option.value),
                node_type)
|
|
|
|
@classmethod
def check_service_setting(
        cls, host, service='neutron', config_files=None,
        section='DEFAULT', param='', value='true',
        msg='Required config value is missing', skip_if_fails=True):
    """Check if a service on a node has a setting with a value in config

    :param host(dict): Dictionary with host-related parameters,
        host['client'] is a required parameter.
    :param service(str): Name of the containerized service.
    :param config_files(list): List with paths to config files. List makes
        sense on podified where e.g. neutron has
        2 config files with same sections.
    :param section(str): Section in the config file.
    :param param(str): Parameter in section to check.
    :param value(str): Expected value, case insensitive.
    :param msg(str): Message to print in case of expected value not found
    :param skip_if_fails(bool): skip if the check fails - if it fails and
        skip_if_fails is False, return False.
    """

    if WB_CONF.openstack_type == 'podified':
        # the config lives inside the service pod, so run crudini via rsh
        service_prefix = "{} rsh {}".format(
            cls.OC, cls.get_pods_of_service(service)[0])
    else:
        service_prefix = ""
    cmd_prefix = "crudini --get"
    # If we have config file with defaults and second one with overrides,
    # the latter has the config that wins
    for config_file in reversed(config_files):
        setting = "{} {} {}".format(config_file, section, param)
        # '|| true' keeps a zero exit status when the option is absent
        cmd = "{} {} {} || true".format(
            service_prefix, cmd_prefix, setting)
        LOG.debug("Command = '%s'", cmd)
        result = host['client'].exec_command(cmd).strip()
        LOG.debug("Result = '%s'", result)
        # Since we are checking files in reverse order,
        # if we've found a value than it's an override and we
        # should ignore values in other files
        if result:
            if value.lower() in result.lower():
                return True
            else:
                break

    if skip_if_fails:
        raise cls.skipException(msg)
    else:
        return False
|
|
|
|
@classmethod
def is_service_on_node(cls, service_name, ssh_client):
    """Checks systemctl service existence on node using pattern.

    Any non a-z char could be any char.
    """
    # NOTE(mblue): regex used to fit podified/devstack/tripleo
    # different service names.
    service_regex = re.sub(r'[^a-zA-Z]', '.', service_name)
    host_ip = ssh_client.host
    LOG.debug("Checking for service '%s' (regex) existence on host '%s'.",
              service_regex, host_ip)
    catch = ssh_client.exec_command(
        "systemctl --type=service | grep '{}'; true".format(
            service_regex)).strip()
    if not catch:
        LOG.debug("Service not found on host '%s' using glob '%s'.",
                  host_ip, service_regex)
        return False
    LOG.debug("Service found on host '%s':\n%s",
              host_ip, catch)
    return True
|
|
|
|
@classmethod
def reset_node_service(cls, service_alias, ssh_client,
                       wait_until_active=True, timeout=30):
    """Restart a systemd service matched by alias on a remote node.

    :param service_alias: space-separated words matched, in order,
        within the unit file name (spaces become '.*' in the grep).
    :param ssh_client: ssh client connected to the target node.
    :param wait_until_active: block until the unit reports 'active'.
    :param timeout: seconds to wait before raising RuntimeError.
    """
    host_ip = ssh_client.host
    service_name = ssh_client.exec_command(
        "systemctl list-unit-files --type service | grep {} | "
        "cut -d' ' -f1".format(
            service_alias.replace(" ", ".*"))).strip()
    LOG.debug("Restarting service '%s' on host '%s'.",
              service_name, host_ip)
    ssh_client.exec_command(
        'sudo systemctl restart {}'.format(service_name))
    if not wait_until_active:
        return

    def _is_service_active():
        # BUG FIX: the previous substring check ('active' in output)
        # also matched 'inactive' and 'activating', so the wait could
        # succeed while the service was still down; compare the exact
        # state reported by 'systemctl is-active' instead.
        state = ssh_client.exec_command(
            'sudo systemctl is-active {}; true'.format(service_name))
        return state.strip() == 'active'
    LOG.debug("Waiting for service '%s' to become active on host '%s'.",
              service_name, host_ip)
    common_utils.wait_until_true(
        _is_service_active, timeout=timeout, sleep=5,
        exception=RuntimeError(
            "Timed out {} seconds, service {} "
            "didn't become active after restart.\n\n'''\n{}\n'''".format(
                timeout, service_name, ssh_client.exec_command(
                    'sudo systemctl status {}; true'.format(
                        service_name)))))
    LOG.debug("Service '%s' active on host '%s'.",
              service_name, host_ip)
|
|
|
|
def _create_server(
        self, create_floating_ip=True, exclude_hosts=None,
        network=None, use_admin_client=False, **kwargs):
    """Create a test server plus its port and an optional floating IP.

    :param create_floating_ip: attach a floating IP to the server port.
    :param exclude_hosts: short hostnames the server must NOT land on;
        a different compute is selected (requires admin credentials).
    :param network: network for the NIC (default: self.network).
    :param use_admin_client: force admin clients even without 'host'.
    :param kwargs: forwarded to servers_client.create_server(); 'host'
        pins the server to a hypervisor.
    :returns: dict with keys 'port', 'fip' (None when not created) and
        'server'.
    """
    network = network or self.network
    kwargs.setdefault('name', data_utils.rand_name('server-test'))
    kwargs['flavorRef'] = self.flavor_ref
    kwargs['imageRef'] = self.image_ref
    kwargs['networks'] = [{'uuid': network['id']}]
    if not kwargs.get('key_name'):
        kwargs['key_name'] = self.keypair['name']
    if not kwargs.get('security_groups'):
        # default to the most recently created security group
        kwargs['security_groups'] = [{
            'name': self.security_groups[-1]['name']}]
    # (rsafrono) delete scheduler_hints from kwargs if it's set to empty
    # dict. This prevents 'scheduler_hints was unexpected' error.
    if not kwargs.get('scheduler_hints'):
        kwargs.pop("scheduler_hints", None)
    if exclude_hosts:
        exclude_hosts_ignored = False
        # an explicitly requested 'host' wins over exclude_hosts
        if kwargs.get('host') and (
                kwargs['host'].split('.')[0] in exclude_hosts):
            exclude_hosts_ignored = True
            LOG.debug("'exclude_hosts' parameter contains same value as "
                      "'host' so it will be ignored, i.e. 'host' will be "
                      "used")
        else:
            kwargs['host'] = self.find_different_compute_host(
                exclude_hosts)
    # pinning a server to a host requires admin credentials
    if kwargs.get('host') or use_admin_client:
        servers_client = self.os_admin.servers_client
        network_client = self.os_admin.network_client
    else:
        servers_client = self.os_primary.servers_client
        network_client = self.os_primary.network_client

    server = servers_client.create_server(**kwargs)['server']
    # cleanups run in reverse order: delete_server, then wait for it
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    waiters.wait_for_server_termination,
                    servers_client,
                    server['id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    servers_client.delete_server,
                    server['id'])
    # wait for VM ACTIVE to get its host; otherwise host may be None
    self.wait_for_server_active(server, client=servers_client)
    if exclude_hosts and not exclude_hosts_ignored:
        # scheduling is best effort; verify the scheduler honored it
        if (self.get_host_shortname_for_server(server['id'])
                in exclude_hosts):
            self.fail("Failed to spawn a server on a host other than in "
                      "this list: '{}'. Can not proceed.".format(
                          ' '.join(exclude_hosts)))
    port = network_client.list_ports(
        network_id=network['id'],
        device_id=server['id'])['ports'][0]
    if create_floating_ip:
        fip = network_client.create_floatingip(
            floating_network_id=CONF.network.public_network_id,
            port_id=port['id'])['floatingip']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        network_client.delete_floatingip,
                        fip['id'])
    else:
        fip = None
    return {'port': port, 'fip': fip, 'server': server}
|
|
|
|
def _create_server_for_topology(
        self, network_id=None, port_type=None,
        different_host=None, port_qos_policy_id=None):
    """Create a server for topology tests with a ready ssh client.

    :param network_id: network for the NIC (default: self.network).
    :param port_type: optional vnic type; 'direct'/'direct-physical'
        for SR-IOV — a port is pre-created in that case.
    :param different_host: server dict; schedule this server on a
        different hypervisor (requires DifferentHostFilter).
    :param port_qos_policy_id: QoS policy set on the pre-created port.
    :returns: server dict with an extra 'ssh_client' entry.
    """
    if not network_id:
        network_id = self.network['id']
    if port_type:
        kwargs = {'binding:vnic_type': port_type,
                  'qos_policy_id': port_qos_policy_id}
        port = self.create_port(
            network={'id': network_id}, **kwargs)
        networks = [{'port': port['id']}]
    else:
        networks = [{'uuid': network_id}]

    params = {
        'flavor_ref': self.flavor_ref,
        'image_ref': self.image_ref,
        'key_name': self.keypair['name'],
        'networks': networks,
        'security_groups': [
            {'name': self.secgroup['security_group']['name']}],
        'name': data_utils.rand_name(self._testMethodName)
    }
    if port_type == 'direct-physical':
        # configure VLAN interface inside the guest via user_data
        # (build_user_data is defined elsewhere in this file — not
        # visible here; TODO confirm its contract)
        net_vlan = self.client.show_network(
            network_id)['network']['provider:segmentation_id']
        params['user_data'] = build_user_data(net_vlan)
        params['config_drive'] = True
    if (different_host and CONF.compute.min_compute_nodes > 1):
        params['scheduler_hints'] = {
            'different_host': different_host['id']}
    server = self.create_server(**params)['server']
    if different_host and CONF.compute.min_compute_nodes > 1:
        # the hint is best effort; skip when it was not honored
        if (self.get_host_for_server(different_host['id']) ==
                self.get_host_for_server(server['id'])):
            raise self.skipException(
                'Failed to run the VM on a different hypervisor, make '
                'sure that DifferentHostFilter is in the list of '
                'enabled nova scheduler filters')

    port = self.client.list_ports(device_id=server['id'])['ports'][0]
    if network_id == CONF.network.public_network_id:
        # on the external network the fixed IP is directly reachable
        access_ip_address = port['fixed_ips'][0]['ip_address']
    else:
        access_ip_address = self.create_floatingip(
            port=port)['floating_ip_address']

    server['ssh_client'] = ssh.Client(access_ip_address,
                                      self.username,
                                      pkey=self.keypair['private_key'])
    return server
|
|
|
|
def _create_vms_by_topology(
        self, topology='internal', port_type=None, ipv6=False,
        different_host=True, num_vms_created=2):

    """Function for creating desired topology for the test

    Available topologies:
    * internal(default): sender and receiver are on tenant network
    * external: sender and receiver are on external(public) network
    * east-west: sender and receiver are on different tenant networks
    * north-south: sender is on external and receiver on tenant network

    :param topology(str): one of 4 available topologies to use (see list
        above)
    :param port_type(str): type of port to use. If omitted, default port
        type will be used. Can be set to 'direct' or 'direct-physical'
        for SR-IOV environments.
    :param ipv6(bool): additionally create a dhcpv6-stateless IPv6
        subnet on each created tenant network.
    :param different_host(bool): whether to force vms to run on different
        host.
    :param num_vms_created(int): number of vms to create, 1 or 2.
        default is 2.
    :returns: sender if num_vms_created is 1, else server and receiver
    """
    # num_vms_created can be 1 or 2
    self.assertIn(num_vms_created, [1, 2], "num_vms_created can be 1 or 2")

    def _create_local_network():
        # tenant network attached to the (closed-over) router; the CIDR
        # is derived from how many subnets were already reserved so
        # repeated calls do not overlap
        network = self.create_network()
        subnet_index = len(self.reserved_subnet_cidrs)
        cidr = '192.168.%d.0/24' % subnet_index
        subnet = self.create_subnet(network, cidr=cidr)
        self.create_router_interface(router['id'], subnet['id'])
        if ipv6:
            ipv6_cidr = '2001:{:x}::/64'.format(200 + subnet_index)
            ra_address_mode = 'dhcpv6-stateless'
            ipv6_subnet = self.create_subnet(
                network, cidr=ipv6_cidr, ip_version=6,
                ipv6_ra_mode=ra_address_mode,
                ipv6_address_mode=ra_address_mode)
            self.create_router_interface(router['id'], ipv6_subnet['id'])

        return network

    # a router is only needed when at least one tenant network is used
    if topology != 'external':
        if hasattr(self, "router") and self.router:
            router = self.router
        else:
            router = self.create_router_by_client()

    if topology == 'external' or topology == 'north-south':
        self.ensure_external_network_is_shared()
        src_network = self.external_network
    else:
        src_network = _create_local_network()

    sender = self._create_server_for_topology(
        network_id=src_network['id'],
        port_type=port_type)

    # 'external' and 'internal' keep both VMs on the same network
    if topology == 'external' or topology == 'internal':
        dst_network = src_network
    else:
        dst_network = _create_local_network()

    different_host = sender if different_host else None
    if num_vms_created == 1:
        return sender
    receiver = self._create_server_for_topology(
        different_host=different_host, network_id=dst_network['id'],
        port_type=port_type)
    return sender, receiver
|
|
|
|
@classmethod
def get_osp_cmd_prefix(cls, admin=True):
    """Return the shell prefix enabling the 'openstack' CLI.

    The prefix depends on the deployer tool; ``admin`` selects admin
    credentials on devstack.
    """
    # TODO(mblue): figure how admin used in podified setup when needed
    if WB_CONF.openstack_type == 'podified':
        return '{} rsh openstackclient '.format(cls.OC)
    if WB_CONF.openstack_type == 'devstack':
        return '. /opt/stack/devstack/openrc{} && '.format(
            ' admin' if admin else '')
    return '. ~/overcloudrc && '
|
|
|
|
@staticmethod
|
|
def validate_command(cmd, pattern='', timeout=60,
|
|
ssh_client=None,
|
|
ret_bool_status=False,
|
|
ret_bool_pattern=False,
|
|
local_shell=False):
|
|
"""Run a command on a given host (default: host supporting OSP CLI).
|
|
Optional: validation of output by regex, and exit status.
|
|
|
|
:param cmd: Command to execute on given host.
|
|
:type cmd: str
|
|
|
|
:param pattern: Optional regex pattern to validate each
|
|
commands output.
|
|
:type pattern: str, optional
|
|
|
|
:param timeout: Timeout for command to finish.
|
|
:type timeout: int, optional
|
|
|
|
:param ssh_client: Ssh client to execute command, default UC.
|
|
:type ssh_client: SSHClient, optional
|
|
|
|
:param ret_bool_pattern: Return boolean instead of error raise
|
|
in pattern verification (Default False).
|
|
Without any boolean option, returns all output.
|
|
:type ret_bool_pattern: bool, optional
|
|
|
|
:param ret_bool_status: Return boolean instead of error raise
|
|
in exit status verification (Default False).
|
|
Without any boolean option, returns all output.
|
|
:type ret_bool_status: bool, optional
|
|
|
|
:returns: all output of command as str, or boolean if either of
|
|
return boolean options is True (ret_bool_pattern or ret_bool_status).
|
|
"""
|
|
# local_shell overrides any other ssh_client setting intentions
|
|
if local_shell:
|
|
ssh_client = None
|
|
elif ssh_client is None:
|
|
# default execute on proxy node, or according to CI configuration
|
|
if WB_CONF.exec_on_tester:
|
|
# preserve static, check proxy host ssh client, or make one
|
|
if not hasattr(__class__, 'master_node_client'):
|
|
__class__.setup_proxy_host()
|
|
ssh_client = __class__.master_node_client
|
|
else:
|
|
ssh_client = ssh.Client(
|
|
host=WB_CONF.tester_ip,
|
|
username=WB_CONF.tester_user,
|
|
password=WB_CONF.tester_pass,
|
|
key_filename=WB_CONF.tester_key_file)
|
|
|
|
# verify command success using exception
|
|
try:
|
|
result = shell.execute(
|
|
cmd, timeout=timeout, check=(not ret_bool_status),
|
|
ssh_client=ssh_client)
|
|
except exceptions.ShellCommandFailed:
|
|
LOG.exception(
|
|
'Tested command failed (raising error) -> "%s":', cmd)
|
|
# verify command success using boolean
|
|
if ret_bool_status and result.exit_status != 0:
|
|
LOG.debug(
|
|
'Tested command failed (returning False) -> "%s":', cmd)
|
|
return False
|
|
# verify desired output using exception/boolean
|
|
all_output = (result.stderr if result.stderr else '') + \
|
|
(result.stdout if result.stdout else '')
|
|
if pattern:
|
|
fail_msg = 'Pattern "{}" not found in output of "{}" command.'
|
|
try:
|
|
if not re.search(pattern, all_output):
|
|
raise AssertionError(fail_msg.format(pattern, cmd))
|
|
except AssertionError as err:
|
|
if ret_bool_pattern:
|
|
return False
|
|
raise err
|
|
if ret_bool_status or ret_bool_pattern:
|
|
return True
|
|
return all_output
|
|
|
|
@classmethod
|
|
def run_group_cmd(cls, cmd, group='', pattern='', timeout=60,
|
|
check_status=True, parallel=True):
|
|
"""Run a command on a group of overcloud nodes,
|
|
either in parallel/sequential.
|
|
Optional: validation of output by regex, and exit status.
|
|
|
|
:param cmd: Command to execute on nodes group.
|
|
:type cmd: str
|
|
|
|
:param group: Initial name to fit group, ex: controller
|
|
(Default all overcloud nodes).
|
|
:type group: str, optional
|
|
|
|
:param pattern: Optional regex pattern to validate each
|
|
commands output.
|
|
:type pattern: str, optional
|
|
|
|
:param timeout: Timeout for all commands to finish.
|
|
:type timeout: int, optional
|
|
|
|
:param check_status: Whether to verify commands exit status.
|
|
:type check_status: bool, optional
|
|
|
|
:param parallel: Run commands in parallel or sequential.
|
|
:type parallel: bool, optional
|
|
"""
|
|
tasks = []
|
|
group = group.lower()
|
|
group_name = group if group else 'all'
|
|
for node in cls.nodes:
|
|
if node['is_' + group]:
|
|
LOG.info('Running command in %s "%s" on "%s" from group "%s"',
|
|
'parallel' if parallel else 'sequence',
|
|
cmd, node['name'], group_name)
|
|
# functools.partial instead of lambda for "freezed" arguments
|
|
# (figure values in definition time rather than execution time)
|
|
call = partial(cls.validate_command,
|
|
cmd, pattern, timeout, node['client'],
|
|
not check_status)
|
|
tasks.append(Process(target=call)) if parallel else call()
|
|
if parallel and tasks:
|
|
for task in tasks:
|
|
task.start()
|
|
for task in tasks:
|
|
task.join()
|
|
# NOTE(mblue): guarantee wait for exit, or proper error
|
|
common_utils.wait_until_true(
|
|
lambda: None not in [t.exitcode for t in tasks],
|
|
timeout=timeout,
|
|
sleep=min(5, timeout),
|
|
exception=RuntimeError(
|
|
('Timed out: waiting for command "{}" on "{}" nodes\n'
|
|
'({}/{} processes not finished)').format(
|
|
cmd, group_name,
|
|
len([t for t in tasks if t.exitcode is None]),
|
|
len(tasks))))
|
|
if check_status:
|
|
if sum([t.exitcode for t in tasks]) != 0:
|
|
raise AssertionError(
|
|
'Command failure "{}" on "{}" nodes.'.format(
|
|
cmd, group_name))
|
|
|
|
@classmethod
|
|
def cli_create_resource(cls, cmd,
|
|
cleanup_method=None,
|
|
cleanup_args=None,
|
|
env_prefix=None,
|
|
no_id_cmd=False,
|
|
cleanup=True):
|
|
"""Wrapper for OSP resource creation using commands.
|
|
Includes sourcing commonly used credentials and common
|
|
cleanup command call after test class is done/failed.
|
|
|
|
:param cmd: Creation command to execute.
|
|
Example: 'openstack ... create ...'
|
|
:type cmd: str
|
|
|
|
:param cleanup_method: Cleanup function to handle resource
|
|
after test class is finished/failed.
|
|
Default method is validate_command from base class in combination
|
|
with a default delete command deduced from given create command.
|
|
:type cleanup_method: function, optional
|
|
|
|
:param cleanup_args: Arguments passed to cleanup method.
|
|
Default arguments work in combination with default cleanup_method,
|
|
which is a single argument, a figured delete appropriate command.
|
|
:type cleanup_args: tuple, optional
|
|
|
|
:param env_prefix: Prefix added to create command.
|
|
Default prefix sources test user rc file, usually ~/openrc .
|
|
:type env_prefix: str, optional
|
|
|
|
:param no_id_cmd: When set to True omits command suffix to return
|
|
uuid of created resource, then returns all output.
|
|
(Useful for non ordinary creation commands, or extra output parsing).
|
|
Default is False.
|
|
:type no_id_cmd: bool, optional
|
|
|
|
:param cleanup: When set to False, skips adding cleanup in stack
|
|
(therefore not using cleanup_method and cleanup_args).
|
|
Default is True.
|
|
:type cleanup: bool, optional
|
|
|
|
:returns: uuid of resource, or all output according to
|
|
no_id_cmd boolean.
|
|
Default is uuid.
|
|
"""
|
|
# default to overcloudrc credentials
|
|
_env_prefix = env_prefix or cls.get_osp_cmd_prefix()
|
|
_get_id_suffix = '' if no_id_cmd else ' -f value -c id'
|
|
_cmd = _env_prefix + cmd + _get_id_suffix
|
|
_id = cls.validate_command(_cmd).strip()
|
|
# default to delete using CLI, and common arguments figured from cmd
|
|
if cleanup:
|
|
_cleanup_method = cleanup_method or cls.validate_command
|
|
_cleanup_args = cleanup_args or (
|
|
_cmd.partition('create ')[0] + 'delete {}'.format(_id),)
|
|
cls.addClassResourceCleanup(_cleanup_method, *_cleanup_args)
|
|
# length of uuid with dashes, as seen in CLI output
|
|
if not no_id_cmd and len(_id) != 36:
|
|
raise AssertionError(
|
|
"Command for resource creation failed: '{}'".format(_cmd))
|
|
LOG.debug('Command for resource creation succeeded')
|
|
return _id
|
|
|
|
|
|
class BaseTempestTestCaseAdvanced(BaseTempestWhiteboxTestCase):
    """Base class skips test suites unless advanced image is available,
    also defines handy test settings for advanced image use.
    """

    @classmethod
    def skip_checks(cls):
        """Skip the suite when no advanced image can be used."""
        super(BaseTempestTestCaseAdvanced, cls).skip_checks()
        plugin_opts = CONF.neutron_plugin_options
        advanced_image_available = (
            plugin_opts.advanced_image_ref or
            plugin_opts.default_image_is_advanced)
        if not advanced_image_available:
            raise cls.skipException(
                "This test requires advanced image and tools")

    @classmethod
    def resource_setup(cls):
        """Select flavor, image and ssh user matching the advanced image."""
        super(BaseTempestTestCaseAdvanced, cls).resource_setup()
        plugin_opts = CONF.neutron_plugin_options
        if plugin_opts.default_image_is_advanced:
            # the default image already provides the advanced tooling
            cls.flavor_ref = CONF.compute.flavor_ref
            cls.image_ref = CONF.compute.image_ref
            cls.username = CONF.validation.image_ssh_user
        else:
            cls.flavor_ref = plugin_opts.advanced_image_flavor_ref
            cls.image_ref = plugin_opts.advanced_image_ref
            cls.username = plugin_opts.advanced_image_ssh_user
|
|
|
|
|
|
class TrafficFlowTest(BaseTempestWhiteboxTestCase):
    """Verify on which nodes traffic is actually routed.

    Traffic is captured with tcpdump on all relevant nodes while packets
    are sent, then the set of nodes that saw the traffic is compared with
    the expected set.
    """
    force_tenant_isolation = False

    @classmethod
    @utils.requires_ext(extension="router", service="network")
    def skip_checks(cls):
        super(TrafficFlowTest, cls).skip_checks()
        if not CONF.network.public_network_id:
            raise cls.skipException(
                'The public_network_id option must be specified.')
        if not WB_CONF.run_traffic_flow_tests:
            raise cls.skipException(
                "CONF.whitebox_neutron_plugin_options."
                "run_traffic_flow_tests set to False.")

    @classmethod
    def resource_setup(cls):
        super(TrafficFlowTest, cls).resource_setup()
        cls.gateway_external_ip = cls.get_external_gateway()
        if not cls.gateway_external_ip:
            raise cls.skipException("IPv4 gateway is not configured "
                                    "for public network or public_network_id "
                                    "is not configured.")
        cls.discover_nodes()
        if WB_CONF.openstack_type == 'podified':
            cls.set_ovs_pods_for_nodes()

    @classmethod
    def set_ovs_pods_for_nodes(cls):
        """Map each discovered node to its running ovn-controller-ovs pod."""
        cmd = ("{} get pods --field-selector=status.phase=Running "
               "-o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name "
               "-l service=ovn-controller-ovs".format(cls.OC))
        output = cls.proxy_host_client.exec_command(
            cmd).strip().splitlines()
        for line in output:
            for node in cls.nodes:
                # split('.')[0] ensures that we always compare short names.
                # This will work even if for some reason one value is short
                # (can happen in get pods output) and another is long(fqdn).
                if line.split()[0].split('.')[0] == node['short_name']:
                    node['ovs_pod'] = line.split()[1]

    def _start_captures(self, filters, scenario='north_south', interface=None):
        """Start a tcpdump capture on every relevant node.

        :param filters: tcpdump filter expression.
        :param scenario: 'north_south' captures on the external interface,
            any other value on the tunnel interface.
        :param interface: explicit capture interface, overrides scenario.
        """
        for node in self.nodes:
            if not (node.get('ovs_pod') or
                    node['is_compute'] or
                    node['is_networker']):
                LOG.debug('Traffic is not captured on node %s because it is '
                          'none of these:\n'
                          '- a controller running an ovs pod\n'
                          '- a compute\n'
                          '- a networker', node['name'])
                continue
            elif (WB_CONF.openstack_type == 'podified' and
                    node.get('ovs_pod')):
                capture_client = self.proxy_host_client
                command_prefix = "{} rsh {} ".format(self.OC, node['ovs_pod'])
                if interface:
                    capture_interface = interface
                else:
                    if scenario == 'north_south':
                        capture_interface = WB_CONF.ovs_pod_ext_interface
                    else:
                        capture_interface = WB_CONF.ovs_pod_tunnel_interface
            else:
                capture_client = node['client']
                command_prefix = ''
                if interface:
                    capture_interface = interface
                else:
                    if scenario == 'north_south':
                        capture_interface = WB_CONF.node_ext_interface
                    else:
                        capture_interface = WB_CONF.node_tunnel_interface
            node['capture'] = capture.TcpdumpCapture(
                capture_client, capture_interface, filters, command_prefix)
            self.useFixture(node['capture'])
        # give tcpdump a moment to attach before traffic is generated
        time.sleep(2)

    def _stop_captures(self):
        """Stop every capture started by _start_captures."""
        for node in self.nodes:
            if node.get('capture'):
                node['capture'].stop()

    def _log_captured_packets(self):
        """Log the packets captured on every node.

        Used when a routing assertion fails, so that the unexpected (or
        missing) traffic can be inspected. Related-bug: #OSPRH-11312
        """
        for node in self.nodes:
            capture = node.get('capture')
            if capture is None or capture.is_empty():
                captured_packets = "No packets captured"
            else:
                captured_packets = "\n ".join(
                    capture.get_captured_records())
            LOG.debug("Node: %s; Packets captured: %s",
                      node["short_name"], captured_packets)

    def _assert_expected_routing_nodes(self, expected_routing_nodes):
        """Assert that exactly the expected nodes captured the traffic.

        Shared by the east-west and north-south checks (the logic was
        previously duplicated in both). On mismatch, the packets captured
        on all nodes are logged to ease debugging, then the assertion
        error is re-raised.

        :param expected_routing_nodes(list): short hostnames of nodes
            expected to have captured the traffic
        """
        LOG.debug('Expected routing nodes: %s',
                  ','.join(expected_routing_nodes))
        actual_routing_nodes = [node['short_name']
                                for node in self.nodes if
                                (node.get('capture') and
                                 not node['capture'].is_empty())]
        LOG.debug('Actual routing nodes: %s',
                  ','.join(actual_routing_nodes))
        try:
            self.assertCountEqual(
                expected_routing_nodes, actual_routing_nodes)
        except AssertionError:
            self._log_captured_packets()
            raise

    def check_east_west_icmp_flow(
            self, dst_ip, expected_routing_nodes, expected_macs, ssh_client):
        """Check that traffic routed as expected within a tenant network
        Both directions are supported.
        Traffic is captured on OVN-specific genev_sys_6081 interface

        :param dst_ip(str): Destination IP address that we check route to
        :param expected_routing_nodes(list): Hostnames of expected gateways,
            nodes on tunnel interface of which we expect
            to find ethernet frames with packets that we send
        :param expected_macs(tuple): pair of MAC addresses of ports that we
            expect to find on the captured packets
        :param ssh_client(Client): SSH client object of the origin of traffic
            (the one that we send traffic from)
        """
        # create filters
        if type(expected_macs) is tuple:
            filters = 'icmp and ether host {0} and ether host {1}'.format(
                expected_macs[0],
                expected_macs[1])
        elif type(expected_macs) is list:
            filters = ('"icmp and ((ether host {0} and ether host {1}) '
                       'or (ether host {2} and ether host {3}))"').format(
                expected_macs[0][0],
                expected_macs[0][1],
                expected_macs[1][0],
                expected_macs[1][1])
        else:
            raise TypeError(expected_macs)

        self._start_captures(filters, scenario='east_west')
        self.check_remote_connectivity(ssh_client, dst_ip, ping_count=2)
        time.sleep(5)
        self._stop_captures()
        self._assert_expected_routing_nodes(expected_routing_nodes)

    def check_north_south_icmp_flow(
            self, dst_ip, expected_routing_nodes, expected_mac, ssh_client,
            ignore_outbound=False):
        """Check that traffic routed as expected between internal and external
        networks. Both directions are supported.

        :param dst_ip(str): Destination IP address that we check route to
        :param expected_routing_nodes(list): Hostnames of expected gateways,
            nodes on external interface of which we expect
            to find ethernet frames with packets that we send
        :param expected_mac(str): MAC address of a port that we expect to find
            on the expected gateway external interface
        :param ssh_client(Client): SSH client object of the origin of traffic
            (the one that we send traffic from)
        :param ignore_outbound(bool): Whether to ignore outbound packets.
            This helps to avoid false positives.
        """
        inbound = '-Qin' if ignore_outbound else ''
        size = None
        if not WB_CONF.bgp:
            filters = '{} icmp and ether host {}'.format(inbound, expected_mac)
        else:
            filters = "{} icmp and icmp[0] == 8".format(inbound)
            # use a random payload size so that the capture filter below
            # matches only the pings sent by this test
            size = random.randint(0, 50)
            # Adjust payload size adding icmp header size
            if netaddr.valid_ipv6(dst_ip):
                size += 44
            else:
                size += 28
            # Filter including ip size packet
            filters += " and ip[2:2]=={} and ip dst {}".format(size, dst_ip)

        self._start_captures(filters)
        self.check_remote_connectivity(
            ssh_client, dst_ip, mtu=size, ping_count=2)
        self._stop_captures()
        self._assert_expected_routing_nodes(expected_routing_nodes)
|
|
|
|
|
|
class BaseTempestTestCaseOvn(BaseTempestWhiteboxTestCase):
    """Base class for tests that query the OVN NB/SB databases directly."""

    @classmethod
    def resource_setup(cls):
        super(BaseTempestTestCaseOvn, cls).resource_setup()
        if not cls.has_ovn_support:
            raise cls.skipException(
                "OVN agents not found. This test is supported only on "
                "openstack environments with OVN support.")

        # shell command prefixes to run ovn-nbctl/ovn-sbctl against the
        # deployment's NB and SB databases
        cls.nbctl, cls.sbctl = cls._get_ovn_dbs()
        cls.nbmonitorcmd, cls.sbmonitorcmd = cls._get_ovn_db_monitor_cmds()

    @classmethod
    def _get_ovn_db_monitor_cmds(cls):
        """Return (nb, sb) 'ovsdb-client monitor' command strings.

        NOTE(review): returns None implicitly for openstack_type values
        other than 'podified' and 'devstack' — confirm callers only run on
        these deployment types.
        """
        monitorcmdprefix = 'timeout 300 ovsdb-client monitor -f json '
        if WB_CONF.openstack_type == 'podified':
            # monitoring runs inside the DB pod via its local unix socket
            nb_monitor_connection_opts = cls.nbctl.replace(
                'ovn-nbctl', '{} unix:/tmp/ovnnb_db.sock'.format(
                    monitorcmdprefix))
            sb_monitor_connection_opts = cls.sbctl.replace(
                'ovn-sbctl', '{} unix:/tmp/ovnsb_db.sock'.format(
                    monitorcmdprefix))
            # strip --db parameter from connection opts since we're
            # going to run monitoring via a local socket
            return tuple(map(
                lambda cmd: re.sub(r'--db=[^\s]+', '', cmd),
                [nb_monitor_connection_opts, sb_monitor_connection_opts]))
        if WB_CONF.openstack_type == 'devstack':
            regex = r'--db=(.*)$'
            monitorcmdprefix = 'sudo ' + monitorcmdprefix
            # this regex search will return the connection string
            # (tcp:IP:port or ssl:IP:port) and in case of TLS,
            # will also include the TLS options
            nb_monitor_connection_opts = re.search(regex, cls.nbctl).group(1)
            sb_monitor_connection_opts = re.search(regex, cls.sbctl).group(1)
            return (monitorcmdprefix + nb_monitor_connection_opts,
                    monitorcmdprefix + sb_monitor_connection_opts)

    @classmethod
    def get_podified_ovn_db_cmd(cls, db):
        """Build an 'oc rsh ... ovn-<db>ctl --db=... --private-key=...' cmd.

        :param db: 'nb' or 'sb'.
        :returns: full command string runnable on the proxy host.
        """
        # use the first pod from the list, in case of multiple replicas
        db_pod = cls.proxy_host_client.exec_command(
            '{} get pods -l service=ovsdbserver-{} -o name'.format(
                cls.OC, db)).splitlines()[0].strip()
        command_items = [cls.OC, 'rsh', db_pod, 'ovn-{}ctl'.format(db)]

        # obtain the OVN DB addresses so that requests are sent to the RAFT
        # leader DB instance
        cmd = '{} get ovndbcluster ovndbcluster-{} '.format(cls.OC, db)
        cmd += '-o jsonpath="{.status.internalDbAddress}"'
        all_db_addresses = cls.proxy_host_client.exec_command(cmd).strip()
        command_items.append('--db="{}"'.format(all_db_addresses))

        # obtain the ovsdb-server command running on this pod
        # setsid added to prevent the following issue:
        # https://github.com/kubevirt/kubevirt/issues/10240
        # `||true` ensures a safe exit.
        cmd = ' '.join((cls.OC,
                        'exec',
                        db_pod,
                        '-- setsid',
                        'ps -o command -C ovsdb-server --no-headers -ww',
                        '|| true'))
        ovsdb_server_cmd = cls.proxy_host_client.exec_command(cmd)
        assert 'ovsdb-server' in ovsdb_server_cmd, \
            "command '{}' returned unexpected output: {}".format(
                cmd, ovsdb_server_cmd)
        # obtain the private-key, certificate and ca-cert options
        # (copied verbatim from the server command line for TLS access)
        for ssl_param in ('private-key', 'certificate', 'ca-cert'):
            command_items.append(re.search(
                r'--{}=[^\s]+'.format(ssl_param), ovsdb_server_cmd).group())

        return ' '.join(command_items)

    @classmethod
    def _get_ovn_dbs(cls):
        """Return [nbctl_cmd, sbctl_cmd] prefixes for this deployment.

        NOTE(review): returns None implicitly for other openstack_type
        values — confirm only 'podified'/'devstack' reach this code.
        """
        if WB_CONF.openstack_type == 'podified':
            return [cls.get_podified_ovn_db_cmd('nb'),
                    cls.get_podified_ovn_db_cmd('sb')]
        if WB_CONF.openstack_type == 'devstack':
            sbdb = "unix:/usr/local/var/run/ovn/ovnsb_db.sock"
            nbdb = sbdb.replace('sb', 'nb')
            cmd = "sudo ovn-{}ctl --db={}"
            return [cmd.format('nb', nbdb), cmd.format('sb', sbdb)]

    def get_router_gateway_chassis(self, router_port_id):
        """Return the short hostname of the chassis hosting the gateway port.

        Waits up to 30s for the cr-lrp port to be bound to a chassis.
        """
        cmd = "{} get port_binding cr-lrp-{} chassis".format(
            self.sbctl, router_port_id)
        LOG.debug("Waiting until port is bound to chassis")
        self.chassis_id = None

        def _port_binding_exist():
            # '[]' means the SB DB has no chassis bound for this port yet
            self.chassis_id = self.run_on_master_controller(cmd)
            LOG.debug("chassis_id = '%s'", self.chassis_id)
            if self.chassis_id != '[]':
                return True
            return False

        try:
            common_utils.wait_until_true(lambda: _port_binding_exist(),
                                         timeout=30, sleep=5)
        except common_utils.WaitTimeout:
            self.fail("Port is not bound to chassis")
        cmd = "{} get chassis {} hostname".format(self.sbctl, self.chassis_id)
        LOG.debug("Running '%s' on the master node", cmd)
        res = self.run_on_master_controller(cmd)
        # strip quotes and the domain part: return the short hostname only
        return res.replace('"', '').split('.')[0]

    def get_router_gateway_chassis_list(self, router_port_id):
        """Return the gateway chassis ids configured for a router port."""
        cmd = (self.nbctl + " lrp-get-gateway-chassis lrp-" + router_port_id)
        data = self.run_on_master_controller(cmd)
        # each output line looks like '<lrp>_<chassis> <prio>'; keep chassis
        return [re.sub(r'.*_(.*?)\s.*', r'\1', s) for s in data.splitlines()]

    def get_router_gateway_chassis_by_id(self, chassis_id):
        """Return the short hostname of the chassis with the given id."""
        res = self.run_on_master_controller(
            self.sbctl + " get chassis " + chassis_id + " hostname").rstrip()
        return res.replace('"', '').split('.')[0]

    def get_router_port_gateway_mtu(self, router_port_id):
        """Return the gateway_mtu option of a logical router port as int."""
        cmd = (self.nbctl + " get logical_router_port lrp-" + router_port_id +
               " options:gateway_mtu")
        return int(
            self.run_on_master_controller(cmd).rstrip().strip('"'))

    def get_item_uuid(self, db, item, search_string):
        """Return the _uuid of the first DB row matching search_string.

        :param db: 'sb' for the southbound DB, anything else means NB.
        """
        ovn_db = self.sbctl if db == 'sb' else self.nbctl
        cmd = (ovn_db + " find " + item + " " + search_string +
               " | grep _uuid | awk '{print $3}'")
        return self.run_on_master_controller(cmd)

    def get_datapath_tunnel_key(self, search_string):
        """Return the tunnel_key of the matching datapath_binding row."""
        cmd = (self.sbctl + " find datapath_binding " + search_string +
               " | grep tunnel_key | awk '{print $3}'")
        return self.run_on_master_controller(cmd)

    def get_logical_switch(self, port):
        """Returns logical switch name that port is connected to

        Fuction gets the logical switch name without its ID from the
        `ovn-nbctl lsp-get-ls <PORT_NAME>` command
        """
        cmd = '{cmd} lsp-get-ls {port}'.format(cmd=self.nbctl, port=port)
        output = self.run_on_master_controller(cmd)
        # the switch name starts with 'neutron-' and ends before ')'
        ls_name = re.search('neutron-[^)]*', output)
        if ls_name:
            return ls_name.group()
        else:
            return ''

    def get_physical_net(self, port):
        """Returns physical network name that port has configured with

        Physical network name is saved as option in the logical switch port
        record in OVN north database. It can be queried with
        `ovn-nbctl lsp-get-options <PORT_NAME>` command but this output may
        contain more than one option so it is better to get the value with
        `ovn-nbctl get Logical_Switch_Port <PORT_NAME> options:network_name`
        command
        """
        cmd = '{cmd} get Logical_Switch_Port {port} '\
            'options:network_name'.format(cmd=self.nbctl, port=port)
        return self.run_on_master_controller(cmd)

    def verify_that_segment_deleted(self, segment_id):
        """Checks that the segment id is not in the OVN database

        There shouldn't be 'provnet-<SEGEMTN_ID>' port in the OVN database
        after the segment has been deleted
        """
        cmd = '{cmd} find Logical_Switch_Port '\
            'name=provnet-{sid}'.format(cmd=self.nbctl, sid=segment_id)
        output = self.run_on_master_controller(cmd)
        self.assertEqual(output, '')

    def get_mac_mappings(self, client, physical_network):
        """Return the chassis MAC mapping for the given physical network.

        NOTE(review): returns None implicitly when no mapping matches —
        confirm callers handle that.
        """
        output = client.exec_command(
            "sudo ovs-vsctl get open . external_ids:ovn-chassis-mac-"
            "mappings").strip().replace('"', '').split(',')
        for value in output:
            if physical_network in value:
                return value.replace(physical_network + ':', '')
|
|
|
|
|
|
class BaseDisruptiveTempestTestCase(BaseTempestWhiteboxTestCase):
    """Base class for tests that power off/on or reboot overcloud nodes.

    All power operations go through virsh on a hypervisor host, reached
    over ssh from the proxy host.
    """

    @classmethod
    def resource_setup(cls):
        # Find a hypervisor host with a working virsh: first the configured
        # WB_CONF.hypervisor_host, then fall back to entries discovered in
        # the proxy host's ~/.ssh/config; skip the suite if neither works.
        super(BaseDisruptiveTempestTestCase, cls).resource_setup()
        try:
            cls.proxy_host_client.exec_command(
                "timeout 10 ssh {} virsh list".format(WB_CONF.hypervisor_host))
            cls.hypervisor_host = WB_CONF.hypervisor_host
            return
        except lib_exceptions.SSHExecCommandFailed:
            LOG.debug("Attempt to execute virsh command on hypervisor_host: "
                      "'%s' failed. Trying to discover hypervisor host from "
                      ".ssh/config file.", WB_CONF.hypervisor_host)
        # Depending on ci-fmw version and/or setup, .ssh/config file could
        # include an entry for either hypervisor or hypervisor-1
        host = cls.proxy_host_client.exec_command(
            r"grep 'Host.*\ \(hypervisor\|hypervisor-1\)$' ~/.ssh/config "
            "| cut -d' ' -f 2").strip()

        try:
            cls.proxy_host_client.exec_command(
                "timeout 10 ssh {} virsh list".format(host))
        except lib_exceptions.SSHExecCommandFailed:
            raise cls.skipException(
                "No access to virsh tool on hypervisor node. Please make sure "
                "that hypervisor_host is configured properly and/or virsh "
                "is deployed there.")
        cls.hypervisor_host = host

    @classmethod
    def find_host_virsh_name(cls, host):
        """Return the virsh domain name that matches the given hostname."""
        cmd = ("timeout 10 ssh {} sudo virsh list --all --name "
               "| grep -w {}").format(
            cls.hypervisor_host, host)
        return cls.proxy_host_client.exec_command(cmd).strip()

    @classmethod
    def is_host_state_is_shut_off(cls, host):
        """Return True if the virsh domain for host is in shut off state."""
        # '|| true' keeps the ssh command from failing when grep matches
        # nothing (host not shut off)
        cmd = ("timeout 10 ssh {} virsh list --state-shutoff | grep -w {} "
               "|| true".format(cls.hypervisor_host, host))
        output = cls.proxy_host_client.exec_command(cmd)
        return host in output

    @classmethod
    def is_host_loginable(cls, host):
        """Return True if the host answers to ssh (hostname echoes back)."""
        cmd = "timeout 10 ssh {} ssh {} hostname || true".format(
            cls.hypervisor_host, host)
        output = cls.proxy_host_client.exec_command(cmd)
        return host in output

    @classmethod
    def power_off_host(cls, host):
        """Hard power off a node via 'virsh destroy' and wait for shut off."""
        if not WB_CONF.run_power_operations_tests:
            raise cls.skipException("Power operations are not allowed")
        cmd = "timeout 10 ssh {} sudo virsh destroy {}".format(
            cls.hypervisor_host, cls.find_host_virsh_name(host))
        cls.proxy_host_client.exec_command(cmd)
        common_utils.wait_until_true(
            lambda: cls.is_host_state_is_shut_off(host),
            timeout=30, sleep=5)

    @classmethod
    def power_on_host(cls, host):
        """Start a node via 'virsh start' and wait until ssh works again."""
        if not WB_CONF.run_power_operations_tests:
            raise cls.skipException("Power operations are not allowed")
        cmd = "timeout 10 ssh {} sudo virsh start {}".format(
            cls.hypervisor_host, cls.find_host_virsh_name(host))
        cls.proxy_host_client.exec_command(cmd)
        # TODO(rsafrono): implement and apply additional health checks
        common_utils.wait_until_true(
            lambda: cls.is_host_loginable(host),
            timeout=120, sleep=5)

    @classmethod
    def reboot_host(cls, host):
        """Reboot a node via 'virsh reboot' and wait until ssh works again."""
        if not WB_CONF.run_power_operations_tests:
            raise cls.skipException("Power operations are not allowed")
        cmd = "timeout 10 ssh {} sudo virsh reboot {}".format(
            cls.hypervisor_host, cls.find_host_virsh_name(host))
        cls.proxy_host_client.exec_command(cmd)
        common_utils.wait_until_true(
            lambda: cls.is_host_loginable(host),
            timeout=120, sleep=5)

    def ensure_overcloud_nodes_active(self):
        """Checks all openstack nodes are up, otherwise activates them.
        """
        hosts = self.proxy_host_client.exec_command(
            "timeout 10 ssh {} sudo virsh list --all --name".format(
                self.hypervisor_host)).strip().split()
        for host in hosts:
            if self.is_host_state_is_shut_off(host):
                self.power_on_host(host)
|
|
|
|
|
|
# user_data_cmd is used to generate a VLAN interface on VM instances with PF
# ports. Both '%s' placeholders receive the same VLAN interface name
# (see build_user_data below).
user_data_cmd = """
#cloud-config
write_files:
- path: "/etc/sysconfig/network-scripts/ifcfg-%s"
  owner: "root"
  permissions: "777"
  content: |
    DEVICE="%s"
    BOOTPROTO="dhcp"
    ONBOOT="yes"
    VLAN="yes"
    PERSISTENT_DHCLIENT="yes"
runcmd:
- [ sh, -c , "systemctl restart NetworkManager" ]
"""
# strip any tab characters from the literal — cloud-init/YAML requires
# space indentation
user_data_cmd = user_data_cmd.replace('\t', '')
|
|
|
|
|
|
def build_user_data(net_vlan):
    """Return base64-encoded cloud-init user data for a PF port instance.

    user_data is required when direct-physical (PF) ports are used: it
    configures a VLAN sub-interface on the default instance interface.

    :param net_vlan: VLAN segmentation id for the sub-interface.
    """
    vlan_interface = '{}.{}'.format(
        WB_CONF.default_instance_interface, net_vlan)
    rendered = user_data_cmd % (vlan_interface, vlan_interface)
    return base64.b64encode(rendered.encode("utf-8"))
|
|
|
|
|
|
class ProviderBaseTest(BaseTempestWhiteboxTestCase):
    """Base class for tests using provider networks, such as provider routed
    networks or sriov scenarios
    Admin user is needed to create ports on the existing provisioning network
    """
    # servers created via _create_server; deleted in resource_cleanup.
    # NOTE(review): mutable class attribute — shared between subclasses
    # unless they shadow it; confirm this is intentional.
    servers = []
    # optional client overrides; when left as None the default
    # (os_primary / cls.client) clients are used instead
    keypairs_client = None
    secgroup_client = None
    servers_client = None

    # optional extra DHCP options applied to every created port
    extra_dhcp_opts = None
|
|
|
|
@classmethod
|
|
def create_loginable_secgroup_rule(cls, secgroup_id=None,
|
|
client=None):
|
|
"""This rule is intended to permit inbound IPv4 and IPv6 ssh
|
|
"""
|
|
cls.create_security_group_rule(
|
|
security_group_id=secgroup_id,
|
|
client=client,
|
|
protocol='tcp',
|
|
direction='ingress',
|
|
ip_version=6,
|
|
port_range_min=22,
|
|
port_range_max=22)
|
|
cls.create_security_group_rule(
|
|
security_group_id=secgroup_id,
|
|
client=client,
|
|
protocol='tcp',
|
|
direction='ingress',
|
|
port_range_min=22,
|
|
port_range_max=22)
|
|
|
|
@classmethod
|
|
def create_pingable_secgroup_rule(cls, secgroup_id=None,
|
|
client=None):
|
|
"""This rule is intended to permit inbound IPv4 and IPv6 ping
|
|
"""
|
|
cls.create_security_group_rule(
|
|
security_group_id=secgroup_id,
|
|
client=client,
|
|
protocol='icmp',
|
|
direction='ingress')
|
|
cls.create_security_group_rule(
|
|
security_group_id=secgroup_id,
|
|
client=client,
|
|
protocol='icmpv6',
|
|
ip_version=6,
|
|
direction='ingress')
|
|
|
|
@classmethod
|
|
def resource_setup(cls):
|
|
super(ProviderBaseTest, cls).resource_setup()
|
|
# setup basic topology for servers we can log into it
|
|
cls.keypair = cls.create_keypair(client=cls.keypairs_client)
|
|
secgroup_name = data_utils.rand_name('secgroup')
|
|
if cls.secgroup_client:
|
|
cls.secgroup = cls.secgroup_client.create_security_group(
|
|
name=secgroup_name)['security_group']
|
|
else:
|
|
cls.secgroup = cls.client.create_security_group(
|
|
name=secgroup_name)['security_group']
|
|
cls.security_groups.append(cls.secgroup)
|
|
cls.create_loginable_secgroup_rule(
|
|
secgroup_id=cls.secgroup['id'],
|
|
client=cls.client)
|
|
cls.create_pingable_secgroup_rule(
|
|
secgroup_id=cls.secgroup['id'],
|
|
client=cls.client)
|
|
|
|
@classmethod
|
|
def resource_cleanup(cls):
|
|
client = cls.servers_client or cls.os_primary.servers_client
|
|
for server in cls.servers:
|
|
cls._try_delete_resource(client.delete_server,
|
|
server['id'])
|
|
waiters.wait_for_server_termination(client,
|
|
server['id'])
|
|
super(ProviderBaseTest, cls).resource_cleanup()
|
|
|
|
@classmethod
|
|
def create_network_with_port(cls, cidr, gateway=True, **kwargs):
|
|
cls.network = cls.create_network()
|
|
if not gateway:
|
|
# some subnets need to be created without a default gateway
|
|
# e.g.: when a server is created with two ports, one of them should
|
|
# not include a default gateway
|
|
cls.subnet = cls.create_subnet(
|
|
cls.network, cidr=cidr, gateway=None)
|
|
else:
|
|
cls.subnet = cls.create_subnet(cls.network, cidr=cidr)
|
|
cls.port_id = cls.create_port(network=cls.network, **kwargs)['id']
|
|
return {'port': cls.port_id}
|
|
|
|
def build_create_port_body_and_secgroups(self, port_type, secgroup):
|
|
"""create_port_body and security_groups are needed to create ports,
|
|
whatever their types are (normal, macvtap, direct or direct-physical)
|
|
"""
|
|
create_port_body = {}
|
|
security_groups = []
|
|
if port_type not in ('direct', 'direct-physical', 'macvtap'):
|
|
create_port_body['security_groups'] = [secgroup['id']]
|
|
security_groups = [{'name': secgroup['name']}]
|
|
create_port_body['binding:vnic_type'] = port_type
|
|
create_port_body['name'] = "_".join(['port', port_type])
|
|
if self.extra_dhcp_opts is not None:
|
|
create_port_body['extra_dhcp_opts'] = self.extra_dhcp_opts
|
|
|
|
return (create_port_body, security_groups)
|
|
|
|
def _create_server(self, **kwargs):
|
|
kwargs['client'] = \
|
|
self.servers_client or self.os_primary.servers_client
|
|
kwargs['flavor_ref'] = self.flavor_ref
|
|
kwargs['image_ref'] = self.image_ref
|
|
kwargs['key_name'] = self.keypair['name']
|
|
server = self.create_server(**kwargs)
|
|
self.servers.append(server['server'])
|
|
return server['server']
|
|
|
|
def _create_network_port(self, port_type,
                         reuse_port=False,
                         use_provider_net=True,
                         subnet_id=None,
                         reused_tenant_net=None,
                         cidr=None,
                         gateway=True):
    """Create (or reuse) a port and gather the data needed to boot a VM.

    :param port_type: vnic type for the port (normal, macvtap, direct,
        direct-physical).
    :param reuse_port: when True and ``use_provider_net`` is set, no new
        port is created; the last port in ``self.ports`` is used.
    :param use_provider_net: create the port on the provider (floating)
        network instead of a tenant network.
    :param subnet_id: segment suffix (string) selecting a routed-network
        segment named ``'segment' + subnet_id``; only meaningful with
        ``use_provider_net``.
    :param reused_tenant_net: existing tenant network to create the port
        on instead of creating a new network.
    :param cidr: CIDR for the subnet when a new tenant network is made.
    :param gateway: whether the new tenant subnet gets a default gateway.
    :return: tuple of (security_groups, port dict, user_data,
        config_drive) suitable for server creation.
    """
    create_port_body, security_groups = \
        self.build_create_port_body_and_secgroups(
            port_type, self.secgroup)
    if subnet_id:
        # Routed provider networks name their per-segment subnets and
        # segments 'segment<id>'; used for both lookups below.
        subnet_name = 'segment' + subnet_id
    if use_provider_net:
        self.network = fixed_network.get_network_from_name(
            CONF.network.floating_network_name, self.client)
        if not reuse_port:
            if not subnet_id:
                # No segment requested: put the port on the provider
                # network's last subnet.
                self.create_port(
                    network=self.network,
                    fixed_ips=[{'subnet_id': self.network['subnets'][-1]}],
                    **create_port_body)
            else:
                subnets = self.client.list_subnets(name=subnet_name)
                # NOTE: subnet_id is rebound here from the segment
                # suffix string to a fixed_ips dict; the later
                # ``if not subnet_id`` check relies on this dict being
                # truthy to take the segment branch.
                subnet_id = {'subnet_id': subnets['subnets'][-1]['id']}
                self.create_port(
                    network=self.network,
                    fixed_ips=[subnet_id],
                    **create_port_body)

        # Whether just created or reused, the port of interest is the
        # most recently tracked one.
        port = {'port': self.ports[-1]['id']}
    elif not reused_tenant_net:
        # Fresh tenant network + subnet + port.
        port = self.create_network_with_port(
            cidr=cidr, gateway=gateway, **create_port_body)
    else:
        # Reuse an existing tenant network; only create the port.
        self.network = reused_tenant_net
        port = {'port': self.create_port(network=self.network,
                                         **create_port_body)['id']}
    if not subnet_id:
        # Look up the network's VLAN id via the admin client (the
        # segmentation id is an admin-only attribute).
        net_id = self.network['id']
        nc = self.admin_manager.network_client
        net_vlan = nc.show_network(net_id)['network'][
            'provider:segmentation_id']
    else:
        # Segment case: take the VLAN id from the named segment.
        segments = self.client.list_segments(name=subnet_name)
        net_vlan = segments['segments'][-1]['segmentation_id']
    user_data = ""
    config_drive = False
    if port_type == 'direct-physical':
        # PF passthrough ports get no DHCP from neutron; inject VLAN
        # interface configuration via user data + config drive.
        user_data = build_user_data(net_vlan)
        config_drive = True
    return (security_groups, port, user_data, config_drive)
|
|
|
|
def check_port_status(self, port_type,
                      port_index=-1, server_index=-1):
    """Wait for a port to become ACTIVE and validate its binding.

    By default the last created port (-1) and the last created
    server (-1) are checked.

    :param port_type: expected ``binding:vnic_type`` of the port.
    :param port_index: index into ``self.ports`` of the port to check.
    :param server_index: index into ``self.servers`` of its server.
    :return: the port details dict from ``show_port``.
    """
    port_id = self.ports[port_index]['id']
    server_id = self.servers[server_index]['id']
    waiters.wait_for_interface_status(self.os_adm.interfaces_client,
                                      server_id, port_id,
                                      constants.PORT_STATUS_ACTIVE)
    details = self.client.show_port(port_id)['port']
    provider_net = fixed_network.get_network_from_name(
        CONF.network.floating_network_name, self.client)
    # The port must live on the provider network, be administratively
    # up and carry the requested vnic type.
    self.assertEqual(details['network_id'], provider_net['id'])
    self.assertEqual(details['admin_state_up'], True)
    self.assertEqual(details['binding:vnic_type'], port_type)
    return details
|