Merge "Add global nodes variable"

Authored by Zuul on 2023-11-16 16:53:03 +00:00; committed by Gerrit Code Review
commit 133a20999a
14 changed files with 76 additions and 105 deletions

View File

@@ -18,15 +18,7 @@ function configure {
iniset $TEMPEST_CONFIG whitebox rx_queue_size $WHITEBOX_RX_QUEUE_SIZE
iniset $TEMPEST_CONFIG whitebox default_video_model $WHITEBOX_DEFAULT_VIDEO_MODEL
iniset $TEMPEST_CONFIG whitebox max_disk_devices_to_attach $WHITEBOX_MAX_DISK_DEVICES_TO_ATTACH
iniset $TEMPEST_CONFIG whitebox-nova-compute config_path "$WHITEBOX_NOVA_COMPUTE_CONFIG_PATH"
iniset $TEMPEST_CONFIG whitebox-nova-compute stop_command "$WHITEBOX_NOVA_COMPUTE_STOP_COMMAND"
iniset $TEMPEST_CONFIG whitebox-nova-compute start_command "$WHITEBOX_NOVA_COMPUTE_START_COMMAND"
iniset $TEMPEST_CONFIG whitebox-libvirt start_command "$WHITEBOX_LIBVIRT_START_COMMAND"
iniset $TEMPEST_CONFIG whitebox-libvirt stop_command "$WHITEBOX_LIBVIRT_STOP_COMMAND"
iniset $TEMPEST_CONFIG whitebox-libvirt mask_command "$WHITEBOX_LIBVIRT_MASK_COMMAND"
iniset $TEMPEST_CONFIG whitebox-libvirt unmask_command "$WHITEBOX_LIBVIRT_UNMASK_COMMAND"
iniset $TEMPEST_CONFIG whitebox nodes_yaml $WHITEBOX_NODES_YAML
iniset $TEMPEST_CONFIG whitebox-database user $DATABASE_USER
iniset $TEMPEST_CONFIG whitebox-database password $DATABASE_PASSWORD

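For orientation, devstack's iniset helper writes a single key under a named section of an ini file, so the surviving calls above now emit only the whitebox-level options, including the new nodes_yaml path, into tempest.conf. A rough Python equivalent of the helper, using only the standard library (the file name and values below are illustrative, not part of this change):

import configparser

def iniset(path, section, key, value):
    # Rough equivalent of devstack's iniset: set [section] key = value.
    cfg = configparser.ConfigParser()
    cfg.read(path)
    if not cfg.has_section(section):
        cfg.add_section(section)
    cfg.set(section, key, str(value))
    with open(path, "w") as f:
        cfg.write(f)

# What the hunk above now writes, for example:
iniset("tempest.conf", "whitebox", "nodes_yaml", "/home/zuul/compute_nodes.yaml")
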
View File

@@ -6,15 +6,7 @@ WHITEBOX_FILE_BACKED_MEMORY_SIZE=${WHITEBOX_FILE_BACKED_MEMORY_SIZE:-8192}
WHITEBOX_RX_QUEUE_SIZE=${WHITEBOX_RX_QUEUE_SIZE:-1024}
WHITEBOX_DEFAULT_VIDEO_MODEL=${WHITEBOX_DEFAULT_VIDEO_MODEL:-'virtio'}
WHITEBOX_MAX_DISK_DEVICES_TO_ATTACH=${WHITEBOX_MAX_DISK_DEVICES_TO_ATTACH:-7}
WHITEBOX_NOVA_COMPUTE_CONFIG_PATH=${WHITEBOX_NOVA_COMPUTE_CONFIG_PATH:-/etc/nova/nova-cpu.conf}
WHITEBOX_NOVA_COMPUTE_STOP_COMMAND=${WHITEBOX_NOVA_COMPUTE_STOP_COMMAND:-'systemctl stop devstack@n-cpu'}
WHITEBOX_NOVA_COMPUTE_START_COMMAND=${WHITEBOX_NOVA_COMPUTE_START_COMMAND:-'systemctl start devstack@n-cpu'}
WHITEBOX_LIBVIRT_START_COMMAND=${WHITEBOX_LIBVIRT_START_COMMAND:-'systemctl start libvirtd'}
WHITEBOX_LIBVIRT_STOP_COMMAND=${WHITEBOX_LIBVIRT_STOP_COMMAND:-'systemctl stop libvirtd'}
WHITEBOX_LIBVIRT_MASK_COMMAND=${WHITEBOX_LIBVIRT_MASK_COMMAND:-'systemctl mask libvirtd'}
WHITEBOX_LIBVIRT_UNMASK_COMMAND=${WHITEBOX_LIBVIRT_UNMASK_COMMAND:-'systemctl unmask libvirtd'}
WHITEBOX_NODES_YAML=${WHITEBOX_NODES_YAML:-'/home/zuul/compute_nodes.yaml'}
WHITEBOX_CPU_TOPOLOGY=${WHITEBOX_CPU_TOPOLOGY:-''}
WHITEBOX_DEDICATED_CPUS_PER_NUMA=${WHITEBOX_DEDICATED_CPUS_PER_NUMA:-4}

View File

@@ -0,0 +1,13 @@
{% for compute in computes -%}
{{ compute }}:
services:
libvirt:
start_command: 'systemctl start libvirtd'
stop_command: 'systemctl stop libvirtd'
mask_command: 'systemctl mask libvirtd'
unmask_command: 'systemctl unmask libvirtd'
nova-compute:
config_path: '/etc/nova/nova-cpu.conf'
start_command: 'systemctl start devstack@n-cpu'
stop_command: 'systemctl stop devstack@n-cpu'
{% endfor %}

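The new template expands to one mapping entry per compute host, each carrying the service commands that were previously global config options. A sketch of rendering and parsing it, assuming jinja2 and PyYAML are available and using made-up hostnames (the template string is trimmed to the libvirt start_command for brevity):

import jinja2
import yaml

template = jinja2.Template(
    "{% for compute in computes -%}\n"
    "{{ compute }}:\n"
    "  services:\n"
    "    libvirt:\n"
    "      start_command: 'systemctl start libvirtd'\n"
    "{% endfor %}"
)
rendered = template.render(computes=["compute-0.example.com", "compute-1.example.com"])
nodes = yaml.safe_load(rendered)
print(nodes["compute-0.example.com"]["services"]["libvirt"]["start_command"])
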
View File

@@ -26,4 +26,22 @@
name: copy-build-sshkey
vars:
ansible_become: yes
copy_sshkey_target_user: 'tempest'
- name: Collect compute hostnames
set_fact:
computes: "{{ ansible_play_hosts_all|map('extract', hostvars, 'ansible_fqdn')|list }}"
run_once: true
- name: Render compute_nodes.yaml template
template:
src: ../templates/compute_nodes.yaml.j2
dest: /home/zuul/compute_nodes.yaml
run_once: true
delegate_to: controller
- name: Output the rendered file at /home/zuul/compute_nodes.yaml
shell: |
cat /home/zuul/compute_nodes.yaml
run_once: true
delegate_to: controller

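The set_fact task collects every play host's FQDN through the map('extract', ...) filter chain; run_once with delegate_to: controller then renders the file a single time on the controller node. In plain Python the collection step is roughly the following (hostvars contents are invented for illustration):

# Rough equivalent of:
#   computes: "{{ ansible_play_hosts_all|map('extract', hostvars, 'ansible_fqdn')|list }}"
hostvars = {
    "controller": {"ansible_fqdn": "controller.example.com"},
    "compute1": {"ansible_fqdn": "compute1.example.com"},
}
ansible_play_hosts_all = ["controller", "compute1"]

computes = [hostvars[h]["ansible_fqdn"] for h in ansible_play_hosts_all]
print(computes)  # ['controller.example.com', 'compute1.example.com']
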
View File

@@ -1,6 +1,6 @@
[tox]
minversion = 3.18.0
envlist = pep8,py{36,38,39,310}
envlist = pep8
skip_missing_interpreters = True
# Automatic envs (pyXX) will only use the python version appropriate to that
# env and ignore basepython inherited from [testenv] if we set
@@ -16,9 +16,6 @@ allowlist_externals = *
deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
find . -type f -name "*.pyc" -delete
stestr run {posargs}
[testenv:pep8]
commands =

View File

@@ -24,7 +24,6 @@ from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from whitebox_tempest_plugin.services import clients
from whitebox_tempest_plugin import utils as whitebox_utils
if six.PY2:
import contextlib2 as contextlib
@@ -125,17 +124,15 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
def get_server_xml(self, server_id):
server = self.os_admin.servers_client.show_server(server_id)['server']
host = server['OS-EXT-SRV-ATTR:host']
cntrlplane_addr = whitebox_utils.get_ctlplane_address(host)
server_instance_name = server['OS-EXT-SRV-ATTR:instance_name']
virshxml = clients.VirshXMLClient(cntrlplane_addr)
virshxml = clients.VirshXMLClient(host)
xml = virshxml.dumpxml(server_instance_name)
return ET.fromstring(xml)
def get_server_blockdevice_path(self, server_id, device_name):
host = self.get_host_for_server(server_id)
cntrlplane_addr = whitebox_utils.get_ctlplane_address(host)
virshxml = clients.VirshXMLClient(cntrlplane_addr)
virshxml = clients.VirshXMLClient(host)
blklist = virshxml.domblklist(server_id).splitlines()
source = None
for line in blklist:

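The refactor here hands the bare compute hostname to VirshXMLClient and lets the client resolve the control-plane address internally (see the clients.py hunks further down), rather than having every call site do the translation. A toy illustration of the pattern, with invented names that are not the plugin's API:

# Address lookup moves inside the client instead of every call site.
ADDRESSES = {"compute-0.example.com": "192.168.1.10"}  # stand-in for ctlplane addresses

class VirshXMLClientSketch:
    def __init__(self, host):
        self.ctlplane_address = ADDRESSES[host]  # previously done by each caller

client = VirshXMLClientSketch("compute-0.example.com")
print(client.ctlplane_address)
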
View File

@@ -285,7 +285,6 @@ class CPUThreadPolicyTest(BasePinningTest):
"""
siblings = {}
host = whitebox_utils.get_ctlplane_address(host)
virshxml = clients.VirshXMLClient(host)
capxml = virshxml.capabilities()
root = ET.fromstring(capxml)

View File

@@ -19,7 +19,6 @@ from tempest import config
from whitebox_tempest_plugin.api.compute import base
from whitebox_tempest_plugin.services import clients
from whitebox_tempest_plugin.utils import get_ctlplane_address
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -66,8 +65,7 @@ class TestRBDDirectDownload(base.BaseWhiteboxComputeTest):
log_query_string = f"Attempting to export RBD image: " \
f"[[]pool_name: {rbd_pool}[]] [[]image_uuid: " \
f"{image_id}[]]"
host_ip = get_ctlplane_address(host)
logs_client = clients.LogParserClient(host_ip)
logs_client = clients.LogParserClient(host)
# Assert if log with specified image is found
self.assertTrue(len(logs_client.parse(log_query_string)))
path = self.get_server_blockdevice_path(server['id'], 'vda')

View File

@@ -18,7 +18,6 @@ from tempest import config
from whitebox_tempest_plugin.api.compute import base
from whitebox_tempest_plugin.services.clients import QEMUImgClient
from whitebox_tempest_plugin.utils import get_ctlplane_address
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -107,7 +106,7 @@ class TestQEMUVolumeEncryption(base.BaseWhiteboxComputeTest):
# Get volume details from qemu-img info with the previously generated
# volume path
host = get_ctlplane_address(self.get_host_for_server(server['id']))
host = self.get_host_for_server(server['id'])
qemu_img_client = QEMUImgClient(host)
qemu_info = qemu_img_client.info(path)

View File

@@ -22,7 +22,6 @@ from tempest import config
from whitebox_tempest_plugin.api.compute import base
from whitebox_tempest_plugin.services import clients
from whitebox_tempest_plugin import utils as whitebox_utils
CONF = config.CONF
@@ -71,8 +70,7 @@ class VolumesAdminNegativeTest(base.BaseWhiteboxComputeTest,
self.assertGreater(
len(disks_after_attach),
len(disks_before_attach))
host = whitebox_utils.get_ctlplane_address(
self.get_host_for_server(server['id']))
host = self.get_host_for_server(server['id'])
with clients.ServiceManager(host, 'libvirt').stopped():
# While this call to n-api will return successfully the underlying

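The stopped() helper used above is a context manager that stops the named service on entry and restarts it on exit, so the test body runs while libvirt is down. A minimal sketch of that shape, with a hypothetical in-memory service standing in for the plugin's SSH-backed one (the real ServiceManager also handles mask/unmask commands):

import contextlib

@contextlib.contextmanager
def stopped(service):
    # Stop the service for the duration of the with-block, then restart it.
    service.stop()
    try:
        yield service
    finally:
        service.start()

class FakeService:
    def stop(self):
        print("stop libvirt")

    def start(self):
        print("start libvirt")

with stopped(FakeService()):
    print("service is down; run the negative test here")
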
View File

@@ -22,6 +22,9 @@ general_group = cfg.OptGroup(
title='General Whitebox Tempest plugin config options')
general_opts = [
cfg.StrOpt(
'nodes_yaml',
help='File path to the YAML file describing the compute hosts.'),
cfg.StrOpt(
'ctlplane_ssh_username',
help='Username to use when accessing controllers and/or compute hosts '
@@ -133,59 +136,13 @@ nova_compute_group = cfg.OptGroup(
title='Config options to manage the nova-compute service')
nova_compute_opts = [
cfg.StrOpt(
'config_path',
help='Path to the configuration file for the nova-compute service.'),
cfg.StrOpt(
'start_command',
help='Command to start the nova-compute service, without any '
'privilege management (ie, no sudo).'),
cfg.StrOpt(
'stop_command',
help='Command to stop the nova-compute service, without any '
'privilege management (ie, no sudo).'),
cfg.StrOpt(
'log_query_command',
default="journalctl",
choices=["journalctl", "zgrep"],
help="Name of the utility to run LogParserClient commands. "
"Currently, supported values are 'journalctl' (default) "
"for devstack and 'zgrep' for TripleO"),
]
libvirt_group = cfg.OptGroup(
name='whitebox-libvirt',
title='Config options to manage the libvirt service')
libvirt_opts = [
cfg.StrOpt(
'start_command',
help='Command to start the libvirt service, without any '
'privilege management (ie, no sudo).'),
cfg.StrOpt(
'stop_command',
help='Command to stop the libvirt service, without any '
'privilege management (ie, no sudo).',
deprecated_opts=[cfg.DeprecatedOpt('stop_command',
group='whitebox-nova-libvirt')]),
cfg.StrOpt(
'mask_command',
help='In some situations (Ubuntu Focal, for example), libvirtd can '
'be activated by other systemd units even if it is stopped. '
'In such cases, it can be useful to mask a service (ie, disable '
'it completely) to prevent it from being started outside of our '
'control. This config options sets the command to mask libvirt. '
'If set, it will be executed after every stop command.'),
cfg.StrOpt(
'unmask_command',
help='Similar to the mask_command option, this config options sets '
'the command to unmask libvirt. If set, it will be run before '
'every start command.'),
cfg.StrOpt(
'libvirt_container_name',
default="nova_libvirt",
help='The container name to use when needing to interact with the '
'respective virsh command of the compute host'),
"for devstack and 'zgrep' for TripleO")
]
database_group = cfg.OptGroup(

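Most of the per-service options disappear here: the whitebox-libvirt group is removed entirely and the nova-compute command options are dropped, leaving only log_query_command; that data now lives per host in the file named by the new nodes_yaml option. A minimal oslo.config sketch of registering and reading such an option, assuming the group is named whitebox as devstack's "iniset $TEMPEST_CONFIG whitebox nodes_yaml ..." suggests:

from oslo_config import cfg

CONF = cfg.ConfigOpts()
group = cfg.OptGroup(name='whitebox', title='General')
CONF.register_group(group)
CONF.register_opts(
    [cfg.StrOpt('nodes_yaml',
                help='File path to the YAML file describing the compute hosts.')],
    group=group)

CONF(args=[], default_config_files=[])  # tempest normally parses tempest.conf here
print(CONF.whitebox.nodes_yaml)  # None until set in the config file
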
View File

@@ -38,8 +38,6 @@ class WhiteboxTempestPlugin(plugins.TempestPlugin):
whitebox_config.nova_compute_opts)
config.register_opt_group(conf, whitebox_config.database_group,
whitebox_config.database_opts)
config.register_opt_group(conf, whitebox_config.libvirt_group,
whitebox_config.libvirt_opts)
config.register_opt_group(conf, whitebox_config.hardware_group,
whitebox_config.hardware_opts)
config.register_opt_group(conf, config.compute_features_group,
@@ -50,8 +48,6 @@ class WhiteboxTempestPlugin(plugins.TempestPlugin):
whitebox_config.general_opts),
(whitebox_config.nova_compute_group.name,
whitebox_config.nova_compute_opts),
(whitebox_config.libvirt_group.name,
whitebox_config.libvirt_opts),
(whitebox_config.database_group.name,
whitebox_config.database_opts),
(whitebox_config.hardware_group.name,

View File

@@ -36,10 +36,11 @@ LOG = logging.getLogger(__name__)
class SSHClient(object):
"""A client to execute remote commands, based on tempest.lib.common.ssh."""
def __init__(self, ctlplane_address):
def __init__(self, host):
self.ssh_key = CONF.whitebox.ctlplane_ssh_private_key_path
self.ssh_user = CONF.whitebox.ctlplane_ssh_username
self.ctlplane_address = ctlplane_address
self.host_parameters = whitebox_utils.get_host_details(host)
self.ctlplane_address = whitebox_utils.get_ctlplane_address(host)
def execute(self, command, container_name=None, sudo=False):
ssh_client = ssh.Client(self.ctlplane_address, self.ssh_user,
@@ -59,9 +60,12 @@ class SSHClient(object):
class VirshXMLClient(SSHClient):
"""A client to obtain libvirt XML from a remote host."""
def __init__(self, ctlplane_address):
super(VirshXMLClient, self).__init__(ctlplane_address)
self.container_name = CONF.whitebox_libvirt.libvirt_container_name
def __init__(self, host):
super(VirshXMLClient, self).__init__(host)
service_dict = self.host_parameters.get('services', {}).get('libvirt')
if service_dict is None:
raise exceptions.MissingServiceSectionException(service='libvirt')
self.container_name = service_dict.get('container_name')
def dumpxml(self, domain):
command = 'virsh dumpxml %s' % domain
@@ -96,7 +100,10 @@ class QEMUImgClient(SSHClient):
def __init__(self, ctlplane_address):
super(QEMUImgClient, self).__init__(ctlplane_address)
self.container_name = CONF.whitebox_libvirt.libvirt_container_name
service_dict = self.host_parameters.get('services', {}).get('libvirt')
if service_dict is None:
raise exceptions.MissingServiceSectionException(service='libvirt')
self.container_name = service_dict.get('container_name')
def info(self, path):
command = 'qemu-img info --output=json --force-share %s' % path
@@ -120,15 +127,15 @@ class ServiceManager(SSHClient):
this must match the binary in the Nova os-services API.
"""
super(ServiceManager, self).__init__(hostname)
conf = getattr(CONF, 'whitebox-%s' % service, None)
if conf is None:
service_dict = self.host_parameters.get('services', {}).get(service)
if service_dict is None:
raise exceptions.MissingServiceSectionException(service=service)
self.service = service
self.config_path = getattr(conf, 'config_path', None)
self.start_command = getattr(conf, 'start_command', None)
self.stop_command = getattr(conf, 'stop_command', None)
self.mask_command = getattr(conf, 'mask_command', None)
self.unmask_command = getattr(conf, 'unmask_command', None)
self.config_path = service_dict.get('config_path')
self.start_command = service_dict.get('start_command')
self.stop_command = service_dict.get('stop_command')
self.mask_command = service_dict.get('mask_command')
self.unmask_command = service_dict.get('unmask_command')
@contextlib.contextmanager
def config_options(self, *opts):
@@ -222,10 +229,7 @@ class NovaServiceManager(ServiceManager):
"""
def __init__(self, host, service, services_client):
super(NovaServiceManager, self).__init__(
whitebox_utils.get_ctlplane_address(host),
service
)
super(NovaServiceManager, self).__init__(host, service)
self.services_client = services_client
self.host = host

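After this change ServiceManager no longer reads a whitebox-&lt;service&gt; oslo.config group; it pulls the per-service commands from the host's entry in the nodes YAML, raising MissingServiceSectionException when the section is absent. A condensed sketch of that lookup, assuming a pre-loaded nodes dict shaped like compute_nodes.yaml and a simplified exception type:

nodes = {
    "compute-0.example.com": {
        "services": {
            "libvirt": {
                "start_command": "systemctl start libvirtd",
                "stop_command": "systemctl stop libvirtd",
            },
        },
    },
}

def service_settings(host, service):
    # Mirrors the lookup above; the plugin raises MissingServiceSectionException.
    service_dict = nodes.get(host, {}).get("services", {}).get(service)
    if service_dict is None:
        raise LookupError("no '%s' section for host %s" % (service, host))
    return service_dict

print(service_settings("compute-0.example.com", "libvirt")["stop_command"])
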
View File

@@ -17,6 +17,7 @@ import six
from oslo_serialization import jsonutils
from tempest import config
from whitebox_tempest_plugin import exceptions
import yaml
if six.PY2:
import contextlib2 as contextlib
@@ -24,6 +25,7 @@ else:
import contextlib
CONF = config.CONF
_nodes = None
def normalize_json(json):
@@ -70,3 +72,12 @@ def get_ctlplane_address(compute_hostname):
return CONF.whitebox.ctlplane_addresses[compute_hostname]
raise exceptions.CtrlplaneAddressResolutionError(host=compute_hostname)
def get_host_details(host):
global _nodes
if _nodes is None:
nodes_location = CONF.whitebox.nodes_yaml
with open(nodes_location, "r") as f:
_nodes = yaml.safe_load(f)
return _nodes.get(host)
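
The module-level _nodes cache is the "global nodes variable" of the commit title: the YAML file is opened and parsed on the first get_host_details call only, and every later call reuses the parsed dict; unknown hosts yield None. A standalone sketch of the same lazy-load pattern (the path argument is illustrative, the plugin takes it from CONF.whitebox.nodes_yaml):

import yaml

_nodes = None  # populated on first lookup, reused afterwards

def get_host_details(host, nodes_yaml="compute_nodes.yaml"):
    global _nodes
    if _nodes is None:  # first call: parse the YAML once
        with open(nodes_yaml) as f:
            _nodes = yaml.safe_load(f)
    return _nodes.get(host)  # per-host dict, or None if the host is absent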