Fix podified configuration set and wait w/o overwrite

The neutron pod, like any other service pod, contains a separate custom
configuration buffer for non-default configuration.

Any configuration change should first retrieve the existing configuration
and append the new settings to it; this change adds such a fix so that
previous settings are not deleted.

This fix also waits for the old service pod to be replaced before allowing
the test to continue.
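For illustration only (not part of this change), a minimal Python sketch of
the merge-then-wait idea follows. The helper names, the sample config text,
and the generic polling helper are hypothetical; the actual change uses
crudini inside the service pod and common_utils.wait_until_true.

# Minimal sketch (hypothetical helpers, illustration only): merge a new
# option into the already-retrieved custom configuration instead of
# overwriting it, then poll until a replacement condition holds.
import configparser
import io
import time


def merge_custom_config(existing_conf, section, param, value):
    # Keep every previously set option; only add/update the requested one.
    parser = configparser.ConfigParser()
    parser.read_string(existing_conf)
    if section != 'DEFAULT' and not parser.has_section(section):
        parser.add_section(section)
    parser.set(section, param, value)
    out = io.StringIO()
    parser.write(out)
    return out.getvalue()


def wait_until_true(predicate, timeout=120, sleep=10):
    # Simplified stand-in for common_utils.wait_until_true.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(sleep)
    raise RuntimeError("condition not met within {} seconds".format(timeout))


# Example: a previously set option (debug) survives the new one being added.
existing = "[DEFAULT]\ndebug = True\n"
print(merge_custom_config(existing, "DEFAULT", "dhcp_load_type", "networks"))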

Change-Id: I4028c9f9fed3d38c3bcaa698e3a119a9656b0886
Maor Blaustein 2024-07-02 00:11:39 +03:00
parent c1c17025c6
commit a3488f9cae
2 changed files with 71 additions and 20 deletions


@@ -90,7 +90,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
cls.master_node_client = cls.proxy_host_client
cls.master_cont_cmd_executor = cls.proxy_host_client.exec_command
cls.neutron_api_prefix = '{} rsh {} '.format(
cls.OC, cls.get_pod_of_service())
cls.OC, cls.get_pods_of_service()[0])
else:
LOG.warning(("Unrecognized deployer tool '{}', plugin supports "
"openstack_type as devstack/podified.".format(
@@ -342,24 +342,26 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
return node[setting]
@classmethod
def get_pod_of_service(cls, service='neutron'):
pods_list = ("{} get pods -o=name "
"--field-selector=status.phase=Running".format(cls.OC))
def get_pods_of_service(cls, service='neutron', pod_state='Running'):
# pod_state as empty string can be used to get all pod states
if pod_state:
pod_state = "--field-selector=status.phase={}".format(pod_state)
pods_list = "{} get pods -o=name {}".format(cls.OC, pod_state)
if service == 'neutron':
filters = "cut -d'/' -f 2 | grep ^neutron | grep -v meta"
else:
filters = "cut -d'/' -f 2 | grep {}".format(service)
# return only the first result, in case of several replicas
return cls.proxy_host_client.exec_command(
"{} | {}".format(pods_list, filters)).splitlines()[0].strip()
pods_output = cls.proxy_host_client.exec_command(
"{} | {}; true".format(pods_list, filters))
return [pod.strip() for pod in pods_output.splitlines()]
@classmethod
def get_configs_of_service(cls, service='neutron'):
# (rsafrono) at this moment only neutron configs were handled
# since it's the only service that existing tests are using
if service == 'neutron':
pod = cls.get_pod_of_service(service)
pod = cls.get_pods_of_service(service)[0]
return cls.proxy_host_client.exec_command(
'{} rsh {} find {} -type f'.format(
cls.OC, pod, os.path.split(
@@ -387,19 +389,68 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
raise cls.skipException(
"Setting computes configuration not supported yet on "
"podified setups (TODO).")
patch_buffer = '''
spec:
{}:
template:
customServiceConfig: |
[{}]
{} = {}'''.format(
service, section, param, value)
service_pod = cls.get_pods_of_service(service)[0]
# TODO(mblue): process ini in python instead of crudini command,
# without depending on hardcoded conf filenames, crudini bin in pod
custom_file = '02-neutron-custom.conf'
# combine configuration to stdout using mutable copy in service pod
# NOTE(mblue): 'bash -c' needed for 'oc rsh' to execute a few
# commands in pod shell session (instead of outside session).
combine_conf_cmd = (
"{0} rsh {1} bash -c '"
"cp /etc/neutron/neutron.conf.d/{2} /tmp/ && "
"chmod g+w /tmp/{2} && "
"crudini --set --output=- /tmp/{2} {3} {4} {5}'").format(
cls.OC, service_pod,
custom_file, section, param, value)
combined_conf = cls.proxy_host_client.exec_command(
combine_conf_cmd)
# correct indentation required for configuration to be accepted
combined_conf_ind = combined_conf.replace('\n', '\n' + 8 * ' ')
patch_buffer = (
'spec:\n'
' {}:\n'
' template:\n'
' customServiceConfig: |\n'
' {}\n'
).format(
service,
combined_conf_ind)
cmd = ("{0} patch $({0} get oscp -o name) --type=merge "
"--patch '".format(cls.OC) + patch_buffer + "'")
LOG.debug("Set configuration command:\n%s", cmd)
output = cls.proxy_host_client.exec_command(cmd)
LOG.debug("Output:\n%s", output)
# TODO(mblue): Add another check using network agent list
# status with neutron api (as was in downstream code).
# wait until old service pod is fully replaced
def _service_pod_replaced():
_service_pods = cls.get_pods_of_service(
service=service,
pod_state='')
term_service_pods = cls.get_pods_of_service(
service=service,
pod_state='Terminating')
# conditions:
# 1) any service pod listed
# 2) old service pod removed (in case replacement didn't start)
# 3) no terminating service pods (replacement finished)
return len(_service_pods) > 0 and \
service_pod not in _service_pods and \
len(term_service_pods) == 0
_timeout = 120
common_utils.wait_until_true(
_service_pod_replaced,
timeout=_timeout,
sleep=10,
exception=RuntimeError(
"'{}' pod not replaced in {} seconds:\n{}".format(
service_pod,
_timeout,
cls.get_pods_of_service(
service=service,
pod_state=''))))
else:
cls.run_group_cmd(
'sudo crudini --set {} {} {} {} && sudo sync'.format(
@@ -430,7 +481,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
if WB_CONF.openstack_type == 'podified':
service_prefix = "{} rsh {}".format(
cls.OC, cls.get_pod_of_service(service))
cls.OC, cls.get_pods_of_service(service)[0])
else:
service_prefix = ""
cmd_prefix = "crudini --get"
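For illustration only, a hypothetical sketch of what the merge patch
assembled in the hunk above amounts to once the combined configuration is
embedded under customServiceConfig; the sample option values are made up,
and the JSON form shown here is equivalent to the YAML patch buffer built
in the code.

# Hypothetical sketch (illustration only): the merge patch applied to the
# OpenStackControlPlane carries the *combined* custom config, so options
# set earlier (e.g. debug) survive alongside the newly added one.
import json

combined_conf = "[DEFAULT]\ndebug = True\ndhcp_load_type = networks\n"
patch = {
    "spec": {
        "neutron": {
            "template": {
                "customServiceConfig": combined_conf,
            }
        }
    }
}
cmd = ("oc patch $(oc get oscp -o name) --type=merge --patch '{}'"
       .format(json.dumps(patch)))
print(cmd)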


@@ -105,7 +105,7 @@ class ProviderNetworkSriovBaseTest(base.ProviderBaseTest):
# I tried to avoid hard-coding existing cell1 that exist on VA1
# podified environment since the name can probably change
# or more than one cell will be supported in the future
nova_scheduler_pod = cls.get_pod_of_service("nova-scheduler")
nova_scheduler_pod = cls.get_pods_of_service("nova-scheduler")[0]
cells = cls.proxy_host_client.exec_command(
"{} rsh {} nova-manage cell_v2 list_hosts | grep compute | "
"tr -d '|' | tr -s ' ' ".format(cls.OC, nova_scheduler_pod) + "| "
@@ -113,8 +113,8 @@ class ProviderNetworkSriovBaseTest(base.ProviderBaseTest):
if len(cells) != 1:
cls.fail("Currently only environments with a single cell "
"are supported")
galera_pod = cls.get_pod_of_service(
'openstack-{}-galera-0'.format(cells[0]))
galera_pod = cls.get_pods_of_service(
'openstack-{}-galera-0'.format(cells[0]))[0]
galera_db_exec = "{} rsh {}".format(cls.OC, galera_pod)
data_filter = ".data.Nova{}DatabasePassword|base64decode".format(
cells[0].capitalize())