diff --git a/whitebox_neutron_tempest_plugin/tests/scenario/base.py b/whitebox_neutron_tempest_plugin/tests/scenario/base.py index 8b79b75..e0634de 100644 --- a/whitebox_neutron_tempest_plugin/tests/scenario/base.py +++ b/whitebox_neutron_tempest_plugin/tests/scenario/base.py @@ -90,7 +90,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase): cls.master_node_client = cls.proxy_host_client cls.master_cont_cmd_executor = cls.proxy_host_client.exec_command cls.neutron_api_prefix = '{} rsh {} '.format( - cls.OC, cls.get_pod_of_service()) + cls.OC, cls.get_pods_of_service()[0]) else: LOG.warning(("Unrecognized deployer tool '{}', plugin supports " "openstack_type as devstack/podified.".format( @@ -342,24 +342,26 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase): return node[setting] @classmethod - def get_pod_of_service(cls, service='neutron'): - pods_list = ("{} get pods -o=name " - "--field-selector=status.phase=Running".format(cls.OC)) + def get_pods_of_service(cls, service='neutron', pod_state='Running'): + # pod_state as empty string can be used to get all pod states + if pod_state: + pod_state = "--field-selector=status.phase={}".format(pod_state) + pods_list = "{} get pods -o=name {}".format(cls.OC, pod_state) if service == 'neutron': filters = "cut -d'/' -f 2 | grep ^neutron | grep -v meta" else: filters = "cut -d'/' -f 2 | grep {}".format(service) - # return only the first result, in case of several replicas - return cls.proxy_host_client.exec_command( - "{} | {}".format(pods_list, filters)).splitlines()[0].strip() + pods_output = cls.proxy_host_client.exec_command( + "{} | {}; true".format(pods_list, filters)) + return [pod.strip() for pod in pods_output.splitlines()] @classmethod def get_configs_of_service(cls, service='neutron'): # (rsafrono) at this moment only neutron configs were handled # since it's the only service that existing tests are using if service == 'neutron': - pod = cls.get_pod_of_service(service) + pod = 
cls.get_pods_of_service(service)[0] return cls.proxy_host_client.exec_command( '{} rsh {} find {} -type f'.format( cls.OC, pod, os.path.split( @@ -387,19 +389,68 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase): raise cls.skipException( "Setting computes configuration not supported yet on " "podified setups (TODO).") - patch_buffer = ''' - spec: - {}: - template: - customServiceConfig: | - [{}] - {} = {}'''.format( - service, section, param, value) + service_pod = cls.get_pods_of_service(service)[0] + # TODO(mblue): process ini in python instead of crudini command, + # without depending on hardcoded conf filenames, crudini bin in pod + custom_file = '02-neutron-custom.conf' + # combine configuration to stdout using mutable copy in service pod + # NOTE(mblue): 'bash -c' needed for 'oc rsh' to execute a few + # commands in pod shell session (instead of outside session). + combine_conf_cmd = ( + "{0} rsh {1} bash -c '" + "cp /etc/neutron/neutron.conf.d/{2} /tmp/ && " + "chmod g+w /tmp/{2} && " + "crudini --set --output=- /tmp/{2} {3} {4} {5}'").format( + cls.OC, service_pod, + custom_file, section, param, value) + combined_conf = cls.proxy_host_client.exec_command( + combine_conf_cmd) + # correct indentation required for configuration to be accepted + combined_conf_ind = combined_conf.replace('\n', '\n' + 8 * ' ') + patch_buffer = ( + 'spec:\n' + ' {}:\n' + ' template:\n' + ' customServiceConfig: |\n' + ' {}\n' + ).format( + service, + combined_conf_ind) cmd = ("{0} patch $({0} get oscp -o name) --type=merge " "--patch '".format(cls.OC) + patch_buffer + "'") LOG.debug("Set configuration command:\n%s", cmd) output = cls.proxy_host_client.exec_command(cmd) LOG.debug("Output:\n%s", output) + # TODO(mblue): Add another check using network agent list + # status with neutron api (as was in downstream code). 
+ + # wait until old service pod is fully replaced + def _service_pod_replaced(): + _service_pods = cls.get_pods_of_service( + service=service, + pod_state='') + term_service_pods = cls.get_pods_of_service( + service=service, + pod_state='Terminating') + # conditions: + # 1) any service pod listed + # 2) old service pod removed (in case replacement didn't start) + # 3) no terminating service pods (replacement finished) + return len(_service_pods) > 0 and \ + service_pod not in _service_pods and \ + len(term_service_pods) == 0 + _timeout = 120 + common_utils.wait_until_true( + _service_pod_replaced, + timeout=_timeout, + sleep=10, + exception=RuntimeError( + "'{}' pod not replaced in {} seconds:\n{}".format( + service_pod, + _timeout, + cls.get_pods_of_service( + service=service, + pod_state='')))) else: cls.run_group_cmd( 'sudo crudini --set {} {} {} {} && sudo sync'.format( @@ -430,7 +481,7 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase): if WB_CONF.openstack_type == 'podified': service_prefix = "{} rsh {}".format( - cls.OC, cls.get_pod_of_service(service)) + cls.OC, cls.get_pods_of_service(service)[0]) else: service_prefix = "" cmd_prefix = "crudini --get" diff --git a/whitebox_neutron_tempest_plugin/tests/scenario/test_sriov_provider_network.py b/whitebox_neutron_tempest_plugin/tests/scenario/test_sriov_provider_network.py index 193b382..edca285 100644 --- a/whitebox_neutron_tempest_plugin/tests/scenario/test_sriov_provider_network.py +++ b/whitebox_neutron_tempest_plugin/tests/scenario/test_sriov_provider_network.py @@ -105,7 +105,7 @@ class ProviderNetworkSriovBaseTest(base.ProviderBaseTest): # I tried to avoid hard-coding existing cell1 that exist on VA1 # podified environment since the name can probably change # or more than one cell will be supported in the future - nova_scheduler_pod = cls.get_pod_of_service("nova-scheduler") + nova_scheduler_pod = cls.get_pods_of_service("nova-scheduler")[0] cells = cls.proxy_host_client.exec_command( "{} 
rsh {} nova-manage cell_v2 list_hosts | grep compute | " "tr -d '|' | tr -s ' ' ".format(cls.OC, nova_scheduler_pod) + "| " @@ -113,8 +113,8 @@ class ProviderNetworkSriovBaseTest(base.ProviderBaseTest): if len(cells) != 1: cls.fail("Currently only environments with a single cell " "are supported") - galera_pod = cls.get_pod_of_service( - 'openstack-{}-galera-0'.format(cells[0])) + galera_pod = cls.get_pods_of_service( + 'openstack-{}-galera-0'.format(cells[0]))[0] galera_db_exec = "{} rsh {}".format(cls.OC, galera_pod) data_filter = ".data.Nova{}DatabasePassword|base64decode".format( cells[0].capitalize())