James Parker · Pass hostname to NovaServiceManager
Default TripleO deployments use compute domain names when looking up
nova service binaries; this lookup fails when a compute's control plane
IP address is used instead. To allow the recent change [1] to run
downstream, this commit updates how parameters are passed to
NovaServiceManager's init: the compute's hostname is now passed in
place of its IP address. During init the compute's control plane IP
address is resolved and handed to the manager's SSHClient, while nova
service lookups use the provided hostname rather than the IP address.
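
Roughly, the new flow looks like the sketch below. The constructor
arguments and attribute names are illustrative assumptions, not the
plugin's exact code:

from whitebox_tempest_plugin import utils as whitebox_utils


class NovaServiceManager(object):
    """Sketch of the described flow, not the actual implementation."""

    def __init__(self, host, service, services_client):
        # Keep the compute hostname for nova service lookups ...
        self.host = host
        self.service = service
        self.services_client = services_client
        # ... and resolve the control plane IP for SSH access to the
        # compute (the real class hands this to its SSHClient).
        self.ssh_address = whitebox_utils.get_ctlplane_address(host)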

The get_ctlplane_address() function was moved from api.compute.base to
utils, since services.clients now needs this functionality as well. All
test case calls to get_ctlplane_address() have been updated to use the
new module path, and the unit test modules test_base and test_utils
were updated to reflect these changes.
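
For callers the change is only the module path; a short sketch with an
illustrative server dict (the old import path is inferred from the
description above):

from whitebox_tempest_plugin import utils as whitebox_utils

# previously: from whitebox_tempest_plugin.api.compute import base
#             host_ip = base.get_ctlplane_address(...)
server = {'OS-EXT-SRV-ATTR:host': 'compute-0.localdomain'}  # illustrative
host_ip = whitebox_utils.get_ctlplane_address(server['OS-EXT-SRV-ATTR:host'])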

Lastly, test cases interfacing with NovaServiceManager have been
updated to store both the compute's hostname and its associated control
plane IP address; previously these tests stored only the compute's
control plane address.

[1] https://review.opendev.org/#/c/736820/

Change-Id: I4d9330cf8abcb6ba3c0852e6ce3db732e468c6a5
2020-08-19 16:59:16 -04:00

# Copyright 2019 Red Hat
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testtools

from tempest.api.compute.volumes import test_attach_volume
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config

from whitebox_tempest_plugin.api.compute import base
from whitebox_tempest_plugin.services import clients
from whitebox_tempest_plugin import utils as whitebox_utils

CONF = config.CONF


class VolumesAdminNegativeTest(base.BaseWhiteboxComputeTest,
                               test_attach_volume.BaseAttachVolumeTest):

    @classmethod
    def skip_checks(cls):
        super(VolumesAdminNegativeTest, cls).skip_checks()
        if not CONF.service_available.cinder:
            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
            raise cls.skipException(skip_msg)

    @classmethod
    def setup_credentials(cls):
        cls.prepare_instance_network()
        super(VolumesAdminNegativeTest, cls).setup_credentials()

    @testtools.skipUnless(
        CONF.validation.run_validation,
        'ssh to instance will not work without run validation enabled.')
    def test_detach_failure(self):
        """Assert that volumes remain in-use and attached after detach failure
        """
        server, validation_resources = self._create_server()
        # NOTE: Create one remote client used throughout the test.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server, validation_resources),
            self.image_ssh_user,
            self.image_ssh_password,
            validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.servers_client)
        # NOTE: We need to ensure the ssh key has been injected in the
        # guest before we power cycle
        linux_client.validate_authentication()
        disks_before_attach = linux_client.list_disks()

        volume = self.create_volume()
        # Attach the volume
        attachment = self.attach_volume(server, volume)
        waiters.wait_for_volume_resource_status(
            self.volumes_client, attachment['volumeId'], 'in-use')
        disks_after_attach = linux_client.list_disks()
        self.assertGreater(
            len(disks_after_attach),
            len(disks_before_attach))

        host = whitebox_utils.get_ctlplane_address(
            server['OS-EXT-SRV-ATTR:host'])

        # stop the libvirt service
        clients.ServiceManager(host, 'libvirt').stop()

        # While this call to n-api will return successfully the underlying
        # call to the virt driver will fail as the libvirt service is stopped.
        self.servers_client.detach_volume(server['id'], attachment['volumeId'])
        waiters.wait_for_volume_resource_status(
            self.volumes_client, attachment['volumeId'], 'in-use')
        disks_after_failed_detach = linux_client.list_disks()
        self.assertEqual(
            len(disks_after_failed_detach), len(disks_after_attach))

        # restart libvirt after failed detach
        clients.ServiceManager(host, 'libvirt').restart()

        # This will be a successful detach as libvirt is started again
        self.servers_client.detach_volume(server['id'], attachment['volumeId'])
        waiters.wait_for_volume_resource_status(
            self.volumes_client, attachment['volumeId'], 'available')
        disks_after_detach = linux_client.list_disks()
        self.assertEqual(len(disks_before_attach), len(disks_after_detach))