add test_sequentially_hard_reboot_controllers_recovery test
Change-Id: I4e580b2859dda32272edbb09b2c65f1b2d7952a1
parent 12db48915f
commit bb6f4c4faf
@@ -10,6 +10,7 @@ from tobiko.tripleo import topology as tripleo_topology
 from tobiko.openstack import keystone
 from tobiko.tripleo import pacemaker
 from oslo_log import log
+from tobiko.tests.faults.ha import test_cloud_recovery


 LOG = log.getLogger(__name__)
@@ -83,8 +84,16 @@ def reset_all_controller_nodes(disrupt_method=hard_reset_method,
                                  exclude_list=exclude_list)


+def reset_all_controller_nodes_sequentially(disrupt_method=hard_reset_method,
+                                            sequentially=True,
+                                            exclude_list=None):
+    disrupt_all_controller_nodes(disrupt_method=disrupt_method,
+                                 sequentially=sequentially,
+                                 exclude_list=exclude_list)
+
+
 def disrupt_all_controller_nodes(disrupt_method=hard_reset_method,
-                                 exclude_list=None):
+                                 sequentially=False, exclude_list=None):
     # reboot all controllers and wait for ssh Up on them
     # method : method of disruptino to use : reset | network_disruption
     # hard reset is simultaneous while soft is sequential
@@ -105,12 +114,11 @@ def disrupt_all_controller_nodes(disrupt_method=hard_reset_method,
         LOG.info('disrupt exec: {} on server: {}'.format(disrupt_method,
                                                          controller.name))
         tobiko.cleanup_fixture(controller.ssh_client)
-    for controller in topology.list_openstack_nodes(group='controller'):
-        controller_checked = sh.execute("hostname",
-                                        ssh_client=controller.ssh_client,
-                                        expect_exit_status=None).stdout
-        LOG.info('{} is up '.format(controller_checked))
+        if sequentially:
+            test_cloud_recovery.check_overcloud_node_responsive(controller)
+    if not sequentially:
+        for controller in topology.list_openstack_nodes(group='controller'):
+            test_cloud_recovery.check_overcloud_node_responsive(controller)


 def get_main_vip():
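With sequentially=True each controller is hard reset and must answer a hostname probe before the next one is touched; with the default sequentially=False all controllers are still reset back to back and only then is each of them polled. A minimal, self-contained sketch of that control flow, where reboot and is_responsive are hypothetical callables standing in for the SSH-based tobiko helpers and the timeout values are only illustrative:

import time


def disrupt_nodes(nodes, reboot, is_responsive, sequentially=False,
                  timeout=600, interval=10):
    def wait_for(node):
        deadline = time.time() + timeout
        while not is_responsive(node):
            if time.time() > deadline:
                raise TimeoutError('{} did not come back'.format(node))
            time.sleep(interval)

    for node in nodes:
        reboot(node)          # fire-and-forget reset of this node
        if sequentially:
            wait_for(node)    # block until it answers before the next reset
    if not sequentially:
        for node in nodes:    # all nodes were reset back to back;
            wait_for(node)    # now wait for every one of them to answer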
@@ -1,6 +1,7 @@
 from __future__ import absolute_import

 import random
+from oslo_log import log

 import testtools
 from tobiko.shell import ping
@@ -16,6 +17,9 @@ from tobiko.openstack import stacks
 import tobiko


+LOG = log.getLogger(__name__)
+
+
 def overcloud_health_checks(passive_checks_only=False):
     # this method will be changed in future commit
     check_pacemaker_resources_health()
@@ -51,6 +55,13 @@ def check_vm_create(stack_name='stack{}'.format(random.randint(0, 1000000))):
         stack.floating_ip_address).assert_replied()


+def check_overcloud_node_responsive(node):
+    """wait until we get response for hostname command"""
+    hostname_check = sh.execute("hostname", ssh_client=node.ssh_client,
+                                expect_exit_status=None).stdout
+    LOG.info('{} is up '.format(hostname_check))
+
+
 # check cluster failed statuses
 def check_pacemaker_resources_health():
     return pacemaker.PacemakerResourcesStatus().all_healthy
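The new helper runs a single hostname command through sh.execute; passing expect_exit_status=None keeps the call from failing on a non-zero exit while the node is still booting. For illustration, a self-contained sketch of the same intent written as an explicit retry loop (an assumption for clarity, not how tobiko implements the wait):

import time


def wait_for_hostname(run_hostname, timeout=600, interval=10):
    """Poll a hostname probe until it returns something truthy.

    run_hostname is a hypothetical callable returning the node's hostname,
    or None/'' while the node is still unreachable.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        hostname = run_hostname()
        if hostname:
            return hostname
        time.sleep(interval)
    raise TimeoutError('node did not answer "hostname" before the timeout')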
@@ -80,6 +91,11 @@ class RebootTripleoNodesTest(testtools.TestCase):
         cloud_disruptions.reset_all_controller_nodes()
         overcloud_health_checks()

+    def test_sequentially_hard_reboot_controllers_recovery(self):
+        overcloud_health_checks()
+        cloud_disruptions.reset_all_controller_nodes_sequentially()
+        overcloud_health_checks()
+
     def test_reboot_computes_recovery(self):
         overcloud_health_checks()
         cloud_disruptions.reset_all_compute_nodes(hard_reset=True)
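Like the existing reboot tests, the new test brackets the disruption with health checks: verify the overcloud, inject the fault, then verify recovery. The same pattern in isolation, with hypothetical stand-ins for the tobiko helpers:

import testtools


def overcloud_is_healthy():
    return True    # stand-in for overcloud_health_checks()


def sequential_hard_reboot_controllers():
    pass           # stand-in for reset_all_controller_nodes_sequentially()


class SequentialRebootRecoveryPattern(testtools.TestCase):

    def test_recovery_after_sequential_hard_reboot(self):
        self.assertTrue(overcloud_is_healthy())   # healthy before the fault
        sequential_hard_reboot_controllers()      # inject the disruption
        self.assertTrue(overcloud_is_healthy())   # and healthy again afterwards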