Retire stackforge/haos

Monty Taylor 2015-10-17 16:03:09 -04:00
parent 7e3a9301f0
commit acd72879f7
57 changed files with 7 additions and 2512 deletions

.gitignore

@@ -1,21 +0,0 @@
*~
*.pyc
*.local
AUTHORS
ChangeLog
MANIFEST
dist/
.venv/
build/*
build-stamp
cover/*
doc/build/
doc/source/api/
*.egg-info
*.egg
.autogenerated
.coverage
.testrepository/
.tox/
.installed
.idea*


@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/haos.git


@@ -1,36 +0,0 @@
High Availability OpenStack (HAOS)
==================================
Introduction
------------
HAOS is a suite of HA/destructive tests for OpenStack clouds. The tests are
written as Rally plugins and are executed by Rally in parallel with
load/performance tests to simulate disaster/failover scenarios in an
OpenStack cloud. HAOS uses the HAOS agent for remote execution of commands
on OpenStack nodes and on virtual machines in the cloud.
How to install
--------------
1. Clone the repository:
``git clone git://git.openstack.org/stackforge/haos``
2. Make sure that ``sshpass`` is installed - for example, on Ubuntu execute the following command: ``sudo apt-get install sshpass``
3. Edit the ``etc/openrc.local`` file and set the IP addresses, credentials and parameters for your cloud
4. Import ``openrc`` into your environment by doing
``source etc/openrc.local``
5. Run tox:
``tox -e run``
How to run tests
----------------
Run scenario with the command:
``tox -e run <scenario>``
How to run tests on MOS environments
------------------------------------
Run scenario with the command:
``tox -e run-for-mos <scenario>``
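Putting the steps together, a typical session might look like the following
sketch (``<scenario>`` stands for whichever Rally task file you want to run;
the exact path is not shown here)::

    git clone git://git.openstack.org/stackforge/haos
    cd haos
    # sshpass is needed for password-based ssh access to the nodes
    sudo apt-get install sshpass
    # fill in the IP addresses and credentials for your cloud first
    source etc/openrc.local
    # run one scenario, e.g. one of the task files shipped in the repository
    tox -e run <scenario>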

README.rst

@@ -0,0 +1,7 @@
This project is no longer maintained.
The contents of this repository are still available in the Git source code
management system. To see the contents of this repository before it reached
its end of life, please check out the previous commit with
"git checkout HEAD^1".


@@ -1,23 +0,0 @@
#!/bin/sh
export OS_NO_CACHE='true'
export OS_TENANT_NAME='admin'
export OS_USERNAME='admin'
export OS_PASSWORD='admin'
export OS_AUTH_STRATEGY='keystone'
export OS_REGION_NAME='RegionOne'
export CINDER_ENDPOINT_TYPE='publicURL'
export GLANCE_ENDPOINT_TYPE='publicURL'
export KEYSTONE_ENDPOINT_TYPE='publicURL'
export NOVA_ENDPOINT_TYPE='publicURL'
export NEUTRON_ENDPOINT_TYPE='publicURL'
export OS_ENDPOINT_TYPE='publicURL'
export MURANO_REPO_URL='http://catalog.openstack.org/'
export OS_AUTH_URL=
export FUEL_HOST=
export FUEL_USERNAME='root'
export FUEL_PASSWORD='r00tme'
export HAOS_IMAGE="haos-image"
export HAOS_FLAVOR="haos-flavor"
export HAOS_SERVER_ENDPOINT=


@@ -1 +0,0 @@
__author__ = 'kkuznetsova'


@@ -1 +0,0 @@
__author__ = 'kkuznetsova'


@@ -1 +0,0 @@
__author__ = 'kkuznetsova'


@@ -1,79 +0,0 @@
import os
from rally.benchmark.context import base
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.common import log as logging
from rally import consts
from rally import exceptions
from haos.remote import server
from haos.remote import ssh_remote_control
LOG = logging.getLogger(__name__)
@base.context(name="cloud_nodes", order=800)
class CloudNodesContext(base.Context):
"""This context allows to define the list of nodes in the cloud."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"controllers": {
"type": "array",
"default": []
},
"power_control_node": {
"type": "object",
"default": {}
},
"remote_control_type": {
"type": "string",
"default": "ssh"
}
}
}
def setup(self):
"""This method is called before the task start."""
self.context["controllers"] = self.config.get("controllers")
remote_control_type = self.config.get("remote_control_type")
self.context["remote_control_type"] = remote_control_type
power_control_node = self.config.get("power_control_node")
self.context["power_control_node"] = power_control_node
env_vars = {
'HAOS_SERVER_ENDPOINT': None,
'HAOS_IMAGE': None,
'HAOS_FLAVOR': None,
'HAOS_JOIN_TIMEOUT': 100,
'HAOS_COMMAND_TIMEOUT': 10
}
for var, def_value in env_vars.items():
value = os.environ.get(var) or def_value
if value:
self.context[var.lower()] = value
else:
LOG.debug('Env var %s must be set', var)
if self.context["remote_control_type"] == "ssh":
ssh = ssh_remote_control.SSHConnection()
self.context["haos_remote_control"] = ssh.remote_control
elif self.context["remote_control_type"] == "haos_agents":
boss_inst = server.Server(self.context["haos_server_endpoint"])
self.context["haos_remote_control"] = boss_inst.remote_control
else:
msg = "remote_control_type {0} doesn't implemented yet.".format(
self.context["remote_control_type"]
)
raise exceptions.RallyException(msg)
def cleanup(self):
"""This method is called after the task finish."""
self.context["controllers"] = []
resource_manager.cleanup(names=["nova.servers"],
users=self.context.get("users", []))


@@ -1,28 +0,0 @@
from rally.benchmark.context import base
from rally import consts
@base.context(name="recover_cloud", order=900)
class CloudNodesContext(base.Context):
"""This context allows to recover cloud after disaster tests."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"checks": {
"type": "array",
"default": []
}
}
}
def setup(self):
"""This method is called before the task start."""
self.context["recover_commands"] = []
self.context["checks"] = self.config.get("checks", [])
def cleanup(self):
"""This method is called after the task finish."""
pass


@@ -1 +0,0 @@
__author__ = 'kkuznetsova'


@@ -1,317 +0,0 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
from rally.benchmark.scenarios.neutron import utils as neutron_utils
from rally.benchmark.scenarios.nova import utils as nova_utils
from rally.benchmark.scenarios.vm import utils as vm_utils
from rally.benchmark import types
from rally.common import log as logging
import testtools
from haos.rally import utils
LOG = logging.getLogger(__name__)
class BaseDisaster(neutron_utils.NeutronScenario,
nova_utils.NovaScenario,
vm_utils.VMScenario,
testtools.TestCase):
def wait_shaker_agent(self, agent_id, timeout=300):
result = utils.run_command(self.context, agent_id, "hostname",
executor="shaker", timeout=timeout)
LOG.debug(result)
def boot_server(self, name, nics=None):
USER_DATA = """#!/bin/bash
shaker-agent --agent-id %(agent_id)s \
--server-endpoint %(server_endpoint)s --debug \
--log-file /var/log/shaker.log
"""
shaker_endpoint = self.context['shaker_endpoint']
userdata = USER_DATA % dict(agent_id=name,
server_endpoint=shaker_endpoint)
kwargs = {"userdata": userdata}
if nics is not None:
kwargs['nics'] = nics
vm = self._boot_server(name=name,
image_id=self.context["shaker_image"],
flavor_id=self.context["default_flavor"],
auto_assign_nic=True,
**kwargs)
self.wait_shaker_agent(name, timeout=850)
return vm
def boot_server_with_agent(self, network_id):
flavor_id = types.FlavorResourceType.transform(
clients=self._clients,
resource_config={'name': self.context["haos_flavor"]})
image_id = types.ImageResourceType.transform(
clients=self._clients,
resource_config={'name': self.context["haos_image"]})
kwargs = {'nics': [{"net-id": network_id}]}
server = self._boot_server(image_id=image_id,
flavor_id=flavor_id, auto_assign_nic=True,
**kwargs)
# extend server instance with helpers
server.get_agent_id = functools.partial(utils.get_server_agent_id,
server=server)
# wait for agent to become active
timeout = time.time() + self.context['haos_join_timeout']
agent_id = server.get_agent_id()
active = None
while not active and time.time() < timeout:
active = self.run_remote_command(agent_id, 'hostname')
self.assertIsNotNone(active, 'Server is expected to be alive')
LOG.info('Server %s is up and agent is running', server.name)
return server
def power_off_controller(self, controller):
control_node = self.context["power_control_node"]
utils.run_command(self.context, control_node["agent_endpoint"],
command=controller["hardware_power_off_cmd"],
recover_command=controller["hardware_power_on_cmd"],
recover_timeout=controller["power_on_timeout"])
time.sleep(controller["power_off_timeout"])
# This function creates router, network, subnet and joins them
def create_network_subnet_router(self):
self._clients = self._admin_clients
router = self._create_router({}, external_gw=True)
network, subnets = self._create_network_and_subnets()
self._add_interface_router(subnets[0]["subnet"], router["router"])
return network, subnets, router
# This function associates a floating IP with the given VM
def associate_floating_ip(self, server=None):
self._clients = self._admin_clients
nets = self._list_networks()
for network in nets:
if network["router:external"]:
external_network = network
self._attach_floating_ip(server, external_network)
return
# This function returns the floating IP of the given VM on the given network
def define_floating_ip_for_vm(self, vm, net_name):
# vm - instance: type(vm) = <class 'novaclient.v2.servers.Server'>
# net_name - name of network on which we boot vm
addresses = vm.addresses[net_name]
for address in addresses:
if address["OS-EXT-IPS:type"] == 'floating':
return address["addr"]
return None
# This function returns the fixed (internal) IP of the given VM
# on the given network
def define_fixed_ip_for_vm(self, vm, net_name):
# vm - instance: type(vm) = <class 'novaclient.v2.servers.Server'>
# net_name - name of network on which we boot vm
addresses = vm.addresses[net_name]
for address in addresses:
if address["OS-EXT-IPS:type"] == 'fixed':
return address["addr"]
return None
# This function pings address_ip from the given server
def check_connectivity(self, server, address_ip):
# server - the server from which we ping
# address_ip - the address to ping
command = "ping -W 5 -c1 %s 1>/dev/null;echo $?" % address_ip
output = self.run_remote_command(server, command)
return output and output[0] == "0"
# Get the node hosting the l3-agent for the given router via the Neutron API
def get_node_on_what_is_agent_for_router(self, router):
# router - router with type NeutronClient
router_id = router["router"]["id"]
neutron_client = self.clients("neutron")
agents = neutron_client.list_l3_agent_hosting_routers(router_id)
for agent in agents["agents"]:
return agent['host']
raise "Router hasn't any l3-agent"
# Add a TCP rule for port 22 and an ICMP rule
def add_rules_for_ping(self):
# self._clients = self._admin_clients
sec_groups = self._list_security_groups()
self.clients("nova").security_group_rules.create(
sec_groups[0].id,
from_port=22,
to_port=22,
ip_protocol="tcp",
cidr="0.0.0.0/0")
self.clients("nova").security_group_rules.create(
sec_groups[0].id,
from_port=-1,
to_port=-1,
ip_protocol="icmp",
cidr="0.0.0.0/0")
# Get the list of DHCP agents
def get_list_dhcp_agents(self):
list_agents = self.clients("neutron").list_agents()
list_dhcp_agents = []
for agent in list_agents["agents"]:
if agent["agent_type"] == "DHCP agent":
list_dhcp_agents.append(agent)
return list_dhcp_agents
# Get the list of L3 agents
def get_list_l3_agents(self):
list_agents = self.clients("neutron").list_agents()
list_l3_agents = []
for agent in list_agents["agents"]:
if agent["agent_type"] == "L3 agent":
list_l3_agents.append(agent)
return list_l3_agents
# Get dhcp agent for chosen network on chosen node
def get_dhcp_on_chosen_node(self, node, net_id):
"""Reschedule net to agent on the chosen node if it doesn't on it yet
:param node: controller, om which agent reascheduling is needed
:param net_id: id of network which we should check
"""
neutron_client = self.clients("neutron")
dhcp_agents = neutron_client.list_dhcp_agent_hosting_networks(net_id)
need_manually_rescheduling = True
for agent in dhcp_agents["agents"]:
if agent["host"] == node:
need_manually_rescheduling = False
break
if need_manually_rescheduling:
first_dhcp_agent_id = dhcp_agents["agents"][0]["id"]
neutron_client.remove_network_from_dhcp_agent(first_dhcp_agent_id,
net_id)
list_dhcp_agents = self.get_list_dhcp_agents()
need_agent = None
for agent in list_dhcp_agents:
if agent["host"] == node:
need_agent = agent
break
if need_agent:
agent_id = need_agent['id']
body = {"network_id": net_id}
neutron_client.add_network_to_dhcp_agent(dhcp_agent=agent_id,
body=body)
else:
raise Exception("No DHCP agent found on node %s" % node)
def get_l3_on_chosen_node(self, node, router_id):
"""Get l3 agent for chosen router on chosen node.
:param node: controller node on which the router should be
:param router_id: id of the router which should be rescheduled
:return: None
"""
neutron_client = self.clients("neutron")
l3_agents = neutron_client.list_l3_agent_hosting_routers(router_id)
need_manually_rescheduling = True
for agent in l3_agents["agents"]:
if agent["host"] == node:
need_manually_rescheduling = False
break
if need_manually_rescheduling:
first_l3_agent_id = l3_agents["agents"][0]["id"]
neutron_client.remove_router_from_l3_agent(first_l3_agent_id,
router_id)
list_l3_agents = self.get_list_l3_agents()
need_agent = None
for agent in list_l3_agents:
if agent["host"] == node:
need_agent = agent
break
if need_agent:
agent_id = need_agent['id']
body = {"router_id": router_id}
neutron_client.add_router_to_l3_agent(l3_agent=agent_id,
body=body)
else:
raise Exception("No L3 agent found on node %s" % node)
def check_reschedule_for_l3_on_node(self, node):
"""Check that routers reschedule from agents on node
:param node: node controller on which rescheduling is being checked
"""
list_l3_agents = self.get_list_l3_agents()
l3_for_node = None
for l3_agent in list_l3_agents:
if (l3_agent["host"] == node):
l3_for_node = l3_agent
if (l3_for_node is not None):
list_routers = self.clients(
"neutron").list_routers_on_l3_agent(l3_for_node["id"])
if len(list_routers) != 0:
raise Exception("Routers are still scheduled to the L3 agent "
"on node %s" % node)
else:
raise Exception("No L3 agent found on node %s" % node)
def check_reschedule_for_dhcp_on_node(self, node):
"""Check that networks and routers reschedule from agents on node
:param node: node controller on which rescheduling is being checked
"""
list_dhcp_agents = self.get_list_dhcp_agents()
dhcp_for_node = None
for dhcp_agent in list_dhcp_agents:
if (dhcp_agent["host"] == node):
dhcp_for_node = dhcp_agent
if (dhcp_for_node is not None):
list_networks = self.clients(
"neutron").list_networks_on_dhcp_agent(dhcp_for_node["id"])
if len(list_networks) != 0:
raise Exception("Networks are still scheduled to the DHCP agent "
"on node %s" % node)
else:
raise Exception("No DHCP agent found on node %s" % node)
def pick_network_id(self):
networks = self.context["tenant"].get("networks")
self.assertTrue(len(networks) >= 1,
'At least one network is expected in the tenant')
return networks[0]['id']
def kill_remote_process(self, host, process_name):
LOG.info('Kill process %s at host %s', process_name, host)
cmd = ("ps aux | grep '%s' | grep -v grep | awk '{print $2}'" %
process_name)
pid = self.run_remote_command(host, cmd)
LOG.debug('process pid: %s', pid)
self.run_remote_command(host, 'kill -9 %s' % pid)
def run_remote_command(self, host, command, timeout=None):
timeout = timeout or self.context.get('haos_command_timeout')
return self.context.get('haos_remote_control')(host, command, timeout)


@@ -1,250 +0,0 @@
# coding=utf-8
from rally.benchmark.scenarios import base
from haos.rally.plugin import base_disaster
from rally.common import log as logging
import time
LOG = logging.getLogger(__name__)
class NeutronL3Disaster(base_disaster.BaseDisaster):
def get_node_on_what_is_agent_for_router(self, router_id):
"""Return node on what is l3 agent for received router
:param router_id: id router for which find agent node
:return: name of node
"""
neutron_client = self.clients("neutron")
agents = neutron_client.list_l3_agent_hosting_routers(router_id)
if len(agents["agents"]) == 0:
raise Exception("Router doesn't have any l3-agent")
return agents['agents'][0]['host']
def ban_l3_agent_on_node(self, node):
"""Ban l3 agent on the received node
:param node: controller on which we should ban l3-agent
:return:
"""
command = "pcs resource ban p_neutron-l3-agent " + node
output = self.run_remote_command(node, command)
return output
# TODO(sbelous): write function wait some time
def wait_some_time(self):
pass
@base.scenario()
def ban_one_l3_agent(self):
"""Ban one Neutron L3 agent and verify cloud
Setup:
OpenStack cloud with at least 3 controllers 16
Scenario:
1. Define network1, networks2, which was created by install context
2. Define router1 and router2 id, which was also created by install
context
3. Boot vm1 in network1 and associate floating ip
4. Boot vm2 in network2 and associate floating ip
5. Add rules for ping
6. ping vm1 and vm2 from each other with floatings ip
7. get node with l3 agent on what is router1
8. ban this l3 agent on the node with pcs
9. wait some time
10. Boot vm3 in network1 and associate floating ip
11. ping vm1 and vm3 from each other with internal ip
12. ping vm2 and vm1 from each other with floating ip
13. ping vm2 and vm3 from each othe with floating ip
"""
# for test we need 2 networks in context
quantity_of_networks_for_test = 2
networks = self.context["tenant"].get("networks")
if networks is None:
message = "Networks haven't been created by the context for the " \
"test ban_one_l3_agent"
LOG.debug(message)
raise Exception(message)
if len(networks) < quantity_of_networks_for_test:
message = "Not enough networks for the test ban_one_l3_agent"
LOG.debug(message)
raise Exception(message)
network1 = networks[0]
network2 = networks[1]
print("net1 = " + network1['name'])
print("net2 = " + network2['name'])
router1_id = network1.get("router_id")
print("router1 = " + router1_id)
net1_id = network1["id"]
net2_id = network2["id"]
vm1 = self.boot_server_with_agent(net1_id)
vm2 = self.boot_server_with_agent(net2_id)
# Add rules to be able ping
self.add_rules_for_ping()
# floatingIp for VMs
self._attach_floating_ip(vm1, "net04_ext")
self._attach_floating_ip(vm2, "net04_ext")
# Define internal IP and floating IP
net1_name = network1["name"]
net2_name = network2["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Check connectivity
self.check_connectivity(vm2.get_agent_id(), "8.8.8.8")
self.check_connectivity(vm1.get_agent_id(), vm2_floating_ip)
self.check_connectivity(vm2.get_agent_id(), vm1_floating_ip)
# Check on what agents are router1 and ban this agent
node_with_agent = self.get_node_on_what_is_agent_for_router(router1_id)
self.ban_l3_agent_on_node(node=node_with_agent)
# TODO(sbelous): wait some time
self.wait_some_time()
self.check_reschedule_for_l3_on_node(node=node_with_agent)
vm3 = self.boot_server_with_agent(net1_id)
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
# Check connectivity
self.check_connectivity(vm3.get_agent_id(), "8.8.8.8")
self.check_connectivity(vm1.get_agent_id(), vm3_internal_ip)
self.check_connectivity(vm3.get_agent_id(), vm1_internal_ip)
self.check_connectivity(vm1.get_agent_id(), vm2_floating_ip)
self.check_connectivity(vm2.get_agent_id(), vm1_floating_ip)
self.check_connectivity(vm3.get_agent_id(), vm2_floating_ip)
@base.scenario()
def ban_some_l3_agents(self):
"""Ban some l3 agents
Setup:
OpenStack cloud with at least 3 controllers
Scenario:
4) Check ping 8.8.8.8
5) Ping each other with floatings ip
6) Ban l3-agents on which this new routers are
7) Boot one more vm3 in the first net
8) Check ping 8.8.8.8
9) Ping vm1, vm2 and vm3 with their floatings ip
10) from vm3 ping vm1 by internal ip
Scenario:
1. Define network1, networks2, which was created by install context
2. Define router1 and router2 id, which was also created by install
context
3. Boot vm1 in network1 and associate floating ip
4. Boot vm2 in network2 and associate floating ip
5. Add rules for ping
6. ping vm1 and vm2 from each other with floatings ip
7. get node with l3 agent on what is router1
8. ban this l3 agent on the node with pcs
9. wait some time
10. Boot vm3 in network1 and associate floating ip
11. ping vm1 and vm3 from each other with internal ip
12. ping vm2 and vm1 from each other with floating ip
13. ping vm2 and vm3 from each othe with floating ip
"""
# for test we need 2 networks in context
quantity_of_networks_for_test = 2
networks = self.context["tenant"].get("networks")
if networks is None:
message = "Networks haven't been created by the context for the " \
"test ban_some_l3_agents"
LOG.debug(message)
raise Exception(message)
if len(networks) < quantity_of_networks_for_test:
message = "Not enough networks for the test ban_some_l3_agents"
LOG.debug(message)
raise Exception(message)
network1 = networks[0]
network2 = networks[1]
print("net1 = " + network1['name'])
print("net2 = " + network2['name'])
router1_id = network1.get("router_id")
print("router1 = " + router1_id)
net1_id = network1["id"]
net2_id = network2["id"]
vm1 = self.boot_server_with_agent(net1_id)
vm2 = self.boot_server_with_agent(net2_id)
# Add rules to be able ping
self.add_rules_for_ping()
# floatingIp for VMs
self._attach_floating_ip(vm1, "net04_ext")
self._attach_floating_ip(vm2, "net04_ext")
# Define internal IP and floating IP
net1_name = network1["name"]
net2_name = network2["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Check connectivity
self.check_connectivity(vm2.get_agent_id(), "8.8.8.8")
self.check_connectivity(vm1.get_agent_id(), vm2_floating_ip)
self.check_connectivity(vm2.get_agent_id(), vm1_floating_ip)
quantity_of_l3_agents = len(self.get_list_l3_agents())
node_with_banned_l3_agents = []
print(self.get_list_l3_agents())
for i in xrange(quantity_of_l3_agents - 1):
# Check on what agents are router1 and ban this agent
node_with_agent = self.get_node_on_what_is_agent_for_router(
router1_id)
node_with_banned_l3_agents.append(node_with_agent)
self.ban_l3_agent_on_node(node=node_with_agent)
# TODO(sbelous): wait some time
self.wait_some_time()
time.sleep(30)
if not node_with_banned_l3_agents:
raise Exception("No L3 agents were banned")
for node_with_banned_agent in node_with_banned_l3_agents:
self.check_reschedule_for_l3_on_node(node_with_banned_agent)
vm3 = self.boot_server_with_agent(net1_id)
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
# Check connectivity
self.check_connectivity(vm3.get_agent_id(), "8.8.8.8")
self.check_connectivity(vm1.get_agent_id(), vm3_internal_ip)
self.check_connectivity(vm3.get_agent_id(), vm1_internal_ip)
self.check_connectivity(vm1.get_agent_id(), vm2_floating_ip)
self.check_connectivity(vm2.get_agent_id(), vm1_floating_ip)
self.check_connectivity(vm3.get_agent_id(), vm2_floating_ip)


@@ -1,71 +0,0 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import re
from rally.benchmark.scenarios import base
from rally.benchmark import validation
from rally.common import log as logging
from rally import consts
from haos.rally.plugin import base_disaster
LOG = logging.getLogger(__name__)
OBTAIN_IP = 'sudo /sbin/udhcpc -n 2>/dev/null | grep obtained'
class NeutronDHCPDisaster(base_disaster.BaseDisaster):
def _obtain_ip_address(self, server):
server_agent_id = server.get_agent_id()
LOG.debug('Server agent id: %s', server_agent_id)
obtain_out = self.run_remote_command(server_agent_id, OBTAIN_IP)
if obtain_out:
ip = re.findall(r'\d+\.\d+\.\d+\.\d+', obtain_out)[0]
LOG.info('Server IP is obtained: %s', ip)
return ip
@validation.required_services(consts.Service.NOVA, consts.Service.NEUTRON)
@validation.required_openstack(users=True)
@base.scenario(context={'cleanup': ['nova'],
'keypair': {}, 'allow_ssh': {}})
def kill_dhcp_agent(self, **kwargs):
network_id = self.pick_network_id()
server = self.boot_server_with_agent(network_id)
# obtain IP address
ip1 = self._obtain_ip_address(server)
self.assertIsNotNone(
ip1, 'Instance should be able to obtain IP from DHCP')
# choose controller
agents = self.admin_clients(
'neutron').list_dhcp_agent_hosting_networks(network_id)['agents']
controller = random.choice(agents)['host']
# kill dhcp agent
self.kill_remote_process(controller, 'dhcp-agent')
# retrieve IP once again
ip2 = self._obtain_ip_address(server)
self.assertIsNotNone(
ip2, 'Instance should be able to obtain IP from DHCP')
self.assertEqual(ip1, ip2, 'DHCP should return the same IP')


@@ -1,610 +0,0 @@
from rally.benchmark.scenarios import base
from haos.rally.plugin import base_disaster
from haos.rally import utils
from rally.common import log as logging
import time
LOG = logging.getLogger(__name__)
class NeutronDisaster(base_disaster.BaseDisaster):
def check_all_reschedule(self, node):
"""Check that networks and routers reschedule from agents on node
:param node: node controller on which rescheduling is being checked
"""
self.check_reschedule_for_dhcp_on_node(node=node)
self.check_reschedule_for_l3_on_node(node=node)
def find_primary_controller(self):
"""Find primary controller with command hierra role
(if controller node is primary this command return primary-controller)
:return: agent endpoint for the node which is primary controller
"""
for controller in self.context["controllers"]:
node = controller["agent_endpoint"]
command = "hiera role"
result = utils.run_command(self.context, node, command=command,
executor="shaker")
if "primary-controller" in result:
return node
return None
def find_non_primary_controller(self):
"""Find non primary controller
:return: the first non primary controller in the list of controllers
or raise
"""
primary_controller = self.find_primary_controller()
non_primary_context_controller = None
for controller in self.context["controllers"]:
if controller["agent_endpoint"] != primary_controller:
non_primary_context_controller = controller
return non_primary_context_controller
if non_primary_context_controller is None:
message = "Can't define non primary controller"
LOG.debug(message)
raise Exception(message)
# TODO(sbelous): write function wait some time
def wait_some_time(self):
pass
@base.scenario()
def drop_mysql_port(self):
"""Drop mysql port
Setup:
OpenStack cloud with at least 3 controllers
Scenario:
1. Create router1, net1 and subnetwork1 and join router1 with net1
2. Create router2, net2 and subnetwork2 and join router2 with net2
3. Start vm1 in network1
4. Start vm2 in network2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. ping 8.8.8.8 from vm2
9. ping vm1 from vm2 and vm2 from vm1
10. Run udhcp on vm1
11. Make l3-agent for router1 and dhcp-agent for net1 on the same node
12. drop MySQL port 3306 on the node hosting the l3-agent for router1
13. Boot vm3 in network1
14. ping 8.8.8.8 from vm3
15. ping between vm1 and vm3 by internal ip
16. ping between vm2 and vm3 by floating ip
17. Run udhcp on vm1 and vm3
"""
# Add rules to be able ping
self.add_rules_for_ping()
# Create a network, subnet and router and join them
network1, subnets1, router1 = self.create_network_subnet_router()
# Create a network, subnet and router and join them
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Check on what agents are router1
node = self.get_node_on_what_is_agent_for_router(router1)
self.get_dhcp_on_chosen_node(node, net1_id)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Drop the MySQL port
command = "iptables -I INPUT 1 -p tcp --dport 3306 -j DROP"
utils.run_command(self.context, node, command=command)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3 ", vm1_floating_ip)
@base.scenario()
def reboot_primary_controller(self):
"""Reboot primary controller
Setup:
OpenStack cloud with at least 3 controllers and 1 compute
Scenario:
1. Create router1, net1 and subnet1 and join router1 with net1
2. Create router2, net2 and subnet2 and join router2 with net2
3. Start vm1 in net1
4. Start vm2 in net2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. Find primary controller
9. Get l3 agent for router1 and dhcp-agent for net1
on primary controller
10. ping 8.8.8.8 from vm2
11. ping vm1 from vm2 and vm2 from vm1
12. Run udhcp on vm1
13. Reboot primary controller
14. Wait some time
15. Boot vm3 in net1
16. ping 8.8.8.8 from vm3
17. ping between vm1 and vm3 by internal ip
18. ping between vm2 and vm3 by floating ip
19. Run udhcp on vm1 and vm3
"""
quantity_of_networks_for_test = 2
networks = self.context["tenant"].get("networks")
if networks is None:
message = "Networks haven't been created by the context"
LOG.debug(message)
raise Exception(message)
if len(networks) < quantity_of_networks_for_test:
message = "Not enough networks for the test"
LOG.debug(message)
raise Exception(message)
network1 = networks[0]
network2 = networks[1]
router1_id = network1.get("router_id")
net1_id = network1["id"]
net2_id = network2["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add rules to be able ping
self.add_rules_for_ping()
# floatingIp for VMs
self._attach_floating_ip(vm1, "net04_ext")
self._attach_floating_ip(vm2, "net04_ext")
# Define internal IP and floating IP
net1_name = network1["name"]
net2_name = network2["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
primary_context_controller = None
primary_controller = self.find_primary_controller()
for controller in self.context["controllers"]:
if controller['agent_endpoint'] == primary_controller:
primary_context_controller = controller
if primary_context_controller is None:
raise Exception("Can't find the primary controller in the context")
# Get l3 agent for router1 and one dhcp agent for network1
# on the primary controller
self.get_dhcp_on_chosen_node(primary_controller, net1_id)
self.get_l3_on_chosen_node(primary_controller, router1_id)
# dhcp work
output = utils.run_command(self.context, "VM1", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.power_off_controller(primary_context_controller)
# TODO(sbelous): wait some time
self.wait_some_time()
self.check_all_reschedule(primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
output = utils.run_command(self.context, "VM3", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
@base.scenario()
def drop_rabbit_port(self):
"""Drop rabbit port
Setup:
OpenStack cloud with at least 3 controllers
Scenario:
1. Create router1, net1 and subnet1 and join router1 with net1
2. Create router2, net2 and subnet2 and join router2 with net2
3. Start vm1 in net1
4. Start vm2 in net2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. ping 8.8.8.8 from vm2
9. ping vm1 from vm2 and vm2 from vm1
10. Run udhcp on vm1
11. Make l3-agent for router1 and one dhcp-agent for net1
on the same node
12. drop rabbit port 5673 on node, where is l3-agent for router1
13. ping 8.8.8.8 from vm1
14. ping between vm2 and vm1 by floating ip
15. Run udhcp on vm1
"""
# Add rules to be able ping
self.add_rules_for_ping()
# Create a network, subnet and router and join them
network1, subnets1, router1 = self.create_network_subnet_router()
# Create a network, subnet and router and join them
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Check on what agents are router1
node = self.get_node_on_what_is_agent_for_router(router1)
self.get_dhcp_on_chosen_node(node, net1_id)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Drop rabbit MQ port
command = "iptables -I OUTPUT 1 -p tcp --dport 5673 -j DROP"
utils.run_command(self.context, node, command=command,
executor="shaker")
command = "iptables -I INPUT 1 -p tcp --dport 5673 -j DROP"
utils.run_command(self.context, node, command=command,
executor="shaker")
# TODO(kkuznetsova): make function waiting some time
# while scheduling is working
time.sleep(10)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM1", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
@base.scenario()
def destroy_primary_controller(self):
"""Shut destroy primary controller
Scenario:
1. Create network1, subnets1, router1
2. Create network2, subnets2, router2
2. Launch 2 instances (vm1 and vm2) and associate floating ip
3. Add rules for ping
4. Find primary controller
5. Rescedule network1 and router1 for the primary controller
6. ping 8.8.8.8 from vm2
7. ping vm1 from vm2 and vm1 from vm2
8. Run udhcp on vm1
9. Destroy primary controller
(virsh destroy <primary_controller>)
10. Wait some time
11. Check that all networks and routers rescedule
from primary controller
11. Boot vm3 in network1
12. ping 8.8.8.8 from vm3
13. ping between vm1 and vm3 by internal ip
14. ping between vm1 and vm2 by floating ip
15. Run udhcp on vm1 and vm3
"""
quantity_of_networks_for_test = 2
networks = self.context["tenant"].get("networks")
if networks is None:
message = "Networks haven't been created by the context"
LOG.debug(message)
raise Exception(message)
if len(networks) < quantity_of_networks_for_test:
message = "Not enough networks for the test"
LOG.debug(message)
raise Exception(message)
network1 = networks[0]
network2 = networks[1]
router1_id = network1.get("router_id")
net1_id = network1["id"]
net2_id = network2["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add rules to be able ping
self.add_rules_for_ping()
# floatingIp for VMs
self._attach_floating_ip(vm1, "net04_ext")
self._attach_floating_ip(vm2, "net04_ext")
# Define internal IP and floating IP
net1_name = network1["name"]
net2_name = network2["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
primary_controller = self.find_primary_controller()
primary_context_controller = None
for controller in self.context["controllers"]:
if controller['agent_endpoint'] == primary_controller:
primary_context_controller = controller
if primary_context_controller is None:
raise Exception("Can't find the primary controller in the context")
# Get l3 agent for router1 and one dhcp agent for network1
# on non primary controller
self.get_dhcp_on_chosen_node(primary_controller, net1_id)
self.get_l3_on_chosen_node(primary_controller, router1_id)
# dhcp work
output = utils.run_command(self.context, "VM1", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.power_off_controller(primary_context_controller)
# TODO(sbelous): wait some time
self.wait_some_time()
self.check_all_reschedule(primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
output = utils.run_command(self.context, "VM3", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
@base.scenario()
def destroy_non_primary_controller(self):
"""Destroy non primary controller
Scenario:
1. Create network1, subnets1, router1
2. Create network2, subnets2, router2
2. Launch 2 instances (vm1 and vm2) and associate floating ip
3. Add rules for ping
4. Choose one non primary controller
5. Rescedule network1 and router1 for chosen non primary controller
6. ping 8.8.8.8 from vm2
7. ping vm1 from vm2 and vm1 from vm2
8. Run udhcp on vm1
9. Destroy non primary controller
(virsh destroy <non_primary_controller>)
10. Wait some time
11. Check that all networks and routers rescedule
from non primary controller
11. Boot vm3 in network1
12. ping 8.8.8.8 from vm3
13. ping between vm1 and vm3 by internal ip
14. ping between vm1 and vm2 by floating ip
15. Run udhcp on vm1 and vm3
"""
networks = self.context["tenant"].get("networks")
if networks is None:
message = "Networks haven't been created by the context"
LOG.debug(message)
raise Exception(message)
if len(networks) < 2:
message = "Not enough networks for the test"
LOG.debug(message)
raise Exception(message)
network1 = networks[0]
network2 = networks[1]
router1_id = network1.get("router_id")
net1_id = network1["id"]
net2_id = network2["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add rules to be able ping
self.add_rules_for_ping()
# floatingIp for VMs
self._attach_floating_ip(vm1, "net04_ext")
self._attach_floating_ip(vm2, "net04_ext")
# Define internal IP and floating IP
net1_name = network1["name"]
net2_name = network2["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
non_primary_context_controller = self.find_non_primary_controller()
non_primary_controller = \
non_primary_context_controller['agent_endpoint']
# Get l3 agent for router1 and one dhcp agent for network1
# on non primary controller
self.get_dhcp_on_chosen_node(non_primary_controller, net1_id)
self.get_l3_on_chosen_node(non_primary_controller, router1_id)
# dhcp work
output = utils.run_command(self.context, "VM1", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.power_off_controller(non_primary_context_controller)
# TODO(sbelous): wait some time
self.wait_some_time()
self.check_all_reschedule(non_primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
output = utils.run_command(self.context, "VM3", command="sudo udhcpc",
executor="shaker")
if output is None:
message = "dhcp agent doesn't work for VM1"
LOG.debug("output = %s", message)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)


@@ -1,34 +0,0 @@
import random
import time
from haos.rally.plugin import base_disaster
from rally.benchmark.scenarios import base
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class ControllerShutdown(base_disaster.BaseDisaster):
@base.scenario()
def power_off_and_on_one_controller(self):
"""This scenario selects one controller and shutdown it
Controller will be selected randomly, after the shutdown
this controller will be started again.
Setup:
OpenStack cloud with at least 3 controllers.
"""
controller_id = random.randint(0, len(self.context["controllers"]) - 1)
controller = self.context["controllers"][controller_id]
power_control_node = self.context["power_control_node"]
self.run_remote_command(power_control_node,
command=controller["hardware_power_off_cmd"])
time.sleep(controller["power_off_timeout"])
self.run_remote_command(power_control_node,
command=controller["hardware_power_on_cmd"])
time.sleep(controller["power_on_timeout"])


@@ -1,24 +0,0 @@
import random
from haos.rally.plugin import base_disaster
from rally.benchmark.scenarios import base
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class RunCommand(base_disaster.BaseDisaster):
@base.scenario()
def run_command_on_random_controller(self, command='', timeout=300):
"""This scenario executes bash command on random controller
:param command: command which should be executed
:param timeout: how long we will wait for command execution
"""
controller_id = random.randint(0, len(self.context["controllers"]) - 1)
controller = self.context["controllers"][controller_id]
LOG.info('Running command on controller: %s', controller)
self.run_remote_command(controller, command, timeout)


@@ -1,67 +0,0 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import signal
from rally.benchmark import types
from rally import exceptions
import requests
from shaker import lib
def timeout_alarm(signum, frame):
msg = "Agent not respond"
raise exceptions.TimeoutException(msg)
def run_command(context, node, command, recover_command=None,
recover_timeout=0, executor="dummy", timeout=300):
if recover_command is not None:
action = {"node": node, "command": recover_command,
"timeout": recover_timeout, "executor": executor}
context["recover_commands"].append(action)
signal.signal(signal.SIGALRM, timeout_alarm)
signal.alarm(timeout)
if executor == "dummy":
r = requests.post("http://{0}/run_command".format(node),
headers={"Content-Type": "application/json"},
data=json.dumps({"command": command}))
return r.text
elif executor == "shaker":
shaker = context.get("shaker")
if not shaker:
shaker = lib.Shaker(context["shaker_endpoint"], [],
agent_loss_timeout=600)
context["shaker"] = shaker
r = shaker.run_script(node, command)
return r.get('stdout')
def get_server_agent_id(server):
for net, interfaces in server.addresses.items():
for interface in interfaces:
if interface.get('OS-EXT-IPS:type') == 'fixed':
return interface.get('OS-EXT-IPS-MAC:mac_addr')
raise Exception('Could not get MAC address from server: %s' % server.id)
def get_server_net_id(clients, server):
net_name = server.addresses.keys()[0]
net_id = types.NeutronNetworkResourceType.transform(
clients=clients, resource_config=net_name)
return net_id


@@ -1,109 +0,0 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import flask
from rally.common import log as logging
LOG = logging.getLogger(__name__)
app = flask.Flask(__name__)
@app.route('/reply/<agent_id>', methods=['POST'])
def reply(agent_id):
data = flask.request.data
LOG.debug('Got reply from agent: %s, %s', agent_id, data)
pipe = app.config['HAOS_PIPE']
if pipe:
pipe.send({agent_id: data})
return ''
@app.route('/poll/<agent_id>')
def poll(agent_id):
LOG.debug('Poll request from agent: %s', agent_id)
pipe = app.config['HAOS_PIPE']
if pipe:
has_data = pipe.poll()
tasks = app.config['HAOS_TASKS']
if has_data:
tasks.update(pipe.recv())
if agent_id in tasks:
command = tasks[agent_id]
LOG.debug('Scheduling command %s on agent %s', command, agent_id)
del tasks[agent_id]
return command
return ''
def _split_address(address):
try:
host, port = address.split(':')
port = int(port)
return host, port
except ValueError:
raise ValueError('Invalid address: %s, "host:port" expected' % address)
def start_server(pipe, server_endpoint):
app.config['HAOS_PIPE'] = pipe
app.config['HAOS_TASKS'] = dict()
host, port = _split_address(server_endpoint)
LOG.info('Running the server at %s:%d', host, port)
app.run(host=host, port=port, debug=False)
def run(pipe, agent_id, command, timeout):
LOG.info('Running command %s on agent %s', command, agent_id)
pipe.send({agent_id: command})
has_data = pipe.poll(timeout)
if has_data:
data = pipe.recv()[agent_id]
LOG.debug('Received data %s from agent %s', data, agent_id)
return data
else:
LOG.warn('Timeout while receiving data from agent %s', agent_id)
return None
class Server(object):
def __init__(self, server_endpoint):
LOG.info('Server listens at %s', server_endpoint)
self.parent_conn, self.child_conn = multiprocessing.Pipe()
self.child = multiprocessing.Process(
target=start_server, args=(self.child_conn, server_endpoint))
self.child.start()
def __del__(self):
LOG.info('Server stops')
self.parent_conn.close()
self.child.terminate()
def remote_control(self, agent_id, command, timeout):
return run(self.parent_conn, agent_id, command, timeout)


@@ -1,29 +0,0 @@
import paramiko
from rally.common import log as logging
LOG = logging.getLogger(__name__)
def run(host, username, password, command, timeout):
msg = 'Running command "{0}" on server {1}'
LOG.info(msg.format(command, host))
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=username, password=password)
_, ssh_stdout, ssh_stderr = ssh.exec_command(command, timeout=timeout)
while not ssh_stdout.channel.exit_status_ready():
"Wait while all commands will be finished successfully."
pass
return ssh_stdout, ssh_stderr
class SSHConnection(object):
def remote_control(self, host, command, timeout=30):
return run(host["public_ip"], host["user"], host["password"], command,
timeout)


@@ -1,39 +0,0 @@
{
"NeutronL3Disaster.ban_one_l3_agent": [
{
"runner": {
"type": "serial",
"times": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"roles":[
"admin"
],
"recover_cloud": {
"checks": ["rabbitmq_cluster_status"]
},
"network": {
"networks_per_tenant": 2
},
"cloud":{ },
"cloud_nodes": {
"controllers": [
{
"agent_endpoint": "node-4.domain.tld"
},
{
"agent_endpoint": "node-5.domain.tld"
},
{
"agent_endpoint": "node-3.domain.tld"
}
]
}
}
}
]
}


@@ -1,39 +0,0 @@
{
"NeutronL3Disaster.ban_some_l3_agents": [
{
"runner": {
"type": "serial",
"times": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"roles":[
"admin"
],
"recover_cloud": {
"checks": ["rabbitmq_cluster_status"]
},
"network": {
"networks_per_tenant": 2
},
"cloud": { },
"cloud_nodes": {
"controllers": [
{
"agent_endpoint": "node-5.domain.tld"
},
{
"agent_endpoint": "node-4.domain.tld"
},
{
"agent_endpoint": "node-3.domain.tld"
}
]
}
}
}
]
}


@@ -1,36 +0,0 @@
{
"RunCommand.run_command_on_random_controller": [
{
"args": {
"command": "iptables -I INPUT 1 -p tcp --destination-port galeracheck -j DROP && sleep 20 && iptables -D INPUT -p tcp --destination-port galeracheck -j DROP",
"timeout": 300
},
"runner": {
"type": "serial",
"times": 10
},
"context": {
"cloud_nodes": {
"controllers": [
{
"public_ip": "172.16.0.4",
"user": "root",
"password": "secret"
},
{
"public_ip": "172.16.0.5",
"user": "root",
"password": "secret"
},
{
"public_ip": "172.16.0.4",
"user": "root",
"password": "secret"
}
],
"remote_control_type": "ssh"
}
}
}
]
}


@@ -1,36 +0,0 @@
{
"RunCommand.run_command_on_random_controller": [
{
"args": {
"command": "iptables -I INPUT 1 -p tcp --destination-port 5673 -j DROP && sleep 20 && iptables -D INPUT -p tcp --destination-port 5673 -j DROP",
"timeout": 300
},
"runner": {
"type": "serial",
"times": 10
},
"context": {
"cloud_nodes": {
"controllers": [
{
"public_ip": "172.16.0.4",
"user": "root",
"password": "secret"
},
{
"public_ip": "172.16.0.5",
"user": "root",
"password": "secret"
},
{
"public_ip": "172.16.0.4",
"user": "root",
"password": "secret"
}
],
"remote_control_type": "ssh"
}
}
}
]
}


@@ -1,15 +0,0 @@
---
NeutronDHCPDisaster.kill_dhcp_agent:
-
args: {}
runner:
type: "constant"
times: 1
context:
users:
tenants: 1
users_per_tenant: 1
roles: [admin]
network:
networks_per_tenant: 1
cloud: {}


@@ -1,40 +0,0 @@
{
"ControllerShutdown.power_off_and_on_one_controller": [
{
"runner": {
"type": "serial",
"times": 1
},
"context": {
"cloud_nodes": {
"controllers": [
{
"hardware_power_on_cmd": "VBoxManage startvm fuel-slave-1 --type headless",
"hardware_power_off_cmd": "VBoxManage controlvm fuel-slave-1 poweroff",
"power_off_timeout": 180,
"power_on_timeout": 90
},
{
"hardware_power_on_cmd": "VBoxManage startvm fuel-slave-2 --type headless",
"hardware_power_off_cmd": "VBoxManage controlvm fuel-slave-2 poweroff",
"power_off_timeout": 180,
"power_on_timeout": 90
},
{
"hardware_power_on_cmd": "VBoxManage startvm fuel-slave-3 --type headless",
"hardware_power_off_cmd": "VBoxManage controlvm fuel-slave-3 poweroff",
"power_off_timeout": 180,
"power_on_timeout": 90
}
],
"power_control_node": {
"public_ip": "172.18.78.30",
"user": "xwizard",
"password": "xWizard707"
},
"remote_control_type": "ssh"
}
}
}
]
}


@@ -1,44 +0,0 @@
{
"RabbitMQDisasterScenarios.power_off_one_controller": [
{
"runner": {
"type": "serial",
"times": 1
},
"context": {
"recover_cloud": {
"checks": ["rabbitmq_cluster_status" ]
},
"cloud_nodes": {
"controllers": [
{
"agent_endpoint": "172.16.0.4:10707",
"hardware_power_on_cmd": "VBoxManage startvm fuel-slave-2 --type headless",
"hardware_power_off_cmd": "VBoxManage controlvm fuel-slave-2 poweroff",
"power_off_timeout": 20,
"power_on_timeout": 30
},
{
"agent_endpoint": "172.16.0.5:10707",
"hardware_power_on_cmd": "VBoxManage startvm fuel-slave-5 --type headless",
"hardware_power_off_cmd": "VBoxManage controlvm fuel-slave-5 poweroff",
"power_off_timeout": 20,
"power_on_timeout": 30
},
{
"agent_endpoint": "172.16.0.6:10707",
"hardware_power_on_cmd": "VBoxManage startvm fuel-slave-4 --type headless",
"hardware_power_off_cmd": "VBoxManage controlvm fuel-slave-4 poweroff",
"power_off_timeout": 20,
"power_on_timeout": 30
}
],
"shaker_endpoint": "172.16.0.1:5999",
"power_control_node": {
"agent_endpoint": "localhost:10707"
}
}
}
}
]
}


@@ -1,5 +0,0 @@
python-shaker (0.2.2) UNRELEASED; urgency=medium
* Initial release
-- Aleksey Galkin <agalkin@mirantis.com> Tue, 17 Mar 2015 15:00:00 +0300


@@ -1 +0,0 @@
7


@@ -1,35 +0,0 @@
Source: python-shaker
Section: python
Priority: extra
Maintainer: Aleksey Galkin <agalkin@mirantis.com>
Build-Depends:
debhelper (>= 8.0.0),
python-setuptools,
python-pbr (>=0.6),
Standards-Version: 3.9.4
Homepage: https://launchpad.net/python-shaker
XS-Testsuite: autopkgtest
Package: python-shaker
Architecture: all
Depends: python-pkg-resources,
python-pbr (>= 0.8), python-pbr (<= 1.0),
iso8601 (>= 0.1.9),
Jinja2 (>= 2.6),
python-oslo.concurrency (>= 1.3.0),
oslo-config (>= 1.6.0),
python-oslo.i18n (>= 1.3.0),
python-oslo.log (>= 0.4.0),
python-oslo.serialization (>= 1.2.0),
python-oslo.utils (>= 1.2.0),
python-glanceclient (>= 0.15.0),
python-keystoneclient (>= 1.1.0),
python-neutronclient (>= 2.3.11), python-neutronclient (<< 3),
python-novaclient (>= 2.18.0), python-novaclient(!= 2.21.0),
python-heatclient (>= 0.3.0),
PyYAML (>= 3.1.0),
python-zmq (>=14.3.1),
six (>= 1.9.0),
${python:Misc}
${python:Depends}
Description: EC2 Salt Minion Launcher


@@ -1 +0,0 @@
python-shacker_0.0.2_all.deb python extra


@@ -1,14 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-
# export DH_VERBOSE=1
%:
dh $@ --buildsystem python_distutils --with python2
override_dh_auto_install:
dh_auto_install
override_dh_python2:
dh_python2 --no-guessing-deps


@@ -1,89 +0,0 @@
Name: python-shaker
Epoch: 1
Version: 0.2.2
Release: 1%{?dist}
Summary: EC2 Salt Minion Launcher
License: ASL 2.0
URL: http://pypi.python.org/pypi/%{name}
Source0: http://pypi.python.org/packages/source/p/%{name}/%{name}-%{version}.tar.gz
BuildArch: noarch
BuildRequires: python2-devel
BuildRequires: python-setuptools
BuildRequires: python-pbr
BuildRequires: python-d2to1
BuildRequires: python-sphinx
BuildRequires: python-oslo-sphinx
BuildRequires: python-six
Requires: python-pbr >= 0.8, python-pbr <= 1.0
Requires: python-iso8601 >= 0.1.9
Requires: python-jinja2 >= 2.6
Requires: python-oslo-concurrency >= 1.3.0
Requires: python-oslo-config >= 1.6.0
Requires: python-oslo-i18n >= 1.3.0
Requires: python-oslo-log >= 0.4.0
Requires: python-oslo-serialization >= 1.2.0
Requires: python-oslo-utils >= 1.2.0
Requires: python-glanceclient >= 0.15.0
Requires: python-keystoneclient >= 1.1.0
Requires: python-neutronclient >= 2.3.11, python-neutronclient < 3
Requires: python-novaclient >= 2.18.0
Conflicts: python-novaclient = 2.21.0
Requires: python-heatclient >= 0.3.0
Requires: PyYAML >= 3.1.0
Requires: python-zmq >= 14.3.1
Requires: python-six >= 1.9.0
Requires: python-setuptools
%description
Shake VMs with our sheer-class tests!
%package doc
Summary: Documentation for Shaker
Group: Documentation
BuildRequires: python-sphinx
BuildRequires: python-sphinxcontrib-httpdomain
%description doc
Documentation for the Shaker.
%prep
%setup -q
rm -f test-requirements.txt requirements.txt
rm -rf python_shaker.egg-info
%build
%{__python} setup.py build
%install
%{__python} setup.py install -O1 --skip-build --root %{buildroot}
rm -fr %{buildroot}%{python_sitelib}/tests
export PYTHONPATH="$( pwd ):$PYTHONPATH"
sphinx-build -b html doc/source html
sphinx-build -b man doc/source man
install -p -D -m 644 man/shaker.1 %{buildroot}%{_mandir}/man1/shaker.1
rm -fr html/.doctrees html/.buildinfo
%files
%doc LICENSE README.rst
%{_bindir}/shaker*
%{python_sitelib}/shaker
%{python_sitelib}/*.egg-info
%{_mandir}/man1/shaker.1*
%files doc
%doc LICENSE html
%changelog

View File

@ -1,22 +0,0 @@
diff --git a/rally/cmd/cliutils.py b/rally/cmd/cliutils.py
index 896b141..e35898b 100644
--- a/rally/cmd/cliutils.py
+++ b/rally/cmd/cliutils.py
@@ -433,6 +433,8 @@ def run(argv, categories):
handler=parser)
CONF.register_cli_opt(category_opt)
+ CONF.register_cli_opt(cfg.ListOpt("plugin-path",
+ help="Custom plugins location"))
try:
CONF(argv[1:], project="rally", version=version.version_string())
@@ -511,6 +513,8 @@ def run(argv, categories):
utils.load_plugins("/opt/rally/plugins/")
utils.load_plugins(os.path.expanduser("~/.rally/plugins/"))
utils.import_modules_from_package("rally.plugins")
+ for path in CONF.plugin_path or []:
+ utils.load_plugins(path)
validate_deprecated_args(argv, fn)
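
With the option above registered, extra plugin directories can be passed straight on the rally command line instead of being copied into /opt/rally/plugins or ~/.rally/plugins. A hypothetical invocation under that assumption (the paths and scenario file are placeholders; run_rally.sh further down relies on the same flag):

rally --plugin-path /path/to/haos/rally/context,/path/to/haos/rally/plugin task start my-scenario.json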

View File

@ -1,12 +0,0 @@
diff --git a/requirements.txt b/requirements.txt
index b31ece0..cb185c4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,7 +19,6 @@ paramiko>=1.13.0
pbr>=0.11,<2.0
PrettyTable>=0.7,<0.8
PyYAML>=3.1.0
-psycopg2
python-designateclient>=1.0.0
python-glanceclient>=0.17.1
python-keystoneclient>=1.3.0

View File

@ -1,8 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=0.6,!=0.7,<1.0
flask
pyshaker

View File

@ -1,19 +0,0 @@
[metadata]
name = haos
summary = HA 4 OpenStack test suite
description-file =
    README.md
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
    Environment :: OpenStack
    Intended Audience :: Developers
    Intended Audience :: Information Technology
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.4

View File

@ -1,30 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)

View File

@ -1,7 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8
hacking>=0.8.0,<0.9
testtools>=0.9.36,!=1.2.0

View File

@ -1,28 +0,0 @@
#!/bin/sh -x

MAC_ADDRESS=`ip -4 l sh eth0 | grep link | awk '{print $2}'`

HAOS_SERVER_ENDPOINT=${1:-$HAOS_SERVER_ENDPOINT}
AGENT_ID=${2:-$MAC_ADDRESS}
POLLING_INTERVAL=${3:-2}

while true; do
    COMMAND=`curl --stderr /dev/null http://${HAOS_SERVER_ENDPOINT}/poll/${AGENT_ID}`

    if [ ! -z "${COMMAND}" ]; then
        echo ${COMMAND}

        CMD_EXEC="/tmp/haosagentcmd"
        echo "${COMMAND}" > ${CMD_EXEC}
        chmod +x ${CMD_EXEC}
        STDOUT=`sh ${CMD_EXEC}`

        CMD_OUT="/tmp/haosagentout"
        echo ${STDOUT} > ${CMD_OUT}

        curl --stderr /dev/null --data-binary "@${CMD_OUT}" -H "Content-Type: application/binary" http://${HAOS_SERVER_ENDPOINT}/reply/${AGENT_ID}
    fi

    sleep ${POLLING_INTERVAL}
done
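
The agent above speaks a two-call HTTP protocol: it repeatedly GETs /poll/<agent_id> expecting either an empty body or a shell command, runs the command, and POSTs the output back to /reply/<agent_id>. A minimal server-side sketch of that protocol, assuming Flask (which the deleted requirements.txt pulls in); the in-memory queues, handler names and port are illustrative, not the original haos server:

# Illustrative counterpart of the poll/reply protocol used by haosagent above.
# Flask is assumed (it is listed in requirements.txt); the queues, names and
# port number are placeholders, not the original server implementation.
from collections import defaultdict, deque

from flask import Flask, request

app = Flask(__name__)
commands = defaultdict(deque)   # agent_id -> queued shell commands
replies = defaultdict(list)     # agent_id -> collected command output


@app.route("/poll/<agent_id>")
def poll(agent_id):
    # The agent polls periodically; hand out one queued command or an empty body.
    if commands[agent_id]:
        return commands[agent_id].popleft()
    return ""


@app.route("/reply/<agent_id>", methods=["POST"])
def reply(agent_id):
    # The agent posts the command output as a binary body.
    replies[agent_id].append(request.get_data(as_text=True))
    return ""


if __name__ == "__main__":
    # A test runner would enqueue commands into `commands` before agents poll.
    app.run(host="0.0.0.0", port=10707)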

View File

@ -1,142 +0,0 @@
#!/bin/bash -xe
TOP_DIR=$(cd $(dirname "$0") && pwd)
NETWORK_NAME=${NETWORK_NAME:-net04}
HAOS_SERVER_ENDPOINT=${HAOS_SERVER_ENDPOINT}
IMAGE_NAME=${HAOS_IMAGE:-haos-image}
FLAVOR_NAME=${HAOS_FLAVOR:-haos-flavor}
CLOUD_IMAGE_NAME="haos-cloud-image"
CIRROS_IMAGE_URL="http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
error() {
printf "\e[31mError: %s\e[0m\n" "${*}" >&2
exit 1
}
message() {
printf "\e[33m%s\e[0m\n" "${1}"
}
remote_shell() {
host=$1
key=$2
command=$3
ssh -i ${key} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 cirros@${host} "$command"
}
remote_cp() {
host=$1
key=$2
src=$3
dst=$4
scp -i ${key} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 "$src" cirros@${host}:${dst}
}
build_image() {
message "Installing Haos image, will take some time"
if [ -z "$(glance image-show ${CLOUD_IMAGE_NAME})" ]; then
message "Downloading Cirros image"
glance image-create --name ${CLOUD_IMAGE_NAME} --disk-format qcow2 --container-format bare --is-public True --copy-from ${CIRROS_IMAGE_URL}
until [ -n "$(glance image-show ${CLOUD_IMAGE_NAME} | grep status | grep active)" ]; do
sleep 5
done
fi
UUID=$(cat /proc/sys/kernel/random/uuid)
message "Creating security group"
SEC_GROUP="haos-access-${UUID}"
nova secgroup-create ${SEC_GROUP} "Security Group for Haos"
nova secgroup-add-rule ${SEC_GROUP} icmp -1 -1 0.0.0.0/0
nova secgroup-add-rule ${SEC_GROUP} tcp 1 65535 0.0.0.0/0
nova secgroup-add-rule ${SEC_GROUP} udp 1 65535 0.0.0.0/0
message "Creating flavor"
if [ -n "$(nova flavor-list | grep ${FLAVOR_NAME})" ]; then
nova flavor-delete ${FLAVOR_NAME}
fi
nova flavor-create --is-public true ${FLAVOR_NAME} auto 64 0 1
message "Creating key pair"
KEY_NAME="haos-key-${UUID}"
KEY="`mktemp`"
nova keypair-add ${KEY_NAME} > ${KEY}
chmod og-rw ${KEY}
message "Booting VM"
NETWORK_ID=`neutron net-show ${NETWORK_NAME} -f value -c id`
VM="haos-template-${UUID}"
nova boot --poll --flavor ${FLAVOR_NAME} --image ${CLOUD_IMAGE_NAME} --key_name ${KEY_NAME} --nic net-id=${NETWORK_ID} --security-groups ${SEC_GROUP} ${VM}
message "Associating a floating IP with VM"
FLOATING_IP=`neutron floatingip-create -f value -c floating_ip_address net04_ext | tail -1`
nova floating-ip-associate ${VM} ${FLOATING_IP}
message "Waiting for VM to boot up"
until remote_shell ${FLOATING_IP} ${KEY} "echo"; do
sleep 10
done
message "Installing haos agent into VM"
remote_cp ${FLOATING_IP} ${KEY} ${TOP_DIR}/agent/haosagent /tmp/haosagent
remote_shell ${FLOATING_IP} ${KEY} "sudo cp /tmp/haosagent /usr/bin/"
remote_shell ${FLOATING_IP} ${KEY} "sudo chmod 755 /usr/bin/haosagent"
HAOSAGENT_INIT="`mktemp`"
cat > ${HAOSAGENT_INIT} << EOF
#!/bin/sh
case "\$1" in
start)
export HAOS_SERVER_ENDPOINT="${HAOS_SERVER_ENDPOINT}"
start-stop-daemon -S -b -q -p /var/run/haosagent.pid --exec /usr/bin/haosagent
echo "OK"
;;
stop) :;;
*) echo "unknown argument ${1}" 1>&2;;
esac
EOF
remote_cp ${FLOATING_IP} ${KEY} ${HAOSAGENT_INIT} /tmp/S97-haosagent
remote_shell ${FLOATING_IP} ${KEY} "sudo cp /tmp/S97-haosagent /etc/init.d/"
remote_shell ${FLOATING_IP} ${KEY} "sudo chmod 755 /etc/init.d/S97-haosagent"
remote_shell ${FLOATING_IP} ${KEY} "sudo ln -s /etc/init.d/S97-haosagent /etc/rc3.d/"
remote_shell ${FLOATING_IP} ${KEY} "sudo /sbin/poweroff"
sleep 10
message "Making VM snapshot"
nova image-create --poll ${VM} ${IMAGE_NAME}
glance image-update --is-public True ${IMAGE_NAME}
message "Destroy VM"
nova delete ${VM}
message "Waiting for VM to die"
until [ -z "$(nova list | grep ${VM})" ]; do
sleep 5
done
message "Cleaning up resources"
FP_ID=`neutron floatingip-list -f csv -c id -c floating_ip_address --quote none | grep ${FLOATING_IP} | awk -F "," '{print $1}'`
neutron floatingip-delete ${FP_ID}
nova secgroup-delete ${SEC_GROUP}
nova keypair-delete ${KEY_NAME}
}
main() {
if [ -z ${HAOS_SERVER_ENDPOINT} ]; then
error "Set HAOS_SERVER_ENDPOINT env var"
exit 1
fi
if [ -z "$(glance image-show ${IMAGE_NAME})" ]; then
build_image
else
message "Image ${IMAGE_NAME} already exists."
fi
}
main "$@"
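
Running the script only requires the HAOS server endpoint to be exported; the image and flavor names fall back to the defaults at the top of the file. An illustrative invocation (the endpoint value is a placeholder and must be reachable from the booted VM):

export HAOS_SERVER_ENDPOINT=172.16.0.1:10707
tools/build_image.sh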

View File

@ -1,19 +0,0 @@
#!/bin/bash -xe
SERVER_ENDPOINT=$1
for i in `fuel nodes 2>/dev/null | grep ready | awk '{print $1}'`
do
NODE_NAME="node-${i}"
echo "Hacking ${NODE_NAME}"
scp hack_openstack_node.sh ${NODE_NAME}:/root/
scp haosagent ${NODE_NAME}:/root/
ssh ${NODE_NAME} "/root/hack_openstack_node.sh ${SERVER_ENDPOINT}"
done
if [ ! -d "mos-tempest-runner" ]; then
yum -y install git
git clone https://github.com/Mirantis/mos-tempest-runner.git
cd mos-tempest-runner
./setup_env.sh
fi

View File

@ -1,10 +0,0 @@
#!/bin/bash -xe
SERVER_ENDPOINT=$1
AGENT_ID=$(hostname)
cd /root/
killall haosagent || true
screen -dmS shaker-agent-screen /root/haosagent ${SERVER_ENDPOINT} ${AGENT_ID}
(crontab -l ; echo "@reboot /root/haosagent ${SERVER_ENDPOINT} ${AGENT_ID} &") | sort - | uniq - | crontab -
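
Whether the agent actually came up on a node can be verified with the same primitives the script uses; two illustrative checks, run on the node itself:

screen -ls | grep shaker-agent-screen
crontab -l | grep haosagent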

View File

@ -1,22 +0,0 @@
#!/bin/bash -xe
TOP_DIR=$(cd $(dirname "$0") && pwd)
MARKER="${TOP_DIR}/../.tox/run/.installed"
SSHPASS_EXEC="$(which sshpass)"
if [ -z ${SSHPASS_EXEC} ]; then
echo "sshpass is not installed!"
exit 1
fi
if [ ! -f "${MARKER}" ]; then
echo "Install Shaker agents onto OpenStack nodes"
sshpass -p ${FUEL_PASSWORD} scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${TOP_DIR}/helpers/hack_fuel_master.sh ${FUEL_USERNAME}@${FUEL_HOST}:/root/
sshpass -p ${FUEL_PASSWORD} scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${TOP_DIR}/helpers/hack_openstack_node.sh ${FUEL_USERNAME}@${FUEL_HOST}:/root/
sshpass -p ${FUEL_PASSWORD} scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${TOP_DIR}/agent/haosagent ${FUEL_USERNAME}@${FUEL_HOST}:/root/
sshpass -p ${FUEL_PASSWORD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${FUEL_USERNAME}@${FUEL_HOST} /root/hack_fuel_master.sh ${HAOS_SERVER_ENDPOINT}
touch ${MARKER}
fi

View File

@ -1,23 +0,0 @@
#!/bin/bash
TOP_DIR=$(cd $(dirname "$0") && pwd)
RALLY_EXEC="$(which rally | grep tox)"
if [ -z ${RALLY_EXEC} ]; then
echo "Install and patch rally"
TEMP_DIR="$(mktemp -d)"
cd ${TEMP_DIR}
git clone git://git.openstack.org/openstack/rally
cd rally
RALLY_VERSION="$(git describe --abbrev=0)"
git checkout ${RALLY_VERSION}
git apply ${TOP_DIR}/../patches/01-rally-plugin-dir.patch
git apply ${TOP_DIR}/../patches/02-rally-no-postgresql.patch
python setup.py install
rally-manage db recreate
rally deployment create --fromenv --name=haos
fi

View File

@ -1,6 +0,0 @@
#!/bin/bash -xe
tools/verify.sh || exit 1
tools/install_rally.sh || exit 1
tools/install_haos_agents.sh || exit 1
tools/build_image.sh || exit 1

View File

@ -1,9 +0,0 @@
#!/bin/bash
TOP_DIR=$(cd $(dirname "$0") && pwd)
SCENARIO=$1
if [ ! -z ${SCENARIO} ]; then
rally --debug --plugin-path ${TOP_DIR}/../haos/rally/context,${TOP_DIR}/../haos/rally/plugin task start ${SCENARIO}
fi

View File

@ -1,18 +0,0 @@
#!/bin/bash
echo "Verifying your env (have you tuned etc/openrc already?)"
if [ -z ${HAOS_SERVER_ENDPOINT} ]; then
echo "Specify value for HAOS_SERVER_ENDPOINT env var"
exit 1
fi
if [ -z ${FUEL_HOST} ]; then
echo "Specify value for FUEL_HOST env var"
exit 1
fi
if [ -z ${OS_AUTH_URL} ]; then
echo "Specify value for OS_AUTH_URL env var"
exit 1
fi

37
tox.ini
View File

@ -1,37 +0,0 @@
[tox]
envlist = pep8
minversion = 1.6
skipsdist = True

[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
         LANG=en_US.UTF-8
         LANGUAGE=en_US:en
         LC_ALL=C
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands = python setup.py testr --testr-args='{posargs}'

[testenv:pep8]
commands = flake8

[testenv:run]
commands =
    bash tools/install_rally.sh
    bash tools/run_rally.sh {posargs}
whitelist_externals = bash

[testenv:run-for-mos]
commands =
    bash tools/make_mos_env.sh
    bash tools/run_rally.sh {posargs}
whitelist_externals = bash

[flake8]
# E125 continuation line does not distinguish itself from next logical line
ignore = E125
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools,build