Update README with links to the latest KloudBuster

Change-Id: I83c64cd577cbbfce306738a61ba7c307a8e0798f
This commit is contained in:
Yichen Wang 2015-09-08 11:21:43 -07:00
parent 7b453e5cef
commit 3d2ab44c1c
44 changed files with 6 additions and 4907 deletions

View File

@ -1,8 +0,0 @@
This is for VMTP scale testing
This is work in progress for now
The idea is to be able to
1. Create tenant and users to load the cloud
2. Create routers within a User in a tenant
3. Create N networks per router
4. Create N VMs per network
5. Clean up all resources by default (Provide ability to avoid cleanup)

6
scale/README.rst Normal file
View File

@ -0,0 +1,6 @@
KloudBuster has been moved to openstack/kloudbuster. Please refer to below links for the latest information about KloudBuster:
* Documentation: `<http://kloudbuster.readthedocs.org>`_
* Source: `<http://git.openstack.org/cgit/openstack/kloudbuster>`_
* Support/Bugs: `<http://launchpad.net/kloudbuster>`_
* Mailing List: kloudbuster-core@lists.launchpad.net

View File

@ -1,258 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import log as logging
LOG = logging.getLogger(__name__)
class BaseCompute(object):
    """
    The Base class for nova compute resources
    1. Creates virtual machines with specific configs
    """

    def __init__(self, vm_name, network):
        # The nova client is shared with the owning user
        # (network -> router -> user)
        self.novaclient = network.router.user.nova_client
        self.network = network
        self.vm_name = vm_name
        # Filled in by create_server() once the instance reaches ACTIVE
        self.instance = None
        self.host = None
        # Floating ip object and its address; assigned by the network layer
        self.fip = None
        self.fip_ip = None
        self.subnet_ip = None
        self.fixed_ip = None
        self.ssh_ip = None
        # Shared interface ip for tested and testing cloud
        self.shared_interface_ip = None

    # Create a server instance with associated
    # security group, keypair with a provided public key
    def create_server(self, image_name, flavor_type, keyname,
                      nic, sec_group, avail_zone=None, user_data=None,
                      config_drive=None, retry_count=100):
        """
        Create a VM instance given following parameters
        1. VM Name
        2. Image Name
        3. Flavor name
        4. key pair name
        5. Security group instance
        6. Optional parameters: availability zone, user data, config drive

        Polls nova every 2 seconds up to retry_count times and returns the
        server object once it is ACTIVE; returns None (implicitly) when the
        instance errors out or the retries are exhausted.
        """

        # Get the image id and flavor id from their logical names
        image = self.find_image(image_name)
        flavor_type = self.find_flavor(flavor_type)
        # Also attach the created security group for the test
        instance = self.novaclient.servers.create(name=self.vm_name,
                                                  image=image,
                                                  flavor=flavor_type,
                                                  key_name=keyname,
                                                  nics=nic,
                                                  availability_zone=avail_zone,
                                                  userdata=user_data,
                                                  config_drive=config_drive,
                                                  security_groups=[sec_group.id])
        if not instance:
            return None
        # Verify that the instance gets into the ACTIVE state
        for _ in range(retry_count):
            instance = self.novaclient.servers.get(instance.id)
            if instance.status == 'ACTIVE':
                self.instance = instance
                # Record the hypervisor the scheduler picked; this extension
                # attribute is only visible to admin users, hence the check
                if 'OS-EXT-SRV-ATTR:hypervisor_hostname' in instance.__dict__:
                    self.host = instance.__dict__['OS-EXT-SRV-ATTR:hypervisor_hostname']
                else:
                    self.host = "Unknown"
                return instance
            if instance.status == 'ERROR':
                LOG.error('Instance creation error:' + instance.fault['message'])
                break
            # print "[%s] VM status=%s, retrying %s of %s..." \
            #     % (vmname, instance.status, (retry_attempt + 1), retry_count)
            time.sleep(2)

    def get_server_list(self):
        # Return all servers visible to this user's nova client
        servers_list = self.novaclient.servers.list()
        return servers_list

    def delete_server(self):
        # First delete the instance
        if self.instance:
            self.novaclient.servers.delete(self.instance)
            self.instance = None

    def find_image(self, image_name):
        """
        Given a image name return the image id
        Returns None when the image cannot be found
        """
        try:
            image = self.novaclient.images.find(name=image_name)
            return image
        except Exception:
            return None

    def find_flavor(self, flavor_type):
        """
        Given a named flavor return the flavor
        """
        flavor = self.novaclient.flavors.find(name=flavor_type)
        return flavor
class SecGroup(object):
    """Wrapper around a nova security group opening the ports KloudBuster needs."""

    def __init__(self, novaclient):
        self.secgroup = None
        self.secgroup_name = None
        self.novaclient = novaclient

    def create_secgroup_with_rules(self, group_name):
        """Create the security group and add one rule per test traffic type."""
        group = self.novaclient.security_groups.create(name=group_name,
                                                       description="Test sec group")
        # (protocol, from_port, to_port) for: ping, SSH, HTTP, Redis,
        # Nuttcp control/data (tcp) and Nuttcp (udp) -- in that order
        rule_specs = [
            ("icmp", -1, -1),
            ("tcp", 22, 22),
            ("tcp", 80, 80),
            ("tcp", 6379, 6379),
            ("tcp", 5001, 5002),
            ("udp", 5001, 5001),
        ]
        for protocol, port_low, port_high in rule_specs:
            self.novaclient.security_group_rules.create(group.id,
                                                        ip_protocol=protocol,
                                                        from_port=port_low,
                                                        to_port=port_high)
        self.secgroup = group
        self.secgroup_name = group_name

    def delete_secgroup(self):
        """
        Delete the security group
        Sometimes this maybe in use if instance is just deleted
        Add a retry mechanism
        """
        for _ in range(10):
            try:
                self.novaclient.security_groups.delete(self.secgroup)
                break
            except Exception:
                time.sleep(2)
class KeyPair(object):
    """Manages the nova keypair that KloudBuster installs on its test VMs."""

    def __init__(self, novaclient):
        self.keypair = None
        self.keypair_name = None
        self.novaclient = novaclient

    def add_public_key(self, name, public_key_file=None):
        """
        Add the KloudBuster public key to openstack
        """
        public_key = None
        try:
            key_path = os.path.expanduser(public_key_file)
            with open(key_path) as pkf:
                public_key = pkf.read()
        except IOError as exc:
            LOG.error("Cannot open public key file %s: %s" % (public_key_file,
                                                              exc))
        LOG.info("Adding public key %s" % name)
        self.keypair = self.novaclient.keypairs.create(name, public_key)
        self.keypair_name = name

    def remove_public_key(self):
        """
        Remove the keypair created by KloudBuster
        """
        self.novaclient.keypairs.delete(self.keypair)
class Flavor(object):
    """Thin helper around nova flavor listing, creation and deletion."""

    def __init__(self, novaclient):
        self.novaclient = novaclient

    def list(self):
        """Return every flavor known to nova."""
        return self.novaclient.flavors.list()

    def create_flavor(self, name, ram, vcpus, disk, override=False):
        """Create a flavor; when override is set, remove any same-named one first."""
        if override:
            self.delete_flavor(name)
        return self.novaclient.flavors.create(name=name, ram=ram,
                                              vcpus=vcpus, disk=disk)

    def delete_flavor(self, name):
        """Best-effort deletion of the named flavor (missing flavor is ignored)."""
        try:
            self.novaclient.flavors.find(name=name).delete()
        except Exception:
            pass
class NovaQuota(object):
    """Reads and updates the nova quota set of a single tenant."""

    def __init__(self, novaclient, tenant_id):
        self.novaclient = novaclient
        self.tenant_id = tenant_id

    def get(self):
        """Return the tenant's nova quota set as a plain dict."""
        return vars(self.novaclient.quotas.get(self.tenant_id))

    def update_quota(self, **kwargs):
        """Push the given quota overrides for the tenant."""
        self.novaclient.quotas.update(self.tenant_id, **kwargs)
class CinderQuota(object):
    """Reads and updates the cinder (volume) quota set of a single tenant."""

    def __init__(self, cinderclient, tenant_id):
        self.cinderclient = cinderclient
        self.tenant_id = tenant_id

    def get(self):
        """Return the tenant's cinder quota set as a plain dict."""
        return vars(self.cinderclient.quotas.get(self.tenant_id))

    def update_quota(self, **kwargs):
        """Push the given quota overrides for the tenant."""
        self.cinderclient.quotas.update(self.tenant_id, **kwargs)

View File

@ -1,387 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from perf_instance import PerfInstance
import base_compute
import log as logging
import netaddr
from neutronclient.common.exceptions import NetworkInUseClient
LOG = logging.getLogger(__name__)
# Global CIDR shared by all objects of this class
# Enables each network to get a unique CIDR
START_CIDR = "10.0.0.0/16"
cidr = START_CIDR
class KBGetExtNetException(Exception):
    """Raised by find_external_network() when the cloud has no external network."""
    pass
def create_floating_ip(neutron_client, ext_net):
    """
    Function that creates a floating ip and returns it
    Accepts the neutron client and ext_net
    Module level function since this is not associated with a
    specific network instance
    """
    request = {"floatingip": {"floating_network_id": ext_net['id']}}
    return neutron_client.create_floatingip(request)
def delete_floating_ip(neutron_client, fip):
    """
    Deletes the floating ip
    Module level function since this operation
    is not associated with a network
    """
    neutron_client.delete_floatingip(fip)
def find_external_network(neutron_client):
    """Return the first external network found.

    Raises KBGetExtNetException when the cloud has no external network.
    """
    for candidate in neutron_client.list_networks()['networks']:
        if candidate['router:external']:
            return candidate
    LOG.error("No external network is found.")
    raise KBGetExtNetException()
class BaseNetwork(object):
    """
    The Base class for neutron network operations
    1. Creates networks with 1 subnet inside each network
    2. Increments a global CIDR for all network instances
    3. Deletes all networks on completion
    4. Also interacts with the compute class for instances
    """

    def __init__(self, router):
        """
        Store the neutron client
        User name for this network
        and network object
        """
        self.neutron_client = router.user.neutron_client
        self.nova_client = router.user.nova_client
        self.router = router
        # Neutron network dict, set by create_network_and_subnet()
        self.network = None
        # PerfInstance objects scheduled on this network
        self.instance_list = []
        self.secgroup_list = []

    def create_compute_resources(self, network_prefix, config_scale):
        """
        Creates the compute resources includes the following resources
        1. VM instances
        2. Security groups
        3. Keypairs
        """
        # Create the security groups first
        for secgroup_count in range(config_scale['secgroups_per_network']):
            secgroup_instance = base_compute.SecGroup(self.nova_client)
            self.secgroup_list.append(secgroup_instance)
            secgroup_name = network_prefix + "-SG" + str(secgroup_count)
            secgroup_instance.create_secgroup_with_rules(secgroup_name)

        LOG.info("Scheduled to create VM for network %s..." % network_prefix)
        if config_scale['use_floatingip']:
            external_network = find_external_network(self.neutron_client)

        # Schedule to create the required number of VMs
        for instance_count in range(config_scale['vms_per_network']):
            vm_name = network_prefix + "-I" + str(instance_count)
            perf_instance = PerfInstance(vm_name, self, config_scale)
            self.instance_list.append(perf_instance)
            # subnet_ip was stashed on the network dict by
            # create_network_and_subnet()
            perf_instance.subnet_ip = self.network['subnet_ip']
            if config_scale['use_floatingip']:
                # Create the floating ip for the instance
                # store it and the ip address in perf_instance object
                perf_instance.fip = create_floating_ip(self.neutron_client, external_network)
                perf_instance.fip_ip = perf_instance.fip['floatingip']['floating_ip_address']
            # Create the VMs on specified network, first keypair, first secgroup
            perf_instance.boot_info['image_name'] = config_scale['image_name']
            perf_instance.boot_info['keyname'] = self.router.user.key_name
            perf_instance.boot_info['nic'] = [{'net-id': self.network['id']}]
            perf_instance.boot_info['sec_group'] = self.secgroup_list[0].secgroup
            perf_instance.boot_info['avail_zone'] = self.router.user.tenant.kloud.get_az()

    def delete_compute_resources(self):
        """
        Deletes the compute resources
        Security groups, keypairs and instances
        """
        # Delete the instances first
        for instance in self.instance_list:
            instance.delete_server()
            if instance.fip:
                # Delete the Floating IP
                # Sometimes this will fail if instance is just deleted
                # Add a retry mechanism
                for _ in range(10):
                    try:
                        delete_floating_ip(self.neutron_client, instance.fip['floatingip']['id'])
                        break
                    except Exception:
                        time.sleep(1)

        # Delete all security groups
        for secgroup_instance in self.secgroup_list:
            secgroup_instance.delete_secgroup()

    def create_network_and_subnet(self, network_name):
        """
        Create a network with 1 subnet inside it
        """
        subnet_name = "KB_subnet_" + network_name
        body = {
            'network': {
                'name': network_name,
                'admin_state_up': True
            }
        }
        self.network = self.neutron_client.create_network(body)['network']

        # Now create the subnet inside this network support ipv6 in future
        body = {
            'subnet': {
                'name': subnet_name,
                'cidr': self.generate_cidr(),
                'network_id': self.network['id'],
                'enable_dhcp': True,
                'ip_version': 4
            }
        }
        subnet = self.neutron_client.create_subnet(body)['subnet']
        # add subnet id to the network dict since it has just been added
        self.network['subnets'] = [subnet['id']]
        # NOTE(review): reads the module-level 'cidr' that generate_cidr()
        # just advanced; relies on no interleaved generate_cidr() call
        self.network['subnet_ip'] = cidr

    def generate_cidr(self):
        """Generate next CIDR for network or subnet, without IP overlapping.
        """
        # Advances the module-global cidr so every network gets a unique range
        global cidr
        cidr = str(netaddr.IPNetwork(cidr).next())
        return cidr

    def delete_network(self):
        """
        Deletes the network and associated subnet
        retry the deletion since network may be in use
        """
        for _ in range(1, 5):
            try:
                self.neutron_client.delete_network(self.network['id'])
                break
            except NetworkInUseClient:
                time.sleep(1)

    def get_all_instances(self):
        # PerfInstance objects created by create_compute_resources()
        return self.instance_list
class Router(object):
    """
    Router class to create a new routers
    Supports addition and deletion
    of network interfaces to router
    """

    def __init__(self, user):
        self.neutron_client = user.neutron_client
        self.nova_client = user.nova_client
        # Neutron router response dict, set by create_router()
        self.router = None
        self.user = user
        # Stores the list of networks
        self.network_list = []
        # Store the shared network
        self.shared_network = None
        self.shared_port_id = None
        # Store the interface ip of shared network attached to router
        self.shared_interface_ip = None

    def create_network_resources(self, config_scale):
        """
        Creates the required number of networks per router
        Also triggers the creation of compute resources inside each
        network
        """
        for network_count in range(config_scale['networks_per_router']):
            network_instance = BaseNetwork(self)
            self.network_list.append(network_instance)
            # Create the network and subnet
            network_name = self.router['router']['name'] + "-N" + str(network_count)
            network_instance.create_network_and_subnet(network_name)
            # Attach the created network to router interface
            self.attach_router_interface(network_instance)
            # Create the compute resources in the network
            network_instance.create_compute_resources(network_name, config_scale)

    def get_first_network(self):
        # Returns None when no network has been created yet
        if self.network_list:
            return self.network_list[0]
        return None

    def get_all_instances(self):
        # Flatten the instances of every network under this router
        all_instances = []
        for network in self.network_list:
            all_instances.extend(network.get_all_instances())
        return all_instances

    def delete_network_resources(self):
        """
        Delete all network and compute resources
        associated with a router
        """
        for network in self.network_list:
            # Now delete the compute resources and the network resources
            network.delete_compute_resources()
            if network.network:
                self.remove_router_interface(network)
                network.delete_network()

        # Also delete the shared port and remove it from router interface
        if self.shared_network:
            for _ in range(10):
                try:
                    self.remove_router_interface(self.shared_network, use_port=True)
                    self.shared_network = None
                    break
                except Exception:
                    time.sleep(1)

    def create_router(self, router_name, ext_net):
        """
        Create the router and attach it to
        external network
        """
        # Attach an external network if available
        if ext_net:
            body = {
                "router": {
                    "name": router_name,
                    "admin_state_up": True,
                    "external_gateway_info": {
                        "network_id": ext_net['id']
                    }
                }
            }
        else:
            body = {
                "router": {
                    "name": router_name,
                    "admin_state_up": True
                }
            }
        self.router = self.neutron_client.create_router(body)
        return self.router['router']

    def delete_router(self):
        """
        Delete the router
        Also delete the networks attached to this router
        """
        # Delete the network resources first and than delete the router itself
        self.delete_network_resources()

        # Remove the external gateway first, with retries since neutron may
        # still be tearing down dependent resources
        for _ in range(10):
            try:
                self.neutron_client.remove_gateway_router(self.router['router']['id'])
                # NOTE(review): resetting shared_network here looks like a
                # copy/paste leftover from delete_network_resources -- confirm
                self.shared_network = None
                break
            except Exception:
                time.sleep(1)

        # Then delete the router itself, with the same retry scheme
        for _ in range(10):
            try:
                self.neutron_client.delete_router(self.router['router']['id'])
                break
            except Exception:
                time.sleep(1)

    def _port_create_neutron(self, network_instance):
        """
        Creates a port on a specific network
        """
        body = {
            "port": {
                "admin_state_up": True,
                "network_id": network_instance.network['id']
            }
        }
        post_output = self.neutron_client.create_port(body)
        # Remember the fixed ip neutron assigned to the shared port
        self.shared_interface_ip = post_output['port']['fixed_ips'][0]['ip_address']
        return post_output['port']['id']

    def _port_delete_neutron(self, port):
        self.neutron_client.delete_port(port)

    def attach_router_interface(self, network_instance, use_port=False):
        """
        Attach a network interface to the router
        """
        # If shared port is specified use that
        if use_port:
            self.shared_port_id = self._port_create_neutron(network_instance)
            body = {
                'port_id': self.shared_port_id
            }
        else:
            body = {
                'subnet_id': network_instance.network['subnets'][0]
            }
        self.neutron_client.add_interface_router(self.router['router']['id'], body)

    def remove_router_interface(self, network_instance, use_port=False):
        """
        Remove the network interface from router
        """
        if use_port:
            body = {
                'port_id': self.shared_port_id
            }
        else:
            body = {
                'subnet_id': network_instance.network['subnets'][0]
            }
        self.neutron_client.remove_interface_router(self.router['router']['id'], body)
class NeutronQuota(object):
    """Reads and updates the neutron quota of a single tenant."""

    def __init__(self, neutronclient, tenant_id):
        self.neutronclient = neutronclient
        self.tenant_id = tenant_id

    def get(self):
        """Return the tenant's neutron quota as a dict."""
        return self.neutronclient.show_quota(self.tenant_id)['quota']

    def update_quota(self, quotas):
        """Apply the given quota values for the tenant."""
        self.neutronclient.update_quota(self.tenant_id, {'quota': quotas})

View File

@ -1,123 +0,0 @@
# KloudBuster Default configuration file
# Name of the image to use for all test VMs (client, server and proxy)
# The image name must exist in OpenStack and must be built with the appropriate
# packages
image_name: 'KloudBuster Image'
# Config options common to client and server side
keystone_admin_role: "admin"
# Cleanup all kloudbuster resources upon exit
cleanup_resources: True
# VM creation concurrency
vm_creation_concurrency: 5
#
# ssh access to the test VMs launched by kloudbuster is not required
# but can be handy if the user wants to ssh manually to any of them (for example
# to debug)
# public key to use to access all test VMs
# if empty will default to the user's public key (~/.ssh/id_rsa.pub) if it
# exists, otherwise will not provision any public key.
# If configured or available, a key pair will be added for each
# configured user.
#
public_key_file:
# SERVER SIDE CONFIG OPTIONS
server:
# Flavor to use for the test images
flavor:
# Number of vCPUs for the flavor
vcpus: 1
# Memory for the flavor in MB
ram: 2048
# Size of local disk in GB
disk: 20
# Number of tenants to be created on the cloud
number_tenants: 1
# Number of Users to be created inside the tenant
# For now support only 1 user per tenant
users_per_tenant: 1
# Number of routers to be created within the context of each User
routers_per_user: 1
# Number of networks to be created within the context of each Router
# Assumes 1 subnet per network
networks_per_router: 1
# Number of VM instances to be created within the context of each Network
vms_per_network: 1
# Number of security groups per network
secgroups_per_network: 1
# Assign floating IP for every VM
use_floatingip: True
# Placement hint
# Availability zone to use for servers in the server cloud
# Leave empty if you prefer to have the Nova scheduler place the server VMs
# If you want to pick a particular AZ, put that AZ name (e.g. nova)
# If you want a particular compute host, put the AZ and compute host names
# separated by ':' (e.g. nova:tme100)
availability_zone:
# CLIENT SIDE CONFIG OPTIONS
client:
# Assign floating IP for every VM
use_floatingip: True
# Flavor to use for the test images
flavor:
# Number of vCPUs for the flavor
vcpus: 1
# Memory for the flavor in MB
ram: 2048
# Size of local disk in GB
disk: 20
# Placement hint
# Availability zone to use for clients in the client cloud
# Leave empty if you prefer to have the Nova scheduler place the server VMs
# If you want to pick a particular AZ, put that AZ name (e.g. nova)
# If you want a particular compute host, put the AZ and compute host names
# separated by ':' (e.g. nova:tme100)
availability_zone:
# Interval for polling status from all VMs
polling_interval: 5
# Tooling
tp_tool:
name: 'nuttcp'
dest_path: '/usr/bin/nuttcp'
http_tool:
name: 'wrk'
dest_path: '/usr/local/bin/wrk2'
# HTTP tool specific configs (per VM)
http_tool_configs:
# Threads to run tests
threads: 1
# Connections to be kept concurrently per VM
connections: 1000
# Rate limit in RPS per client (0 for unlimit)
rate_limit: 500
# Timeout for HTTP requests
timeout: 5
# Connection Type: "Keep-alive", "New"
connection_type: 'Keep-alive'
# Interval for periodical report
report_interval: 5
# Duration of testing tools (seconds)
duration: 30
# Prompt before running benchmarking tools
prompt_before_run: False

View File

@ -1,26 +0,0 @@
# KloudBuster tenant and users files
# Usage: Pass to KloudBuster using "-l"
#
# This file contains a list of tenants and users that will be reused by
# KloudBuster instead of creating them. This is useful when running
# KloudBuster on clouds without admin permissions.
#
# Settings in this file has higher priority than user configs. It determines
# the final count of tenants and users that KloudBuster will use.
#
# If running under tenant/user reusing mode, KloudBuster will use *only* one
# tenant to hold the resources for both server cloud and client cloud.
#
# NOTE:
# (1) For now, we only support one user per tenant;
# (2) Under tenant/user reusing mode, all resources will be sitting under
# the same tenant, so there will be fixed *ONLY* one user for holding
# client side resources;
tenant_name: demo_tenant
server_user:
- username: demo_user_1
password: demo_user_1
client_user:
username: demo_user_2
password: demo_user_2

View File

@ -1,11 +0,0 @@
# Compute host topology file for running KloudBuster
# Usage: Pass to KloudBuster using -t
# The compute host name must be exactly the same as shown from NOVA:
# i.e. "nova hypervisor-list"
servers_rack:
hh23-5
clients_rack:
hh23-6

View File

@ -1,120 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Module for credentials in Openstack
import getpass
import os
import re
import log as logging
LOG = logging.getLogger(__name__)
class Credentials(object):
    """Holds OpenStack credentials parsed from an openrc file, openrc
    contents, or the process environment, and exposes them as dicts
    suitable for the keystone and nova client constructors.
    """

    def get_credentials(self):
        """Return the credentials as a dict for the generic/keystone clients."""
        dct = {}
        dct['username'] = self.rc_username
        dct['password'] = self.rc_password
        dct['auth_url'] = self.rc_auth_url
        dct['tenant_name'] = self.rc_tenant_name
        return dct

    def get_nova_credentials(self):
        """Return the credentials as a dict using nova client key names."""
        dct = {}
        dct['username'] = self.rc_username
        dct['api_key'] = self.rc_password
        dct['auth_url'] = self.rc_auth_url
        dct['project_id'] = self.rc_tenant_name
        return dct

    def get_nova_credentials_v2(self):
        """Return nova credentials with the API version pinned to 2."""
        dct = self.get_nova_credentials()
        dct['version'] = 2
        return dct

    def _init_with_openrc_(self, openrc_contents):
        """Parse 'export OS_*=value' lines out of openrc contents and fill
        in rc_username, rc_auth_url and rc_tenant_name.
        """
        export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
        for line in openrc_contents.splitlines():
            line = line.strip()
            mstr = export_re.match(line)
            if mstr:
                # get rid of possible trailing double quote
                # the first one was removed by the re
                name = mstr.group(1)
                value = mstr.group(2)
                if value.endswith('"'):
                    value = value[:-1]
                # get rid of password assignment
                # echo "Please enter your OpenStack Password: "
                # read -sr OS_PASSWORD_INPUT
                # export OS_PASSWORD=$OS_PASSWORD_INPUT
                if value.startswith('$'):
                    continue
                # now match against wanted variable names
                if name == 'USERNAME':
                    self.rc_username = value
                elif name == 'AUTH_URL':
                    self.rc_auth_url = value
                elif name == 'TENANT_NAME':
                    self.rc_tenant_name = value

    # Read a openrc file and take care of the password
    # The 2 args are passed from the command line and can be None
    def __init__(self, openrc_file=None, openrc_contents=None, pwd=None, no_env=False):
        """Build credentials from (in priority order) openrc_file,
        openrc_contents, or the OS_* environment variables; the password
        comes from pwd, then OS_PASSWORD, then an interactive prompt.
        """
        self.rc_password = None
        self.rc_username = None
        self.rc_tenant_name = None
        self.rc_auth_url = None
        self.openrc_contents = openrc_contents
        success = True

        if openrc_file:
            if os.path.exists(openrc_file):
                # use a context manager so the file handle is always closed
                # (the previous open(...).read() leaked the handle)
                with open(openrc_file) as rcf:
                    self.openrc_contents = rcf.read()
            else:
                LOG.error("rc file does not exist %s" % openrc_file)
                success = False
                return
        if self.openrc_contents:
            self._init_with_openrc_(self.openrc_contents)
        elif not no_env:
            # no openrc file passed - we assume the variables have been
            # sourced by the calling shell
            # just check that they are present
            for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_TENANT_NAME']:
                if varname not in os.environ:
                    LOG.warn("%s is missing" % varname)
                    success = False
            if success:
                self.rc_username = os.environ['OS_USERNAME']
                self.rc_auth_url = os.environ['OS_AUTH_URL']
                self.rc_tenant_name = os.environ['OS_TENANT_NAME']

        # always override with CLI argument if provided
        if pwd:
            self.rc_password = pwd
        # if password not know, check from env variable
        elif self.rc_auth_url and not self.rc_password and success:
            if 'OS_PASSWORD' in os.environ and not no_env:
                self.rc_password = os.environ['OS_PASSWORD']
            else:
                # interactively ask for password
                self.rc_password = getpass.getpass(
                    'Please enter your OpenStack Password: ')
        if not self.rc_password:
            self.rc_password = ""

View File

@ -1,52 +0,0 @@
====================================
KLOUDBUSTER IMAGE BUILD INSTRUCTIONS
====================================
There are 2 ways to build the kloudbuster image:
* using Vagrant (recommended to build the image on Mac)
* using the build-image.sh script (recommended to build the image on Linux)
Build on Mac OSX
================
Pre-Requisites
--------------
* must have access to the Internet (to allow download of packages)
* must install Vagrant (https://www.vagrantup.com/downloads.html)
* must install VirtualBox (https://www.virtualbox.org/wiki/Downloads)
Instructions
------------
* Open a shell window
* cd to the scale/dib directory (where this README.rst file resides)
* run vagrant: "vagrant up"
The build should take around 5-7 minutes (may vary depending on the speed of your Internet connection) and you should see the kloudbuster.qcow2 image appear in the current directory.
After the image is built, simply discard the vagrant VM: "vagrant destroy"
Build on Linux
==============
Pre-Requisites
--------------
* must have access to the Internet (to allow download of packages)
* must install git
* must install qemu-utils
Instructions
------------
* clone the kloudbuster git repository somewhere
* git clone -b kloudbuster git://github.com/stackforge/vmtp.git
* cd vmtp/scale/dib
* ./build-image.sh
The build should take around 5-7 minutes (may vary depending on the speed of your Internet connection) and you should see the kloudbuster.qcow2 image appear in the current directory.
After the image is built, move the image in a safe location and delete the vmtp directory.

93
scale/dib/Vagrantfile vendored
View File

@ -1,93 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# This vagrant file will create a VM to build the kloudbuster qcow2 image
# automatically

# Script for installing the diskimage-builder and kloudbuster repo
#
$script = <<SCRIPT
apt-get update
apt-get -y install git
apt-get -y install qemu-utils
# install diskimage-builder
git clone git://github.com/openstack/diskimage-builder.git
git clone git://github.com/openstack/dib-utils.git
# install kloudbuster
git clone -b kloudbuster git://github.com/stackforge/vmtp.git
# Add diskimage-builder and dib-utils bin to the path
export PATH=$PATH:`pwd`/diskimage-builder/bin:`pwd`/dib-utils/bin
# Add the kloudbuster elements directory to the DIB elements path
export ELEMENTS_PATH=`pwd`/vmtp/scale/dib/elements
time disk-image-create -o kloudbuster ubuntu kloudbuster
mv kloudbuster.qcow2 /vagrant
SCRIPT

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
  # The most common configuration options are documented and commented below.
  # For a complete reference, please see the online documentation at
  # https://docs.vagrantup.com.

  # Every Vagrant development environment requires a box. You can search for
  # boxes at https://atlas.hashicorp.com/search.
  config.vm.box = "ubuntu/trusty64"

  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  # config.vm.box_check_update = false

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # config.vm.network "forwarded_port", guest: 80, host: 8080

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network "private_network", ip: "192.168.33.10"

  # Create a public network, which generally matched to bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  # config.vm.synced_folder "../data", "/vagrant_data"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  config.vm.provider "virtualbox" do |vb|
    # Display the VirtualBox GUI when booting the machine
    # vb.gui = true

    # Customize the amount of memory on the VM:
    # 2 GB is enough for diskimage-builder to assemble the qcow2 image
    vb.memory = "2048"
  end
  #
  # View the documentation for the provider you are using for more
  # information on available options.

  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
  # such as FTP and Heroku are also available. See the documentation at
  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
  # config.push.define "atlas" do |push|
  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
  # end

  # Enable provisioning with a shell script. Additional provisioners such as
  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
  # documentation for more information about their specific syntax and use.
  config.vm.provision "shell", inline: $script
end

View File

@ -1,26 +0,0 @@
#!/bin/bash
#
# A shell script to build the kloudbuster image using diskimage-builder
#
# The following packages must be installed prior to using this script:
# sudo apt-get -y install git
# sudo apt-get -y install qemu-utils

# install diskimage-builder
git clone git://github.com/openstack/diskimage-builder.git
git clone git://github.com/openstack/dib-utils.git

# Add diskimage-builder and dib-utils bin to the path
export PATH=$PATH:`pwd`/diskimage-builder/bin:`pwd`/dib-utils/bin

# Add the kloudbuster elements directory to the DIB elements path
export ELEMENTS_PATH=`pwd`/elements

# build the image; the result is kloudbuster.qcow2 in the current directory
time disk-image-create -o kloudbuster ubuntu kloudbuster
ls -l kloudbuster.qcow2

# cleanup
rm -rf diskimage-builder dib-utils

View File

@ -1,10 +0,0 @@
===========
KloudBuster
===========
KloudBuster Image
Contains all the packages and files needed to run a universal KloudBuster VM
The same image can run using one of the following roles (Assigned from the user-data python program):
- Server VM for a given traffic type (e.g. http server or tcp/udp server)
- Client VM for a given traffic type (e.g. http client or tcp/udp client)
- Redis server (only 1 instance in the client cloud)

View File

@ -1,3 +0,0 @@
vm
install-static
package-installs

View File

@ -1,10 +0,0 @@
nginx:
iperf:
nuttcp:
redis-server:
python-pip:
git:
build-essential:
libssl-dev:

View File

@ -1,80 +0,0 @@
# script for kloudbuster
# Tune the Linux kernel for scale test
echo "* soft nofile 102400" >> /etc/security/limits.conf
echo "* hard nofile 102400" >> /etc/security/limits.conf
cat << EOF >> /etc/sysctl.conf
fs.file-max=6553550
net.core.wmem_max=8388608
net.core.wmem_default=8388608
net.core.rmem_max=33554432
net.core.rmem_default=33554432
net.core.netdev_max_backlog=100000
net.ipv4.icmp_ratelimit=0
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_max_tw_buckets=65536
net.ipv4.tcp_fin_timeout=15
net.ipv4.tcp_max_syn_backlog=65536
net.ipv4.tcp_syncookies=1
net.ipv4.neigh.default.gc_thresh1=4096
net.ipv4.neigh.default.gc_thresh2=4096
net.ipv4.neigh.default.gc_thresh3=4096
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.all.arp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_filter=0
net.ipv4.conf.eth0.rp_filter=0
net.ipv4.conf.eth0.arp_filter=0
EOF
sysctl -p
# do not autostart the redis server or the nginx server
# let the KB agent start the appropriate server (if applicable)
update-rc.d -f redis-server remove
update-rc.d -f nginx remove
# Change permission to 777 for kb_test
chmod -R 777 /kb_test
# Generate a 32K HTML files
mkdir -p /data/www
cd /data/www/
dd if=/dev/zero of=index.html bs=32K count=1
chmod -R 777 /data
cd -
# redis server should listen on all interfaces
sed -i "s/127.0.0.1/0.0.0.0/g" /etc/redis/redis.conf
# if started nginx should be allowed to open more file descriptors
sed -i 's/start-stop-daemon\ --start/ulimit\ \-n\ 102400\n\t\0/g' /etc/init.d/nginx
# Auto start the KloudBuster Agent, with user-data
sed -i "s/^exit\s0/cd \/kb_test\n\0/g" /etc/rc.local
sed -i "s/^exit\s0/wget http\:\/\/169.254.169.254\/latest\/user-data\n\0/g" /etc/rc.local
sed -i "s/^exit\s0/python \/kb_test\/kb_vm_agent.py \&\n\0/g" /etc/rc.local
# ======
# Client
# ======
# python redis client
pip install redis
# install the http traffic generator
# FIX: use https:// instead of the legacy git:// protocol, which GitHub no
# longer serves (unauthenticated git protocol was disabled in 2022)
git clone https://github.com/yicwang/wrk2.git
cd wrk2
make
mv wrk /usr/local/bin/wrk2
cd ..
rm -rf wrk2
# uninstall unneeded packages
apt-get -y --purge remove git
apt-get -y --purge remove python-pip
apt-get -y --purge remove build-essential
apt-get -y --purge autoremove
apt-get -y autoclean

View File

@ -1,33 +0,0 @@
#!/usr/bin/env python
"""Patch /etc/cloud/cloud.cfg so the default login user is "cloud-user".

cloud-init has two config syntaxes for the default user:
  v1: a top-level "user:" key
  v2: system_info/default_user/name
Detect which one this cloud.cfg uses and rewrite it in place.
"""
import yaml

cloudcfg = "/etc/cloud/cloud.cfg"
user = "cloud-user"

with open(cloudcfg) as f:
    # FIX: safe_load is sufficient for cloud.cfg (plain YAML) and avoids the
    # arbitrary object construction that bare yaml.load() allows.
    cfg = yaml.safe_load(f)

try:
    if cfg['system_info']['default_user']['name']:
        synver = "2"
except KeyError:
    synver = "1"

if synver == "1":
    if cfg['user'] == user:
        print("No change needed")
        exit()
    else:
        cfg['user'] = user
elif synver == "2":
    if cfg['system_info']['default_user']['name'] == user:
        print("No change needed")
        exit()
    else:
        # Change the user to cloud-user
        cfg['system_info']['default_user']['name'] = user
        cfg['system_info']['default_user']['gecos'] = "Cloud User"
        print(cfg['system_info']['default_user']['name'])

with open(cloudcfg, "w") as f:
    yaml.dump(cfg, f, default_flow_style=False)

View File

@ -1,22 +0,0 @@
# nginx configuration for the KloudBuster server VM image
user www-data;
worker_processes 8;
pid /run/nginx.pid;
events {
# cap on simultaneous connections per worker process
worker_connections 2048;
}
http {
server {
location / {
# serve the pre-generated 32K index.html from /data/www
root /data/www;
access_log off;
}
location /status {
# expose basic nginx metrics (stub_status) for monitoring
stub_status on;
access_log off;
#deny all;
allow all;
}
}
}

View File

@ -1,273 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import subprocess
import sys
import threading
import time
import redis
# Define the version of the KloudBuster agent.
#
# When VM is up running, the agent will send the READY message to the
# KloudBuster main program, along with its version. The main program
# will check the version to see whether the image meets the minimum
# requirements to run, and stopped with an error if not.
#
# Note: All majors are compatible regardless of minor.
__version__ = '1.0'
class KB_Instance(object):
    """Builds the shell command strings that the agent executes on its VM."""

    @staticmethod
    def check_http_service(target_url):
        """Return a shell loop that blocks until the HTTP service answers."""
        script_lines = ['while true; do\n',
                        'curl --head %s --connect-timeout 2 --silent\n' % (target_url),
                        'if [ $? -eq 0 ]; then break; fi\n',
                        'done']
        return ''.join(script_lines)

    @staticmethod
    def add_static_route(network, next_hop_ip, if_name=None):
        """Return the command that installs a static route to *network*."""
        msg = "Adding static route %s with next hop %s" % (network, next_hop_ip)
        route_cmd = "sudo ip route add %s via %s" % (network, next_hop_ip)
        if if_name:
            msg += " and %s" % if_name
            route_cmd += " dev %s" % if_name
        # TODO(Logging on Agent)
        print(msg)
        return route_cmd

    @staticmethod
    def get_static_route(network, next_hop_ip=None, if_name=None):
        """Return the command that shows the route entry for *network*."""
        pieces = ["ip route show %s" % network]
        if next_hop_ip:
            pieces.append(" via %s" % next_hop_ip)
        if if_name:
            pieces.append(" dev %s" % if_name)
        return ''.join(pieces)

    @staticmethod
    def delete_static_route(network, next_hop_ip=None, if_name=None):
        """Return the command that removes the static route to *network*."""
        msg = "Deleting static route %s" % network
        route_cmd = "sudo ip route del %s" % network
        if next_hop_ip:
            # NOTE(review): these branches overwrite msg instead of appending
            # (looks like "+=" was intended) — kept as-is to preserve behavior
            msg = " with next hop %s" % next_hop_ip
            route_cmd += " via %s" % next_hop_ip
        if if_name:
            if next_hop_ip:
                msg = " and %s" % if_name
            else:
                msg = "with next hop %s" % if_name
            route_cmd += " dev %s" % if_name
        # TODO(Logging on Agent)
        print(msg)
        return route_cmd

    @staticmethod
    def run_http_test(dest_path, target_url, threads, connections,
                      rate_limit, duration, timeout, connection_type,
                      report_interval):
        """Return the wrk2 invocation for the HTTP benchmark."""
        # 65535 effectively means "no rate limit" for wrk2
        effective_rate = rate_limit if rate_limit else 65535
        return '%s -t%d -c%d -R%d -d%ds -p%ds --timeout %ds -D2 -j %s' % \
            (dest_path, threads, connections, effective_rate, duration,
             report_interval, timeout, target_url)
class KB_VM_Agent(object):
    """The KloudBuster agent that runs inside every client VM.

    It connects to the redis server on the KB proxy node, announces itself
    with READY messages until acknowledged, then executes commands published
    on the orchestration channel and publishes results on the report channel.
    """

    def __init__(self, user_data):
        host = user_data['redis_server']
        port = user_data['redis_server_port']
        self.user_data = user_data
        self.redis_obj = redis.StrictRedis(host=host, port=port)
        self.pubsub = self.redis_obj.pubsub(ignore_subscribe_messages=True)
        self.hello_thread = None
        self.stop_hello = threading.Event()
        self.vm_name = user_data['vm_name']
        # Pub/sub channel names shared with the KloudBuster master
        self.orches_chan_name = "kloudbuster_orches"
        self.report_chan_name = "kloudbuster_report"
        # Last shell command executed; reported back on failure for debugging
        self.last_cmd = None

    def setup_channels(self):
        """Wait until the redis server is reachable, then subscribe."""
        # Check for connections to redis server
        while (True):
            try:
                self.redis_obj.get("test")
            except (redis.exceptions.ConnectionError):
                time.sleep(1)
                continue
            break
        # Subscribe to orchestration channel
        self.pubsub.subscribe(self.orches_chan_name)

    def report(self, cmd, client_type, data):
        """Publish a message to the master on the report channel."""
        message = {'cmd': cmd, 'sender-id': self.vm_name,
                   'client-type': client_type, 'data': data}
        self.redis_obj.publish(self.report_chan_name, message)

    def send_hello(self):
        # Sending "hello" message to master node every 2 seconds
        while not self.stop_hello.is_set():
            self.report('READY', None, __version__)
            time.sleep(2)

    def exec_command(self, cmd):
        """Execute *cmd* through bash; return (returncode, stdout, stderr)."""
        cmds = ['bash', '-c']
        cmds.append(cmd)
        p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        return (p.returncode, stdout, stderr)

    def exec_command_report(self, cmd):
        """Execute *cmd*, publishing intermediate JSON reports periodically.

        Returns (returncode, last_output, stderr). The very last report is
        NOT published here: it is returned so the caller can send it with
        the 'DONE' command instead of 'REPORT'.
        """
        cmd_res_dict = None
        cmds = ['bash', '-c']
        cmds.append(cmd)
        p_output = ''
        p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        lines_iterator = iter(p.stdout.readline, b"")
        for line in lines_iterator:
            # One exception, if this is the very last report, we will send it
            # through "DONE" command, not "REPORT". So what's happening here
            # is to determine whether this is the last report.
            if cmd_res_dict:
                self.report('REPORT', 'http', cmd_res_dict)
                cmd_res_dict = None
                p_output = line
            else:
                p_output += line
                # a bare "}" line marks the end of one JSON report block
                if line.strip() == "}":
                    cmd_res_dict = dict(zip(("status", "stdout", "stderr"), (0, p_output, '')))
                    continue
        stderr = p.communicate()[1]
        return (p.returncode, p_output, stderr)

    def process_cmd(self, message):
        """Dispatch one orchestration message from the master."""
        if message['cmd'] == 'ACK':
            # When 'ACK' is received, means the master node
            # acknowledged the current VM. So stopped sending more
            # "hello" packet to the master node.
            # Unfortunately, there is no thread.stop() in Python 2.x
            self.stop_hello.set()
        elif message['cmd'] == 'EXEC':
            self.last_cmd = ""
            try:
                # SECURITY FIX: dispatch via getattr instead of
                # eval('self.exec_' + ... + '()') so a malicious 'cmd' string
                # cannot execute arbitrary Python. A missing handler raises
                # AttributeError, caught below exactly like eval's failure.
                handler = getattr(self, 'exec_' + message['data']['cmd'])
                cmd_res_tuple = handler()
                cmd_res_dict = dict(zip(("status", "stdout", "stderr"), cmd_res_tuple))
            except Exception as exc:
                cmd_res_dict = {
                    "status": 1,
                    "stdout": self.last_cmd,
                    "stderr": str(exc)
                }
            self.report('DONE', message['client-type'], cmd_res_dict)
        elif message['cmd'] == 'ABORT':
            # TODO(Add support to abort a session)
            pass
        else:
            # Unexpected
            # TODO(Logging on Agent)
            print('ERROR: Unexpected command received!')

    def work(self):
        """Main loop: consume orchestration messages forever."""
        for item in self.pubsub.listen():
            if item['type'] != 'message':
                continue
            # Convert the string representation of dict to real dict obj
            # NOTE(review): eval() on data read from the redis channel is
            # unsafe if the channel is reachable by untrusted parties;
            # ast.literal_eval would be the safer choice — confirm the
            # master only ever publishes literal dicts before switching.
            message = eval(item['data'])
            self.process_cmd(message)

    def exec_setup_static_route(self):
        """Install the static route to the tested cloud if not present."""
        self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
        result = self.exec_command(self.last_cmd)
        if (self.user_data['target_subnet_ip'] not in result[1]):
            self.last_cmd = KB_Instance.add_static_route(
                self.user_data['target_subnet_ip'],
                self.user_data['target_shared_interface_ip'])
            return self.exec_command(self.last_cmd)
        else:
            return (0, '', '')

    def exec_check_http_service(self):
        """Block until the target HTTP server responds."""
        self.last_cmd = KB_Instance.check_http_service(self.user_data['target_url'])
        return self.exec_command(self.last_cmd)

    def exec_run_http_test(self):
        """Run the wrk2 benchmark against the target URL."""
        self.last_cmd = KB_Instance.run_http_test(
            dest_path=self.user_data['http_tool']['dest_path'],
            target_url=self.user_data['target_url'],
            **self.user_data['http_tool_configs'])
        return self.exec_command_report(self.last_cmd)
def exec_command(cmd):
    """Run *cmd* (an argv list) and return its exit status."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # drain the pipes so the child cannot block on a full buffer
    proc.communicate()
    return proc.returncode
def start_redis_server():
    """Start the redis server service (KB-PROXY role)."""
    return exec_command(['sudo', 'service', 'redis-server', 'start'])
def start_nuttcp_server():
    """Start nuttcp listening on port 5002 (Server role, tcp/udp traffic)."""
    return exec_command(['/usr/bin/nuttcp', '-P5002', '-S', '--single-threaded'])
def start_nginx_server():
    """Start the nginx service (Server role, http traffic)."""
    return exec_command(['sudo', 'service', 'nginx', 'start'])
if __name__ == "__main__":
    # The user-data file is provided by the KloudBuster master through the
    # metadata service and tells this VM which role it plays.
    try:
        with open('user-data', 'r') as f:
            user_data = eval(f.read())
    except Exception as e:
        # TODO(Logging on Agent)
        print(e.message)
        sys.exit(1)

    if 'role' not in user_data:
        sys.exit(1)

    role = user_data['role']
    if role == 'KB-PROXY':
        # The proxy VM only hosts the redis server used for orchestration
        sys.exit(start_redis_server())
    if role == 'Server':
        rc1 = start_nuttcp_server()
        rc2 = start_nginx_server()
        sys.exit(rc1 or rc2)
    elif role == 'Client':
        agent = KB_VM_Agent(user_data)
        agent.setup_channels()
        agent.hello_thread = threading.Thread(target=agent.send_hello)
        agent.hello_thread.daemon = True
        agent.hello_thread.start()
        agent.work()
    else:
        sys.exit(1)

View File

@ -1,82 +0,0 @@
#! /bin/bash
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# A helper script that force-deletes every OpenStack resource whose name
# contains "KB" (the marker used by KloudBuster for everything it creates).
###############################################################################
# #
# This is a helper script which will delete all resources created by #
# KloudBuster. #
# #
# Normally, KloudBuster will clean up automatically when it is done. However, #
# sometimes errors or timeouts happen during the rescource creation stage, #
# which will cause KloudBuster out of sync with the real environment. If that #
# happens, a force cleanup may be needed. #
# #
# This script will simply grep the resource name with "KB" and delete them. #
# If running on a production network, please double and triple check all #
# resources names are *NOT* containing "KB", otherwise they will be deleted #
# when running with this script. #
# #
###############################################################################
# WARNING! WARNING! WARNING!
# IMPORTANT FOR RUNNING KLOUDBUSTER ON PRODUCTION CLOUDS
# ======================================================
#
# DOUBLE CHECK THE NAMES OF ALL RESOURCES THAT DOES NOT BELONG TO KLOUDBUSTER
# ARE *NOT* CONTAINING "KB"
# Delete all KloudBuster VMs
for line in `nova list --all-tenants | grep KB | cut -d'|' -f2`; do
nova delete $line
done
# Delete floating IPs that are no longer associated with any port
echo -e "`neutron floatingip-list | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`" | while read line; do
fid=`echo $line | cut -d'|' -f2 | xargs`
portid=`echo $line | cut -d'|' -f5 | xargs`
if [ "$fid" != "" ] && [ "$portid" = "" ]; then
neutron floatingip-delete $fid &
fi
done;
# Delete KloudBuster security groups and flavors
for line in `neutron security-group-list | grep KB | cut -d'|' -f2`; do
neutron security-group-delete $line &
done;
for line in `nova flavor-list | grep kb | cut -d'|' -f3`; do
nova flavor-delete $line &
done;
# Routers must have their gateway and interfaces removed before deletion
for line in `neutron router-list | grep KB | cut -d'|' -f2`; do
neutron router-gateway-clear $line
for line2 in `neutron router-port-list $line | grep subnet | cut -d'"' -f4`; do
neutron router-interface-delete $line $line2
done
neutron router-delete $line
done
# Delete networks, then the tenants and users that owned them
for line in `neutron net-list | grep KB | cut -d'|' -f2`; do
neutron net-delete $line
done
for line in `keystone tenant-list | grep KB | cut -d'|' -f2`; do
keystone tenant-delete $line
done
for line in `keystone user-list | grep KB | cut -d'|' -f2`; do
keystone user-delete $line
done

View File

@ -1,161 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import configure
import log as logging
from oslo_config import cfg
import credentials
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class KBConfigParseException(Exception):
pass
# Some hardcoded client side options we do not want users to change
hardcoded_client_cfg = {
# Number of tenants to be created on the cloud
'number_tenants': 1,
# Number of Users to be created inside the tenant
'users_per_tenant': 1,
# Number of routers to be created within the context of each User
# For now support only 1 router per user
'routers_per_user': 1,
# Number of networks to be created within the context of each Router
# Assumes 1 subnet per network
'networks_per_router': 1,
# Number of VM instances to be created within the context of each Network
'vms_per_network': 1,
# Number of security groups per network
'secgroups_per_network': 1
}
def get_absolute_path_for_file(file_name):
    '''
    Return the absolute path of *file_name*, resolved relative to the
    directory containing this module.
    '''
    # os.path.abspath() already handles the case where __file__ is a
    # relative path, so the original's two branches (which split the path on
    # the literal "kb_config.py") collapse to a single dirname/join — this
    # also matches the implementation used by kb_gen_chart.py.
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, file_name)
class KBConfig(object):
    """Aggregates every configuration source used by a KloudBuster run."""

    def __init__(self):
        # The default configuration file shipped with KloudBuster
        default_cfg_file = get_absolute_path_for_file("cfg.scale.yaml")
        # Read the configuration file
        self.config_scale = configure.Configuration.from_file(default_cfg_file).configure()
        self.cred_tested = None
        self.cred_testing = None
        self.server_cfg = None
        self.client_cfg = None
        self.topo_cfg = None
        self.tenants_list = None

    def update_configs(self):
        """Split the merged config into server/client halves and finalize."""
        # Initialize the key pair name
        if self.config_scale['public_key_file']:
            # verify the public key file exists
            if not os.path.exists(self.config_scale['public_key_file']):
                LOG.error('Error: Invalid public key file: ' + self.config_scale['public_key_file'])
                sys.exit(1)
        else:
            # pick the user's public key if there is one
            pub_key = os.path.expanduser('~/.ssh/id_rsa.pub')
            if os.path.isfile(pub_key):
                self.config_scale['public_key_file'] = pub_key
                LOG.info('Using %s as public key for all VMs' % (pub_key))

        # A bit of config dict surgery: pop out the client and server
        # sections and transplant the remaining (common) part into both.
        self.server_cfg = self.config_scale.pop('server')
        self.client_cfg = self.config_scale.pop('client')
        self.server_cfg.update(self.config_scale)
        self.client_cfg.update(self.config_scale)

        # Hardcode a few client side options
        self.client_cfg.update(hardcoded_client_cfg)

        # Match the client-side VM count 1:1 with the server side,
        # plus one extra VM in the client kloud acting as the proxy node
        self.client_cfg['vms_per_network'] = \
            self.get_total_vm_count(self.server_cfg) + 1

    def init_with_cli(self):
        """Populate configuration from the command-line options."""
        self.get_credentials()
        self.get_configs()
        self.get_topo_cfg()
        self.get_tenants_list()
        self.update_configs()

    def init_with_rest_api(self, **kwargs):
        """Populate configuration from REST API supplied values."""
        self.cred_tested = kwargs['cred_tested']
        self.cred_testing = kwargs['cred_testing']
        self.topo_cfg = kwargs['topo_cfg']
        self.tenants_list = kwargs['tenants_list']
        self.update_configs()

    def get_total_vm_count(self, config):
        """Total VM count implied by *config* (product of all scale levels)."""
        return (config['number_tenants'] * config['users_per_tenant'] *
                config['routers_per_user'] * config['networks_per_router'] *
                config['vms_per_network'])

    def get_credentials(self):
        # Retrieve the credentials; reuse the tested cloud's openrc file for
        # the testing cloud unless a distinct one was given.
        self.cred_tested = credentials.Credentials(openrc_file=CONF.tested_rc,
                                                   pwd=CONF.tested_passwd,
                                                   no_env=CONF.no_env)
        if CONF.testing_rc and CONF.testing_rc != CONF.tested_rc:
            self.cred_testing = credentials.Credentials(openrc_file=CONF.testing_rc,
                                                        pwd=CONF.testing_passwd,
                                                        no_env=CONF.no_env)
        else:
            # Use the same openrc file for both cases
            self.cred_testing = self.cred_tested

    def get_configs(self):
        # Merge a user supplied config file on top of the defaults
        if CONF.config:
            alt_config = configure.Configuration.from_file(CONF.config).configure()
            self.config_scale = self.config_scale.merge(alt_config)

    def get_topo_cfg(self):
        if CONF.topology:
            self.topo_cfg = configure.Configuration.from_file(CONF.topology).configure()

    def get_tenants_list(self):
        # A reused tenant/user list overrides the configured tenant counts
        if CONF.tenants_list:
            self.tenants_list = configure.Configuration.from_file(CONF.tenants_list).configure()
            try:
                self.config_scale['number_tenants'] = 1
                self.config_scale['users_per_tenant'] = len(self.tenants_list['server_user'])
            except Exception as e:
                LOG.error('Cannot parse the count of tenant/user from the config file.')
                raise KBConfigParseException(e.message)

View File

@ -1,217 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# A tool that can represent KloudBuster json results in
# a nicer form using HTML5, bootstrap.js and the Google Charts Javascript library
#
import argparse
import json
import os
import os.path
import sys
import webbrowser
from jinja2 import Environment
from jinja2 import FileSystemLoader
__version__ = '0.0.1'
kb_html_tpl = "./kb_tpl.jinja"
def get_formatted_num(value):
    """Render *value* with thousands separators, e.g. 1234 -> '1,234'."""
    return format(value, ',')
# table column names (rendered as raw HTML; the <i> tags are bootstrap
# glyphicon icons shown next to each column header)
col_names = ['<i class="glyphicon glyphicon-file"></i> File',
'<i class="glyphicon glyphicon-random"></i> Connections',
'<i class="glyphicon glyphicon-book"></i> Server VMs',
'<i class="glyphicon glyphicon-transfer"></i> Requests',
'<i class="glyphicon glyphicon-fire"></i> Socket errors',
'<i class="glyphicon glyphicon-time"></i> RPS measured',
'<i class="glyphicon glyphicon-pencil"></i> RPS requested',
'<i class="glyphicon glyphicon-cloud-download"></i> RX throughput (Gbps)']
def get_new_latency_tuples():
    '''Returns a freshly initialized list of latency rows.

    The latency tuples must be formatted like this:
        ['Percentile', 'fileA', 'fileB'],
        [2.0, 1, 3],
        [4.0, 27, 38],
        etc...
    with the first column being calculated from the percentile
    using the formula (1/(1-percentile)):
        50% -> 1/0.5 = 2
        75% -> 4
        etc...
    This horizontal scaling is used to stretch the chart at the top end
    (towards 100%). Each run appends one value per row (file name in the
    header row, then its 50%, 75%, ... 99.999% latencies).
    '''
    # scale factors for 50%, 75%, 90%, 99%, 99.9%, 99.99%, 99.999%
    scale_factors = (2, 4, 10, 100, 1000, 10000, 100000)
    return [['Percentile']] + [[factor] for factor in scale_factors]
class KbReport(object):
    """Renders a list of KloudBuster run results into an HTML report."""

    def __init__(self, data_list, line_rate):
        self.data_list = data_list
        self.latency_tuples = get_new_latency_tuples()
        self.common_stats = []
        self.table = None
        # the jinja template is looked up relative to the current directory
        template_loader = FileSystemLoader(searchpath=".")
        template_env = Environment(loader=template_loader)
        self.tpl = template_env.get_template(kb_html_tpl)
        self.line_rate = line_rate

    def add_latency_stats(self, run_results):
        """Append one run's latency column to the latency rows."""
        # first cell of the column is the run (file) name, then latencies
        # converted from usec to msec
        column = [run_results['filename']]
        column.extend(pair[1] / 1000 for pair in run_results['latency_stats'])
        for row, cell in zip(self.latency_tuples, column):
            row.append(cell)

    def prepare_table(self):
        """Build the summary table structure consumed by the template."""
        rows = []
        for run_res in self.data_list:
            rps_max = run_res['http_rate_limit'] * run_res['total_client_vms']
            # KB/s -> Gbps
            rx_tp = float(run_res['http_throughput_kbytes'])
            rx_tp = round(rx_tp * 8 / (1024 * 1024), 1)
            cells = [run_res['filename'],
                     get_formatted_num(run_res['total_connections']),
                     get_formatted_num(run_res['total_server_vms']),
                     get_formatted_num(run_res['http_total_req']),
                     get_formatted_num(run_res['http_sock_err'] + run_res['http_sock_timeout']),
                     get_formatted_num(run_res['http_rps']),
                     get_formatted_num(rps_max)]
            rows.append({'cells': cells,
                         'rx': {'value': rx_tp,
                                'max': self.line_rate,
                                'percent': (rx_tp * 100) / self.line_rate}})
        self.table = {'col_names': col_names, 'rows': rows}

    def plot(self, dest_file):
        """Render the full report to *dest_file*."""
        for run_results in self.data_list:
            self.add_latency_stats(run_results)
        self.prepare_table()
        kbstats = {
            'table': self.table,
            'latency_tuples': self.latency_tuples,
            # enable the search box only when there are many runs
            'search_page': 'true' if len(self.data_list) > 10 else 'false'
        }
        with open(dest_file, 'w') as dest:
            print('Generating chart drawing code to ' + dest_file + '...')
            dest.write(self.tpl.render(kbstats=kbstats))
def get_display_file_name(filename):
    """Return the base name of *filename* with its extension stripped."""
    base = os.path.basename(filename)
    stem, _ext = os.path.splitext(base)
    return stem
def guess_line_rate(data_list):
    """Guess the NIC line rate (in Gbps) from the best measured throughput.

    Returns the smallest typical GE line rate (10, 40 or 100 Gbps) that is
    above the maximum HTTP throughput observed across all runs.
    """
    # BUG FIX: the original iterated with "for data_list in data_list",
    # shadowing the parameter; it worked by accident but was fragile and
    # unreadable — use a distinct loop variable.
    max_tp_kb = 0
    for run_res in data_list:
        max_tp_kb = max(max_tp_kb, run_res['http_throughput_kbytes'])
    max_tp_gb = (max_tp_kb * 8) / (1000 * 1000)
    # typical GE line rates are 10, 40 and 100
    if max_tp_gb < 10:
        return 10
    if max_tp_gb < 40:
        return 40
    return 100
def gen_chart(file_list, chart_dest, browser, line_rate):
    """Load KloudBuster json result files and render them into an HTML chart.

    Exits the process if any input file is missing. When *line_rate* is 0
    it is guessed from the measured throughput. When *browser* is true the
    generated chart is opened in the default web browser.
    """
    data_list = []
    for res_file in file_list:
        print('processing: ' + res_file)
        if not os.path.isfile(res_file):
            # BUG FIX: the original message concatenated the filename after a
            # literal '%s' ("No such file %s: " + res_file) instead of
            # interpolating it
            print('Error: No such file: %s' % res_file)
            sys.exit(1)
        with open(res_file) as data_file:
            results = json.load(data_file)
            results['filename'] = get_display_file_name(res_file)
            data_list.append(results)
    if not line_rate:
        line_rate = guess_line_rate(data_list)
    print(line_rate)
    chart = KbReport(data_list, line_rate)
    print('Generating report to ' + chart_dest + '...')
    chart.plot(chart_dest)
    if browser:
        url = 'file://' + os.path.abspath(chart_dest)
        webbrowser.open(url, new=2)
def get_absolute_path_for_file(file_name):
    '''
    Return the filename in absolute path for any file
    passed as relative path.
    '''
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return module_dir + '/' + file_name
if __name__ == '__main__':
    # Command-line front end: parse options, then hand off to gen_chart()
    parser = argparse.ArgumentParser(
        description='KloudBuster Chart Generator V' + __version__)
    parser.add_argument('-c', '--chart', dest='chart', action='store',
                        metavar='<file>',
                        help='create and save chart in html file')
    parser.add_argument('-b', '--browser', dest='browser',
                        action='store_true', default=False,
                        help='display (-c) chart in the browser')
    parser.add_argument('-v', '--version', dest='version',
                        action='store_true', default=False,
                        help='print version of this script and exit')
    parser.add_argument('-l', '--line-rate', dest='line_rate',
                        action='store', default=0, type=int,
                        metavar='<rate-Gbps>',
                        help='line rate in Gbps (default=10)')
    parser.add_argument(dest='files', nargs="+", metavar='<file>',
                        help='KloudBuster json result file')
    opts = parser.parse_args()

    if opts.version:
        print('Version ' + __version__)
        sys.exit(0)

    gen_chart(opts.files, opts.chart, opts.browser, opts.line_rate)

View File

@ -1,256 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils.version import LooseVersion
import time
import log as logging
import redis
LOG = logging.getLogger(__name__)
class KBVMUpException(Exception):
    """Raised when one or more client VMs never report READY."""


class KBSetStaticRouteException(Exception):
    """Raised when a client VM fails to install its static route."""


class KBHTTPServerUpException(Exception):
    """Raised when the HTTP service never comes up in the tested cloud."""


class KBHTTPBenchException(Exception):
    """Raised when the HTTP benchmarking phase fails."""


class KBProxyConnectionException(Exception):
    """Raised when the redis server on the KB proxy cannot be reached."""
class KBRunner(object):
    """
    Control the testing VMs on the testing cloud
    """

    def __init__(self, client_list, config, required_agent_version, single_cloud=True):
        # Map vm_name -> client instance for quick lookup on incoming messages
        self.client_dict = dict(zip([x.vm_name for x in client_list], client_list))
        self.config = config
        self.single_cloud = single_cloud
        self.result = {}
        self.host_stats = {}
        self.tool_result = {}
        self.required_agent_version = str(required_agent_version)
        self.agent_version = None
        # Redis
        self.redis_obj = None
        self.pubsub = None
        self.orches_chan_name = "kloudbuster_orches"
        self.report_chan_name = "kloudbuster_report"

    def setup_redis(self, redis_server, redis_server_port=6379, timeout=120):
        """Connect to the redis server on the KB proxy and subscribe to reports.

        Raises KBProxyConnectionException if the server cannot be reached
        within *timeout* seconds.
        """
        LOG.info("Setting up the redis connections...")
        connection_pool = redis.ConnectionPool(
            host=redis_server, port=redis_server_port, db=0)
        self.redis_obj = redis.StrictRedis(connection_pool=connection_pool,
                                           socket_connect_timeout=1,
                                           socket_timeout=1)
        success = False
        retry_count = max(timeout / self.config.polling_interval, 1)
        # Check for connections to redis server
        for retry in xrange(retry_count):
            try:
                self.redis_obj.get("test")
                success = True
            except (redis.exceptions.ConnectionError):
                LOG.info("Connecting to redis server... Retry #%d/%d", retry, retry_count)
                time.sleep(self.config.polling_interval)
                continue
            break
        if not success:
            LOG.error("Error: Cannot connect to the Redis server")
            raise KBProxyConnectionException()
        # Subscribe to message channel
        self.pubsub = self.redis_obj.pubsub(ignore_subscribe_messages=True)
        self.pubsub.subscribe(self.report_chan_name)

    def dispose(self):
        """Release the redis pub/sub subscription."""
        if self.pubsub:
            self.pubsub.unsubscribe()
            self.pubsub.close()

    def send_cmd(self, cmd, client_type, data):
        """Publish a command to all agents on the orchestration channel."""
        message = {'cmd': cmd, 'sender-id': 'kb-master',
                   'client-type': client_type, 'data': data}
        LOG.kbdebug(message)
        self.redis_obj.publish(self.orches_chan_name, message)

    def polling_vms(self, timeout, polling_interval=None):
        '''
        Polling all VMs for the status of execution
        Guarantee to run once if the timeout is less than polling_interval
        Returns (succeeded_count, failed_count, pending_count).
        '''
        if not polling_interval:
            polling_interval = self.config.polling_interval
        retry_count = max(timeout / polling_interval, 1)
        retry = cnt_succ = cnt_failed = 0
        clist = self.client_dict.copy()
        samples = []
        http_tool = self.client_dict.values()[0].http_tool

        while (retry < retry_count and len(clist)):
            time.sleep(polling_interval)
            sample_count = 0
            while True:
                msg = self.pubsub.get_message()
                if not msg:
                    # No new message, commands are in executing
                    break
                LOG.kbdebug(msg)
                payload = eval(msg['data'])
                vm_name = payload['sender-id']
                instance = self.client_dict[vm_name]
                cmd = payload['cmd']
                if cmd == 'READY':
                    # If a READY packet is received, the corresponding VM is up
                    # running. We mark the flag for that VM, and skip all READY
                    # messages received afterwards.
                    if instance.up_flag:
                        continue
                    else:
                        clist[vm_name].up_flag = True
                        clist.pop(vm_name)
                        cnt_succ = cnt_succ + 1
                        self.agent_version = payload['data']
                elif cmd == 'REPORT':
                    sample_count = sample_count + 1
                    # Parse the results from HTTP Tools
                    instance = self.client_dict[vm_name]
                    self.result[vm_name] = instance.http_client_parser(**payload['data'])
                    samples.append(self.result[vm_name])
                elif cmd == 'DONE':
                    self.result[vm_name] = payload['data']
                    clist.pop(vm_name)
                    if self.result[vm_name]['status']:
                        # Command returned with non-zero status, command failed
                        LOG.error("[%s] %s", vm_name, self.result[vm_name]['stderr'])
                        cnt_failed = cnt_failed + 1
                    else:
                        # Command returned with zero, command succeed
                        cnt_succ = cnt_succ + 1
                elif cmd == 'DEBUG':
                    # BUG FIX: these two log calls used '+' between the format
                    # string and the argument tuple (a TypeError at runtime);
                    # pass the arguments lazily instead.
                    LOG.info('[%s] %s', vm_name, payload['data'])
                else:
                    LOG.error('[%s] received invalid command: %s', vm_name, cmd)

            log_msg = "%d Succeed, %d Failed, %d Pending... Retry #%d" %\
                (cnt_succ, cnt_failed, len(clist), retry)
            if sample_count != 0:
                log_msg += " (%d sample(s) received)" % sample_count
            LOG.info(log_msg)

            if sample_count != 0:
                print(http_tool.consolidate_samples(samples, len(self.client_dict)))
                samples = []
            retry = retry + 1

        return (cnt_succ, cnt_failed, len(clist))

    def wait_for_vm_up(self, timeout=300):
        """Wait until every client VM has sent READY."""
        cnt_succ = self.polling_vms(timeout)[0]
        if cnt_succ != len(self.client_dict):
            raise KBVMUpException()
        self.send_cmd('ACK', None, None)

    def setup_static_route(self, timeout=30):
        """Ask every client VM to install its static route to the tested cloud."""
        func = {'cmd': 'setup_static_route'}
        self.send_cmd('EXEC', 'http', func)
        cnt_succ = self.polling_vms(timeout)[0]
        if cnt_succ != len(self.client_dict):
            raise KBSetStaticRouteException()

    def check_http_service(self, timeout=30):
        """Wait until every client VM can reach its target HTTP server."""
        func = {'cmd': 'check_http_service'}
        self.send_cmd('EXEC', 'http', func)
        cnt_succ = self.polling_vms(timeout)[0]
        if cnt_succ != len(self.client_dict):
            raise KBHTTPServerUpException()

    def run_http_test(self):
        """Run the HTTP benchmark on all client VMs and collect results."""
        func = {'cmd': 'run_http_test'}
        self.send_cmd('EXEC', 'http', func)
        # Give additional 30 seconds for everybody to report results
        timeout = self.config.http_tool_configs.duration + 30
        cnt_pending = self.polling_vms(timeout)[2]
        if cnt_pending != 0:
            LOG.warn("Testing VMs are not returning results within grace period, "
                     "summary shown below may not be accurate!")
        # Parse the results from HTTP Tools
        for key, instance in self.client_dict.items():
            self.result[key] = instance.http_client_parser(**self.result[key])

    def gen_host_stats(self):
        """Consolidate per-VM results into per-physical-host statistics."""
        for vm in self.result.keys():
            phy_host = self.client_dict[vm].host
            if phy_host not in self.host_stats:
                self.host_stats[phy_host] = []
            self.host_stats[phy_host].append(self.result[vm])
        http_tool = self.client_dict.values()[0].http_tool
        for phy_host in self.host_stats:
            self.host_stats[phy_host] = http_tool.consolidate_results(self.host_stats[phy_host])

    def run(self):
        """Full orchestration sequence: wait, verify, benchmark, consolidate."""
        try:
            LOG.info("Waiting for agents on VMs to come up...")
            self.wait_for_vm_up()

            if not self.agent_version:
                self.agent_version = "0.0"
            if (LooseVersion(self.agent_version) < LooseVersion(self.required_agent_version)):
                LOG.error("The VM image you are running is too old (%s), the minimum version "
                          "required is %s.x. Please build the image from latest repository." %
                          (self.agent_version, self.required_agent_version))
                return

            if self.single_cloud:
                LOG.info("Setting up static route to reach tested cloud...")
                self.setup_static_route()

            LOG.info("Waiting for HTTP service to come up...")
            self.check_http_service()

            if self.config.prompt_before_run:
                print("Press enter to start running benchmarking tools...")
                raw_input()

            LOG.info("Running HTTP Benchmarking...")
            self.run_http_test()

            # Call the method in corresponding tools to consolidate results
            http_tool = self.client_dict.values()[0].http_tool
            LOG.kbdebug(self.result.values())
            self.tool_result = http_tool.consolidate_results(self.result.values())
            self.tool_result['http_rate_limit'] = self.config.http_tool_configs.rate_limit
            self.tool_result['total_connections'] =\
                len(self.client_dict) * self.config.http_tool_configs.connections
            self.gen_host_stats()
        except (KBSetStaticRouteException):
            LOG.error("Could not set static route.")
            return
        except (KBHTTPServerUpException):
            LOG.error("HTTP service is not up in testing cloud.")
            return
        # BUG FIX: the original wrote "except KBHTTPBenchException():", which
        # calls (instantiates) the class in the except clause — catch the
        # class itself instead.
        except KBHTTPBenchException:
            LOG.error("Error in HTTP benchmarking.")
            return

View File

@ -1,67 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import log as logging
LOG = logging.getLogger(__name__)
class KBVMMappingAlgoNotSup(Exception):
    """Raised when the requested client-to-server VM mapping algorithm
    is not supported by the scheduler (only "1:1" is implemented)."""
    pass
class KBVMPlacementAlgoNotSup(Exception):
    """Raised when the requested VM placement algorithm is not supported
    by the scheduler (only "Round-robin" is implemented)."""
    pass
class KBScheduler(object):
    """
    Static helpers for scheduling KloudBuster VMs:

    1. VM placement across the hypervisors of a rack
    2. Mapping client VMs to their target server VMs
    """

    @staticmethod
    def setup_vm_placement(role, vm_list, topology, avail_zone, algorithm):
        """Assign an "az:host" boot hint to every VM in vm_list.

        Without a topology, placement is left to the nova scheduler.
        Only the "Round-robin" algorithm is supported; anything else
        raises KBVMPlacementAlgoNotSup.
        """
        if not topology:
            # Will use nova-scheduler to pick up the hypervisors
            return
        # Fall back to the default NOVA availability zone
        zone = avail_zone if avail_zone else "nova"
        if role == "Server":
            hosts = topology.servers_rack.split()
        else:
            hosts = topology.clients_rack.split()
        if algorithm != "Round-robin":
            LOG.error("Unsupported algorithm!")
            raise KBVMPlacementAlgoNotSup()
        # Cycle through the rack's hosts, one VM at a time
        for idx, vm in enumerate(vm_list):
            vm.boot_info['avail_zone'] = "%s:%s" % (zone, hosts[idx % len(hosts)])

    @staticmethod
    def setup_vm_mappings(client_list, server_list, algorithm):
        """Point each client VM at its target server's index page.

        Only N*1:1 mapping is supported (client i targets server i);
        any other algorithm raises KBVMMappingAlgoNotSup.
        """
        if algorithm != "1:1":
            LOG.error("Unsupported algorithm!")
            raise KBVMMappingAlgoNotSup()
        for idx, client in enumerate(client_list):
            target = server_list[idx]
            # Prefer the floating IP; fall back to the fixed IP
            client.target_url = "http://%s/index.html" %\
                (target.fip_ip or target.fixed_ip)
            client.user_data['target_url'] = client.target_url

View File

@ -1 +0,0 @@
recursive-include public *

View File

@ -1,2 +0,0 @@
Run the command below to start the server:
pecan serve config.py

View File

@ -1,68 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Server Specific Configurations
# Bind address/port for pecan's built-in development WSGI server.
server = {
    'port': '8080',
    'host': '0.0.0.0'
}

# Pecan Application Configurations
app = {
    # Dotted path to the REST API's root controller
    'root': 'kb_server.controllers.root.RootController',
    'modules': ['kb_server'],
    'static_root': '%(confdir)s/public',
    'template_path': '%(confdir)s/kb_server/templates',
    # Development mode: enables pecan's interactive debugger on errors
    'debug': True,
    'errors': {
        404: '/error/404',
        '__force_dict__': True
    }
}

# Python logging configuration consumed by pecan at startup.
# '__force_dict__' keeps pecan from wrapping the dict in its config proxy.
logging = {
    'root': {'level': 'INFO', 'handlers': ['console']},
    'loggers': {
        'kb_server': {'level': 'DEBUG', 'handlers': ['console']},
        'pecan': {'level': 'DEBUG', 'handlers': ['console']},
        'py.warnings': {'handlers': ['console']},
        '__force_dict__': True
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'color'
        }
    },
    'formatters': {
        'simple': {
            'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
                       '[%(threadName)s] %(message)s')
        },
        'color': {
            '()': 'pecan.log.ColorFormatter',
            'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
                       '[%(threadName)s] %(message)s'),
            '__force_dict__': True
        }
    }
}

# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -1,27 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kb_server import model
from pecan import make_app
def setup_app(config):
    """Build and return the pecan WSGI application from a pecan config."""
    model.init_model()
    # Copy so popping 'root' does not mutate the shared config object
    conf = dict(config.app)
    root_controller = conf.pop('root')
    return make_app(root_controller,
                    logging=getattr(config, 'logging', {}),
                    **conf)

View File

@ -1,109 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
kb_main_path = os.path.split(os.path.abspath(__file__))[0] + "/../../.."
sys.path.append(kb_main_path)
from credentials import Credentials
from configure import Configuration
from kb_config import KBConfig
from pecan import expose
from pecan import response
class ConfigController(object):
    """Pecan REST controller for KloudBuster configuration.

    Endpoints (mounted under /api/config/):
      default_config  (GET)      -- the built-in scale configuration
      running_config  (GET/POST) -- read or replace the active config
      status          (GET/PUT)  -- run status of the server
    """

    def __init__(self):
        self.kb_config = KBConfig()
        # NOTE(review): renamed from `self.status` -- a plain instance
        # attribute named `status` shadowed the @expose'd status() method
        # below, breaking the /status endpoint.  Nothing else read it.
        self._status = 'READY'
        self.def_config = self.kb_config.config_scale

    @expose(generic=True)
    def default_config(self):
        """Return the default (built-in) scale configuration."""
        return str(self.def_config)

    @expose(generic=True)
    def running_config(self):
        """Return the currently active scale configuration."""
        return str(self.kb_config.config_scale)

    @running_config.when(method='POST')
    def running_config_POST(self, args):
        """Replace the running configuration from a client-supplied dict.

        Expectation:
        {
            'credentials': {'tested_rc': '<STRING>', 'passwd_tested': '<STRING>',
                            'testing_rc': '<STRING>', 'passwd_testing': '<STRING>'},
            'kb_cfg': {<USER_OVERRIDED_CONFIGS>},
            'topo_cfg': {<TOPOLOGY_CONFIGS>}
            'tenants_cfg': {<TENANT_AND_USER_LISTS_FOR_REUSING>}
        }
        Returns the merged config, or a 403 body on any parsing error.
        """
        try:
            # SECURITY: eval() executes arbitrary python from the request
            # body -- this endpoint must only be reachable by trusted
            # clients.  ast.literal_eval would be safer if payloads are
            # guaranteed to be pure literals.
            user_config = eval(args)
            # Parsing credentials from application input
            cred_config = user_config['credentials']
            cred_tested = Credentials(openrc_contents=cred_config['tested_rc'],
                                      pwd=cred_config['passwd_tested'])
            if ('testing_rc' in cred_config and
                    cred_config['testing_rc'] != cred_config['tested_rc']):
                cred_testing = Credentials(openrc_contents=cred_config['testing_rc'],
                                           pwd=cred_config['passwd_testing'])
            else:
                # Use the same openrc file for both cases
                cred_testing = cred_tested
            # Save the public key into a temporary file.
            # BUG FIX: the original checked for 'public_key' but then read
            # 'public_key_file' from kb_cfg, so any payload carrying the
            # checked key raised KeyError (surfacing as a 403).
            if 'public_key' in user_config['kb_cfg']:
                pubkey_filename = '/tmp/kb_public_key.pub'
                with open(pubkey_filename, 'w') as f:
                    f.write(user_config['kb_cfg']['public_key'])
                self.kb_config.config_scale['public_key_file'] = pubkey_filename
            alt_config = Configuration.from_string(user_config['kb_cfg']).configure()
            self.kb_config.config_scale = self.kb_config.config_scale.merge(alt_config)
            # Parsing topology configs from application input
            if 'topo_cfg' in user_config:
                topo_cfg = Configuration.from_string(user_config['topo_cfg']).configure()
            else:
                topo_cfg = None
            # Parsing tenants configs from application input
            if 'tenants_list' in user_config:
                tenants_list = Configuration.from_string(user_config['tenants_list']).configure()
            else:
                tenants_list = None
        except Exception as e:
            response.status = 403
            response.text = "Error while parsing configurations: %s" % e.message
            return response.text
        self.kb_config.init_with_rest_api(cred_tested=cred_tested,
                                          cred_testing=cred_testing,
                                          topo_cfg=topo_cfg,
                                          tenants_list=tenants_list)
        return str(self.kb_config.config_scale)

    @expose(generic=True)
    def status(self):
        """Return the current run status (placeholder implementation)."""
        return "RETURN CURRENT STATUS HERE"

    @status.when(method='PUT')
    def status_PUT(self, **kw):
        # @TODO(recursively update the config dictionary with the information
        # provided by application (client))
        return str(kw)

View File

@ -1,33 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from config import ConfigController
from pecan import abort
from pecan import expose
class APIController(object):
    """Routes /api/<key>; only the 'config' sub-controller exists."""

    @expose()
    def _lookup(self, primary_key, *remainder):
        # Anything other than /api/config is a 404
        if primary_key != "config":
            abort(404)
        return ConfigController(), remainder
class RootController(object):
    """Top-level router; the whole service lives under /api."""

    @expose()
    def _lookup(self, primary_key, *remainder):
        # Anything other than /api is a 404
        if primary_key != "api":
            abort(404)
        return APIController(), remainder

View File

@ -1,29 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import conf # noqa
def init_model():
    """Application-startup hook for database initialization.

    Called once when the pecan application is created.  Bind a parsed
    database configuration, set up tables or ORM classes here if the
    service ever grows persistent state.  For common recipes see
    http://pecan.readthedocs.org/en/latest/databases.html
    """
    # Intentionally a no-op: kb_server keeps no persistent state.
    return None

View File

@ -1,6 +0,0 @@
[nosetests]
match=^test
where=kb_server
nocapture=1
cover-package=kb_server
cover-erase=1

View File

@ -1,38 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
# Packaging script for the kb_server pecan application.
try:
    from setuptools import find_packages
    from setuptools import setup
except ImportError:
    # Bootstrap setuptools on hosts where it is not installed yet
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import find_packages
    from setuptools import setup

setup(
    name='kb_server',
    version='0.1',
    description='',
    author='',
    author_email='',
    install_requires=[
        "pecan",
    ],
    test_suite='kb_server',
    zip_safe=False,
    include_package_data=True,
    # ez_setup is a bootstrap helper, not part of the package itself
    packages=find_packages(exclude=['ez_setup'])
)

View File

@ -1,129 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<script src="http://code.jquery.com/jquery.min.js"></script>
<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet" type="text/css" />
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
<script src="http://cdn.datatables.net/1.10.7/js/jquery.dataTables.min.js"></script>
<link href="http://cdn.datatables.net/1.10.7/css/jquery.dataTables.min.css" rel="stylesheet" type="text/css" />
<style media="all" type="text/css">
.alignRight { text-align: right; }
</style>
<meta charset="utf-8">
<script type="text/javascript"
src="https://www.google.com/jsapi?autoload={
'modules':[{
'name':'visualization',
'version':'1',
'packages':['corechart']
}]
}"></script>
<script type="text/javascript">
google.setOnLoadCallback(drawChart);
function drawChart() {
var data = google.visualization.arrayToDataTable(
{{kbstats.latency_tuples}}
);
var ticks =
[{v:2,f:'50%'},
{v:4,f:'75%'},
{v:10,f:'90%'},
{v:100,f:'99%'},
{v:1000,f:'99.9%'},
{v:10000,f:'99.99%'},
{v:100000,f:'99.999%'},
];
var options = {
title: 'HTTP Requests Latency Distribution',
curveType: 'function',
hAxis: {title: 'Percentile', minValue: 0, logScale: true, ticks:ticks },
vAxis: {title: 'Latency (ms)', minValue: 0, logScale: true,
gridlines: {count: 8},
minorGridlines: {count: 1},
minValue: 0 },
legend: { position: 'bottom' }
};
var chart = new google.visualization.LineChart(document.getElementById('curve_chart'));
// add tooptips with correct percentile text to data:
var columns = [0];
for (var i = 1; i < data.getNumberOfColumns(); i++) {
columns.push(i);
columns.push({
type: 'string',
properties: {
role: 'tooltip'
},
calc: (function (j) {
return function (dt, row) {
var percentile = 100.0 - (100.0/dt.getValue(row, 0));
return dt.getColumnLabel(j) + ': ' +
percentile +
'\%\'ile = ' + dt.getValue(row, j) + ' msec'
}
})(i)
});
}
var view = new google.visualization.DataView(data);
view.setColumns(columns);
chart.draw(view, options);
}
// For jquery dataTable
$(document).ready(function() {
$('#runs_table').dataTable({
"searching": {{kbstats.search_page}},
"paging": {{kbstats.search_page}},
"bInfo" : {{kbstats.search_page}},
"aoColumnDefs": [
{ "sClass": "alignRight", "aTargets": [ 1, 2, 3, 4, 5, 6 ] }
]
});
} );
</script>
</head>
<body>
<div class="container-fluid">
<h2><i class="glyphicon glyphicon-dashboard"></i> KloudBuster Report</h2>
<div class="panel panel-primary">
<div class="panel-heading"><h3>HTTP Scale Results</h3></div>
<div class="panel-body">
{% if kbstats.table %}
<div class="row"><!-- ROW1 -->
<table id="runs_table" class="table hover display compact" cellspacing="0" width="100%">
<thead>
<tr>
{% for col_name in kbstats.table.col_names %}
<th>{{col_name}}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in kbstats.table.rows %}
<tr>
{% for cell in row.cells %}
<td>{{cell}}</td>
{% endfor %}
<td>
<div class="progress-bar" role="progressbar" style="width:{{row.rx.percent}}%;min-width: 20px">
<span>{{ row.rx.value }}</span>
</div>
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div><!-- ROW1 -->
{% endif %}
<div class="row" align="center"><!-- LATENCY CHART ROW -->
<div id="curve_chart" style="width: 900px; height: 500px"></div>
</div><!-- LATENCY CHART ROW -->
</div><!--/panel-body-->
</div><!--/panel-->
</div><!--/container-->
</body>
</html>

View File

@ -1 +0,0 @@
dib/elements/kloudbuster/static/kb_test/kb_vm_agent.py

View File

@ -1,599 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from multiprocessing.pool import ThreadPool
import os
import sys
import threading
import traceback
import base_compute
import base_network
import glanceclient.exc as glance_exception
from glanceclient.v2 import client as glanceclient
from kb_config import KBConfig
from kb_runner import KBRunner
from kb_scheduler import KBScheduler
from keystoneclient.v2_0 import client as keystoneclient
import log as logging
from novaclient.exceptions import ClientException
from oslo_config import cfg
from tabulate import tabulate
import tenant
import sshutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
__version__ = '1.0.0'
KB_IMAGE_MAJOR_VERSION = 1
class KBVMCreationException(Exception):
    """Raised when nova fails to create a KloudBuster instance."""
    pass
def create_keystone_client(admin_creds):
    """
    Return a (keystone client, auth URL) pair for the given credentials.
    """
    creds = admin_creds.get_credentials()
    client = keystoneclient.Client(**creds)
    return (client, creds['auth_url'])
def check_and_upload_images(cred, cred_testing, server_img_name, client_img_name):
    """Ensure the KloudBuster VM image exists in BOTH klouds.

    Looks up the expected image name in the server (tested) and client
    (testing) clouds and uploads dib/kloudbuster.qcow2 wherever it is
    missing.

    Returns True when both klouds have the image, False when the local
    image file is missing; exits the process on a glance permission
    error.
    """
    keystone_list = [create_keystone_client(cred)[0], create_keystone_client(cred_testing)[0]]
    keystone_dict = dict(zip(['Server kloud', 'Client kloud'], keystone_list))
    img_name_dict = dict(zip(['Server kloud', 'Client kloud'], [server_img_name, client_img_name]))
    for kloud, keystone in keystone_dict.items():
        glance_endpoint = keystone.service_catalog.url_for(
            service_type='image', endpoint_type='publicURL')
        glance_client = glanceclient.Client(glance_endpoint, token=keystone.auth_token)
        try:
            # Search for the image; if present, move on to the next kloud.
            # BUG FIX: the original did `return True` here, which skipped
            # the check/upload for the second kloud entirely whenever the
            # first kloud already had the image.
            glance_client.images.list(filters={'name': img_name_dict[kloud]}).next()
            continue
        except StopIteration:
            pass
        # Image not found in this kloud: try to upload it
        LOG.info("Image is not found in %s, trying to upload..." % (kloud))
        if not os.path.exists('dib/kloudbuster.qcow2'):
            LOG.error("Image file dib/kloudbuster.qcow2 is not present, please refer "
                      "to dib/README.rst for how to build image for KloudBuster.")
            return False
        # qcow2 is binary; open in 'rb' and let `with` close the handle
        with open('dib/kloudbuster.qcow2', 'rb') as fimage:
            try:
                image = glance_client.images.create(name=img_name_dict[kloud],
                                                    disk_format="qcow2",
                                                    container_format="bare",
                                                    visibility='public')
                glance_client.images.upload(image['id'], fimage)
            except glance_exception.HTTPForbidden:
                LOG.error("Cannot upload image without admin access. Please make sure the "
                          "image is existed in cloud, and is either public or owned by you.")
                sys.exit(1)
    return True
class Kloud(object):
    """One OpenStack cloud under KloudBuster control.

    Represents either the tested ("Server Kloud", prefix KBs) or the
    testing ("Client Kloud", prefix KBc) side; owns its tenants,
    flavors and VM instances and knows how to create and delete them.
    """

    def __init__(self, scale_cfg, admin_creds, reusing_tenants, testing_side=False):
        # Admin credentials used for all keystone operations
        self.cred = admin_creds
        self.tenant_list = []
        # True for the client (testing) side, False for the server (tested) side
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        # Pre-existing tenants/users to adopt instead of creating new ones (or None)
        self.reusing_tenants = reusing_tenants
        self.keystone, self.auth_url = create_keystone_client(self.cred)
        # Flavor to boot with when reusing tenants; filled in by the caller
        self.flavor_to_use = None
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        LOG.info("Creating kloud: " + self.prefix)
        # if this cloud is sharing a network then all tenants must hook up to
        # it and on deletion that shared network must NOT be deleted
        # as it will be deleted by the owner
        # pre-compute the placement az to use for all VMs
        self.placement_az = None
        if scale_cfg['availability_zone']:
            self.placement_az = scale_cfg['availability_zone']
        LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))

    def create_resources(self, tenant_quota):
        """Create (or adopt) tenants, their resources and the kb.* flavors.

        When reusing_tenants is set, existing tenants/users are wrapped;
        otherwise number_tenants new tenants are created along with the
        side-specific flavors (kb.client/kb.proxy or kb.server).
        """
        if self.reusing_tenants:
            for tenant_info in self.reusing_tenants:
                tenant_name = tenant_info['name']
                user_list = tenant_info['user']
                tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota,
                                                reusing_users=user_list)
                self.tenant_list.append(tenant_instance)
        else:
            for tenant_count in xrange(self.scale_cfg['number_tenants']):
                tenant_name = self.prefix + "-T" + str(tenant_count)
                tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota)
                self.tenant_list.append(tenant_instance)
        for tenant_instance in self.tenant_list:
            tenant_instance.create_resources()
        if not self.reusing_tenants:
            # Create flavors for servers, clients, and kb-proxy nodes
            nova_client = self.tenant_list[0].user_list[0].nova_client
            flavor_manager = base_compute.Flavor(nova_client)
            flavor_dict = self.scale_cfg.flavor
            if self.testing_side:
                flavor_manager.create_flavor('kb.client', override=True, **flavor_dict)
                flavor_manager.create_flavor('kb.proxy', override=True, ram=2048, vcpus=1, disk=20)
            else:
                flavor_manager.create_flavor('kb.server', override=True, **flavor_dict)

    def delete_resources(self):
        """Tear down everything this kloud created: flavors first, then tenants."""
        # Deleting flavors created by KloudBuster
        try:
            nova_client = self.tenant_list[0].user_list[0].nova_client
        except Exception:
            # NOVA Client is not yet initialized, so skip cleaning up...
            return
        if not self.reusing_tenants:
            flavor_manager = base_compute.Flavor(nova_client)
            if self.testing_side:
                flavor_manager.delete_flavor('kb.client')
                flavor_manager.delete_flavor('kb.proxy')
            else:
                flavor_manager.delete_flavor('kb.server')
        for tnt in self.tenant_list:
            tnt.delete_resources()

    def get_first_network(self):
        """Return the first network of the first tenant, or None when empty."""
        if self.tenant_list:
            return self.tenant_list[0].get_first_network()
        return None

    def get_all_instances(self, include_kb_proxy=False):
        """Return all VM instances across all tenants.

        The KB-PROXY instance, when present, is always the last element
        and is dropped unless include_kb_proxy is True.
        NOTE(review): assumes at least one instance exists -- an empty
        kloud would raise IndexError on all_instances[-1]; confirm callers.
        """
        all_instances = []
        for tnt in self.tenant_list:
            all_instances.extend(tnt.get_all_instances())
        if (not include_kb_proxy) and all_instances[-1].vm_name == 'KB-PROXY':
            all_instances.pop()
        return all_instances

    def attach_to_shared_net(self, shared_net):
        """Plug every router of this kloud into the given shared network.

        Also propagates each router's shared-interface IP to its
        instances so clients can reach servers across the shared segment.
        """
        # If a shared network exists create a port on this
        # network and attach to router interface
        for tnt in self.tenant_list:
            for usr in tnt.user_list:
                for rtr in usr.router_list:
                    rtr.shared_network = shared_net
                    rtr.attach_router_interface(shared_net, use_port=True)
                    for net in rtr.network_list:
                        for ins in net.instance_list:
                            ins.shared_interface_ip = rtr.shared_interface_ip

    def get_az(self):
        '''Placement algorithm for all VMs created in this kloud
        Return None if placement to be provided by the nova scheduler
        Else return an availability zone to use (e.g. "nova")
        or a compute host to use (e.g. "nova:tme123")
        '''
        return self.placement_az

    def create_vm(self, instance):
        """Boot one instance and record its fixed/floating/ssh IPs.

        Raises KBVMCreationException when nova fails to create the
        server.  The KB-PROXY node always gets a floating IP (even when
        use_floatingip is off) since the runner must reach it externally.
        """
        LOG.info("Creating Instance: " + instance.vm_name)
        instance.create_server(**instance.boot_info)
        if not instance.instance:
            raise KBVMCreationException()
        instance.fixed_ip = instance.instance.networks.values()[0][0]
        if (instance.vm_name == "KB-PROXY") and (not instance.config['use_floatingip']):
            neutron_client = instance.network.router.user.neutron_client
            external_network = base_network.find_external_network(neutron_client)
            instance.fip = base_network.create_floating_ip(neutron_client, external_network)
            instance.fip_ip = instance.fip['floatingip']['floating_ip_address']
        if instance.fip:
            # Associate the floating ip with this instance
            instance.instance.add_floating_ip(instance.fip_ip)
            instance.ssh_ip = instance.fip_ip
        else:
            # Store the fixed ip as ssh ip since there is no floating ip
            instance.ssh_ip = instance.fixed_ip

    def create_vms(self, vm_creation_concurrency):
        """Boot all instances concurrently with a bounded thread pool."""
        tpool = ThreadPool(processes=vm_creation_concurrency)
        tpool.map(self.create_vm, self.get_all_instances())
class KloudBuster(object):
"""
Creates resources on the cloud for loading up the cloud
1. Tenants
2. Users per tenant
3. Routers per user
4. Networks per router
5. Instances per network
"""
def __init__(self, server_cred, client_cred, server_cfg, client_cfg, topology, tenants_list):
# List of tenant objects to keep track of all tenants
self.server_cfg = server_cfg
self.client_cfg = client_cfg
if topology and tenants_list:
self.topology = None
LOG.warn("REUSING MODE: Topology configs will be ignored.")
else:
self.topology = topology
if tenants_list:
self.tenants_list = {}
self.tenants_list['server'] =\
[{'name': tenants_list['tenant_name'], 'user': tenants_list['server_user']}]
self.tenants_list['client'] =\
[{'name': tenants_list['tenant_name'], 'user': [tenants_list['client_user']]}]
LOG.warn("REUSING MODE: The quotas will not be adjusted automatically.")
LOG.warn("REUSING MODE: The flavor configs will be ignored.")
else:
self.tenants_list = {'server': None, 'client': None}
# TODO(check on same auth_url instead)
if server_cred == client_cred:
self.single_cloud = True
else:
self.single_cloud = False
self.kloud = Kloud(server_cfg, server_cred, self.tenants_list['server'])
self.testing_kloud = Kloud(client_cfg, client_cred,
self.tenants_list['client'],
testing_side=True)
self.kb_proxy = None
self.final_result = None
self.server_vm_create_thread = None
self.client_vm_create_thread = None
def print_provision_info(self):
"""
Function that iterates and prints all VM info
for tested and testing cloud
"""
table = [["VM Name", "Host", "Internal IP", "Floating IP", "Subnet", "Shared Interface IP"]]
client_list = self.kloud.get_all_instances()
for instance in client_list:
row = [instance.vm_name, instance.host, instance.fixed_ip,
instance.fip_ip, instance.subnet_ip, instance.shared_interface_ip]
table.append(row)
LOG.info('Provision Details (Tested Kloud)\n' +
tabulate(table, headers="firstrow", tablefmt="psql"))
table = [["VM Name", "Host", "Internal IP", "Floating IP", "Subnet"]]
client_list = self.testing_kloud.get_all_instances(include_kb_proxy=True)
for instance in client_list:
row = [instance.vm_name, instance.host, instance.fixed_ip,
instance.fip_ip, instance.subnet_ip]
table.append(row)
LOG.info('Provision Details (Testing Kloud)\n' +
tabulate(table, headers="firstrow", tablefmt="psql"))
def gen_user_data(self, role):
LOG.info("Preparing metadata for VMs... (%s)" % role)
if role == "Server":
svr_list = self.kloud.get_all_instances()
KBScheduler.setup_vm_placement(role, svr_list, self.topology,
self.kloud.placement_az, "Round-robin")
for ins in svr_list:
ins.user_data['role'] = 'Server'
ins.boot_info['flavor_type'] =\
self.kloud.flavor_to_use if self.tenants_list else 'kb.server'
ins.boot_info['user_data'] = str(ins.user_data)
elif role == "Client":
client_list = self.testing_kloud.get_all_instances()
svr_list = self.kloud.get_all_instances()
KBScheduler.setup_vm_mappings(client_list, svr_list, "1:1")
KBScheduler.setup_vm_placement(role, client_list, self.topology,
self.testing_kloud.placement_az, "Round-robin")
for idx, ins in enumerate(client_list):
ins.user_data['role'] = 'Client'
ins.user_data['vm_name'] = ins.vm_name
ins.user_data['redis_server'] = self.kb_proxy.fixed_ip
ins.user_data['redis_server_port'] = 6379
ins.user_data['target_subnet_ip'] = svr_list[idx].subnet_ip
ins.user_data['target_shared_interface_ip'] = svr_list[idx].shared_interface_ip
ins.user_data['http_tool'] = ins.config['http_tool']
ins.user_data['http_tool_configs'] = ins.config['http_tool_configs']
ins.boot_info['flavor_type'] =\
self.testing_kloud.flavor_to_use if self.tenants_list else 'kb.client'
ins.boot_info['user_data'] = str(ins.user_data)
def run(self):
"""
The runner for KloudBuster Tests
Executes tests serially
Support concurrency in fututure
"""
kbrunner = None
vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
try:
tenant_quota = self.calc_tenant_quota()
self.kloud.create_resources(tenant_quota['server'])
self.testing_kloud.create_resources(tenant_quota['client'])
# Start the runner and ready for the incoming redis messages
client_list = self.testing_kloud.get_all_instances()
server_list = self.kloud.get_all_instances()
# Setting up the KloudBuster Proxy node
self.kb_proxy = client_list[-1]
client_list.pop()
self.kb_proxy.vm_name = 'KB-PROXY'
self.kb_proxy.user_data['role'] = 'KB-PROXY'
self.kb_proxy.boot_info['flavor_type'] =\
self.testing_kloud.flavor_to_use if self.tenants_list else 'kb.proxy'
if self.testing_kloud.placement_az:
self.kb_proxy.boot_info['avail_zone'] = "%s:%s" %\
(self.testing_kloud.placement_az, self.topology.clients_rack.split()[0])
self.kb_proxy.boot_info['user_data'] = str(self.kb_proxy.user_data)
self.testing_kloud.create_vm(self.kb_proxy)
kbrunner = KBRunner(client_list, self.client_cfg,
KB_IMAGE_MAJOR_VERSION,
self.single_cloud)
kbrunner.setup_redis(self.kb_proxy.fip_ip)
if self.single_cloud:
# Find the shared network if the cloud used to testing is same
# Attach the router in tested kloud to the shared network
shared_net = self.testing_kloud.get_first_network()
self.kloud.attach_to_shared_net(shared_net)
# Create VMs in both tested and testing kloud concurrently
self.client_vm_create_thread = threading.Thread(target=self.testing_kloud.create_vms,
args=[vm_creation_concurrency])
self.server_vm_create_thread = threading.Thread(target=self.kloud.create_vms,
args=[vm_creation_concurrency])
self.client_vm_create_thread.daemon = True
self.server_vm_create_thread.daemon = True
if self.single_cloud:
self.gen_user_data("Server")
self.server_vm_create_thread.start()
self.server_vm_create_thread.join()
self.gen_user_data("Client")
self.client_vm_create_thread.start()
self.client_vm_create_thread.join()
else:
self.gen_user_data("Server")
self.gen_user_data("Client")
self.server_vm_create_thread.start()
self.client_vm_create_thread.start()
self.server_vm_create_thread.join()
self.client_vm_create_thread.join()
# Function that print all the provisioning info
self.print_provision_info()
# Run the runner to perform benchmarkings
kbrunner.run()
self.final_result = kbrunner.tool_result
self.final_result['total_server_vms'] = len(server_list)
self.final_result['total_client_vms'] = len(client_list)
# self.final_result['host_stats'] = kbrunner.host_stats
LOG.info(self.final_result)
except KeyboardInterrupt:
traceback.format_exc()
except (sshutils.SSHError, ClientException, Exception):
traceback.print_exc()
# Cleanup: start with tested side first
# then testing side last (order is important because of the shared network)
if self.server_cfg['cleanup_resources']:
try:
self.kloud.delete_resources()
except Exception:
traceback.print_exc()
if self.client_cfg['cleanup_resources']:
try:
self.testing_kloud.delete_resources()
except Exception:
traceback.print_exc()
if kbrunner:
kbrunner.dispose()
def get_tenant_vm_count(self, config):
    """Return how many VMs a single tenant creates under this config."""
    vms_per_user = (config['routers_per_user'] *
                    config['networks_per_router'] *
                    config['vms_per_network'])
    return config['users_per_tenant'] * vms_per_user
    def calc_neutron_quota(self):
        """Compute the neutron quotas needed by the server and client klouds.

        Returns a 2-element list [server_quota, client_quota]; each element is
        a dict of neutron resource name to required count (network, subnet,
        router, floatingip, port, security_group, security_group_rule).
        The client side topology is fixed to 1 network/subnet/router plus the
        KB-Proxy node, hence the hardcoded 1s and +2/+1 adjustments below.
        """
        # Per-tenant VM count on the tested (server) side
        total_vm = self.get_tenant_vm_count(self.server_cfg)
        server_quota = {}
        server_quota['network'] = self.server_cfg['routers_per_user'] *\
            self.server_cfg['networks_per_router']
        server_quota['subnet'] = server_quota['network']
        server_quota['router'] = self.server_cfg['routers_per_user']
        if (self.server_cfg['use_floatingip']):
            # (1) Each VM has one floating IP
            # (2) Each Router has one external IP
            server_quota['floatingip'] = total_vm + server_quota['router']
            # (1) Each VM Floating IP takes up 1 port, total of $total_vm port(s)
            # (2) Each VM Fixed IP takes up 1 port, total of $total_vm port(s)
            # (3) Each Network has one router_interface (gateway), and one DHCP agent, total of
            #     server_quota['network'] * 2 port(s)
            # (4) Each Router has one external IP, takes up 1 port, total of
            #     server_quota['router'] port(s)
            server_quota['port'] = 2 * total_vm + 2 * server_quota['network'] +\
                server_quota['router']
        else:
            server_quota['floatingip'] = server_quota['router']
            server_quota['port'] = total_vm + 2 * server_quota['network'] + server_quota['router']
        server_quota['security_group'] = server_quota['network'] + 1
        server_quota['security_group_rule'] = server_quota['security_group'] * 10
        client_quota = {}
        # The client kloud must host the client VMs for ALL server tenants
        total_vm = total_vm * self.server_cfg['number_tenants']
        client_quota['network'] = 1
        client_quota['subnet'] = 1
        client_quota['router'] = 1
        if (self.client_cfg['use_floatingip']):
            # (1) Each VM has one floating IP
            # (2) Each Router has one external IP, total of 1 router
            # (3) KB-Proxy node has one floating IP
            client_quota['floatingip'] = total_vm + 1 + 1
            # (1) Each VM Floating IP takes up 1 port, total of $total_vm port(s)
            # (2) Each VM Fixed IP takes up 1 port, total of $total_vm port(s)
            # (3) Each Network has one router_interface (gateway), and one DHCP agent, total of
            #     client_quota['network'] * 2 port(s)
            # (4) KB-Proxy node takes up 2 ports, one for fixed IP, one for floating IP
            # (5) Each Router has one external IP, takes up 1 port, total of 1 router/port
            client_quota['port'] = 2 * total_vm + 2 * client_quota['network'] + 2 + 1
        else:
            client_quota['floatingip'] = 1 + 1
            client_quota['port'] = total_vm + 2 * client_quota['network'] + 2 + 1
        if self.single_cloud:
            # Under single-cloud mode, the shared network is attached to every router in server
            # cloud, and each one takes up 1 port on client side.
            client_quota['port'] = client_quota['port'] + server_quota['router']
        client_quota['security_group'] = client_quota['network'] + 1
        client_quota['security_group_rule'] = client_quota['security_group'] * 10
        return [server_quota, client_quota]
def calc_nova_quota(self):
total_vm = self.get_tenant_vm_count(self.server_cfg)
server_quota = {}
server_quota['instances'] = total_vm
server_quota['cores'] = total_vm * self.server_cfg['flavor']['vcpus']
server_quota['ram'] = total_vm * self.server_cfg['flavor']['ram']
client_quota = {}
total_vm = total_vm * self.server_cfg['number_tenants']
client_quota['instances'] = total_vm + 1
client_quota['cores'] = total_vm * self.client_cfg['flavor']['vcpus'] + 1
client_quota['ram'] = total_vm * self.client_cfg['flavor']['ram'] + 2048
return [server_quota, client_quota]
def calc_cinder_quota(self):
total_vm = self.get_tenant_vm_count(self.server_cfg)
server_quota = {}
server_quota['gigabytes'] = total_vm * self.server_cfg['flavor']['disk']
client_quota = {}
total_vm = total_vm * self.server_cfg['number_tenants']
client_quota['gigabytes'] = total_vm * self.client_cfg['flavor']['disk'] + 20
return [server_quota, client_quota]
def calc_tenant_quota(self):
quota_dict = {'server': {}, 'client': {}}
nova_quota = self.calc_nova_quota()
neutron_quota = self.calc_neutron_quota()
cinder_quota = self.calc_cinder_quota()
for idx, val in enumerate(['server', 'client']):
quota_dict[val]['nova'] = nova_quota[idx]
quota_dict[val]['neutron'] = neutron_quota[idx]
quota_dict[val]['cinder'] = cinder_quota[idx]
return quota_dict
# Some hardcoded client side options we do not want users to change.
# The client (testing) kloud topology is pinned to a single
# tenant/user/router/network so that only vms_per_network scales.
hardcoded_client_cfg = {
    # Number of tenants to be created on the cloud
    'number_tenants': 1,
    # Number of Users to be created inside the tenant
    'users_per_tenant': 1,
    # Number of routers to be created within the context of each User
    # For now support only 1 router per user
    'routers_per_user': 1,
    # Number of networks to be created within the context of each Router
    # Assumes 1 subnet per network
    'networks_per_router': 1,
    # Number of VM instances to be created within the context of each Network
    'vms_per_network': 1,
    # Number of security groups per network
    'secgroups_per_network': 1
}
# Command line entry point: parse CLI options, load config/credentials,
# verify the KloudBuster images, run the benchmark and optionally dump the
# final results to a JSON file.
if __name__ == '__main__':
    cli_opts = [
        cfg.StrOpt("config",
                   short="c",
                   default=None,
                   help="Override default values with a config file"),
        cfg.StrOpt("topology",
                   short="t",
                   default=None,
                   help="Topology files for compute hosts"),
        cfg.StrOpt("tenants-list",
                   short="l",
                   default=None,
                   help="Existing tenant and user lists for reusing"),
        cfg.StrOpt("tested-rc",
                   default=None,
                   help="Tested cloud openrc credentials file"),
        cfg.StrOpt("testing-rc",
                   default=None,
                   help="Testing cloud openrc credentials file"),
        cfg.StrOpt("tested-passwd",
                   default=None,
                   secret=True,
                   help="Tested cloud password"),
        cfg.StrOpt("testing-passwd",
                   default=None,
                   secret=True,
                   help="Testing cloud password"),
        cfg.StrOpt("json",
                   default=None,
                   help='store results in JSON format file'),
        cfg.BoolOpt("no-env",
                    default=False,
                    help="Do not read env variables")
    ]
    CONF.register_cli_opts(cli_opts)
    CONF.set_default("verbose", True)
    CONF(sys.argv[1:])
    logging.setup("kloudbuster")
    kb_config = KBConfig()
    kb_config.init_with_cli()
    # Abort early if the benchmark images are not present in either cloud
    image_check = check_and_upload_images(
        kb_config.cred_tested,
        kb_config.cred_testing,
        kb_config.server_cfg.image_name,
        kb_config.client_cfg.image_name)
    if not image_check:
        sys.exit(1)
    # The KloudBuster class is just a wrapper class that leverages the
    # tenant and user classes for resource creation and deletion
    kloudbuster = KloudBuster(
        kb_config.cred_tested, kb_config.cred_testing,
        kb_config.server_cfg, kb_config.client_cfg,
        kb_config.topo_cfg, kb_config.tenants_list)
    kloudbuster.run()
    if CONF.json:
        '''Save results in JSON format file.'''
        LOG.info('Saving results in json file: ' + CONF.json + "...")
        with open(CONF.json, 'w') as jfp:
            json.dump(kloudbuster.final_result, jfp, indent=4, sort_keys=True)

View File

@ -1,65 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from oslo_log import handlers
from oslo_log import log as oslogging
# CLI option toggling the extra KloudBuster-only debug level
DEBUG_OPTS = [
    cfg.BoolOpt("kb-debug",
                default=False,
                help="Print debug output for KloudBuster only")
]
CONF = cfg.CONF
CONF.register_cli_opts(DEBUG_OPTS)
oslogging.register_options(CONF)
# Custom log level between DEBUG (10) and INFO (20), used by kbdebug()
logging.KBDEBUG = logging.DEBUG + 5
logging.addLevelName(logging.KBDEBUG, "KBDEBUG")
# Re-export the standard levels so callers only need to import this module
CRITICAL = logging.CRITICAL
DEBUG = logging.DEBUG
ERROR = logging.ERROR
FATAL = logging.FATAL
INFO = logging.INFO
NOTSET = logging.NOTSET
KBDEBUG = logging.KBDEBUG
WARN = logging.WARN
WARNING = logging.WARNING
def setup(product_name, version="unknown"):
    """Configure oslo logging for *product_name*.

    Reuses the DEBUG color for the custom KBDEBUG level and, when the
    --kb-debug CLI flag is set, lowers the project logger threshold to
    KBDEBUG.
    """
    dbg_color = handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG]
    handlers.ColorHandler.LEVEL_COLORS[logging.KBDEBUG] = dbg_color
    oslogging.setup(CONF, product_name, version)
    if CONF.kb_debug:
        oslogging.getLogger(
            project=product_name).logger.setLevel(logging.KBDEBUG)
def getLogger(name="unknown", version="unknown"):
    """Return the (cached) KloudBuster logger adapter for *name*.

    The adapter is created on first use and stored in oslo's logger
    registry so subsequent calls return the same instance.
    """
    if name not in oslogging._loggers:
        adapter = KloudBusterContextAdapter(
            logging.getLogger(name),
            {"project": "kloudbuster", "version": version})
        oslogging._loggers[name] = adapter
    return oslogging._loggers[name]
class KloudBusterContextAdapter(oslogging.KeywordArgumentAdapter):
    """Logger adapter adding a kbdebug() helper for the custom KBDEBUG level."""
    def kbdebug(self, msg, *args, **kwargs):
        # KBDEBUG is registered above as DEBUG + 5
        self.log(logging.KBDEBUG, msg, *args, **kwargs)

View File

@ -1,109 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from base_compute import BaseCompute
import log as logging
from wrk_tool import WrkTool
LOG = logging.getLogger(__name__)
# An openstack instance (can be a VM or a LXC)
class PerfInstance(BaseCompute):
    """A benchmark-capable instance: wires optional throughput (tp) and
    HTTP tools to a BaseCompute VM and collects their results."""

    def __init__(self, vm_name, network, config, is_server=False):
        """Create the instance wrapper.

        :param vm_name: display name of the VM
        :param network: owning network object (provides the nova client)
        :param config: tool/benchmark configuration; 'tp_tool' and
                       'http_tool' entries select the tools to attach
        :param is_server: True when this instance is on the tested side
        """
        BaseCompute.__init__(self, vm_name, network)
        self.config = config
        self.is_server = is_server
        self.boot_info = {}
        self.user_data = {}
        self.up_flag = False
        # SSH Configuration
        self.ssh_access = None
        self.ssh = None
        self.port = None
        self.az = None
        # Throughput tool selection is currently disabled (no 'tp_tool'
        # handler wired in); only the keys below are recognized
        if 'tp_tool' not in config:
            self.tp_tool = None
        # elif config.tp_tool.lower() == 'nuttcp':
        #     self.tp_tool = nuttcp_tool.NuttcpTool
        # elif opts.tp_tool.lower() == 'iperf':
        #     self.tp_tool = iperf_tool.IperfTool
        # else:
        #     self.tp_tool = None
        if 'http_tool' not in config:
            self.http_tool = None
        elif config.http_tool.name.lower() == 'wrk':
            self.http_tool = WrkTool(self, config.http_tool)
            self.target_url = None
        else:
            self.http_tool = None

    def run_tp_client(self, label, dest_ip, target_instance,
                      mss=None, bandwidth=0, bidirectional=False, az_to=None):
        # NOTE: This function will not work, and pending to convert to use redis
        '''test iperf client using the default TCP window size
        (tcp window scaling is normally enabled by default so setting explicit window
        size is not going to help achieve better results)
        :return: a dictionary containing the results of the run
        '''
        # TCP/UDP throughput with tp_tool, returns a list of dict
        if self.tp_tool:
            tp_tool_res = self.tp_tool.run_client(dest_ip,
                                                  target_instance,
                                                  mss=mss,
                                                  bandwidth=bandwidth,
                                                  bidirectional=bidirectional)
        else:
            tp_tool_res = []
        res = {'ip_to': dest_ip}
        res['ip_from'] = self.ssh_access.host
        if label:
            res['desc'] = label
        if self.az:
            res['az_from'] = self.az
        if az_to:
            res['az_to'] = az_to
        res['distro_id'] = self.ssh.distro_id
        res['distro_version'] = self.ssh.distro_version
        # consolidate results for all tools
        res['results'] = tp_tool_res
        return res

    def http_client_parser(self, status, stdout, stderr):
        """Parse the HTTP tool client output and wrap it with VM metadata."""
        http_tool_res = self.http_tool.cmd_parser_run_client(status, stdout, stderr)
        res = {'vm_name': self.vm_name}
        res['target_url'] = self.target_url
        # NOTE(review): self.ssh_ip is not assigned anywhere in this class —
        # presumably set externally before this is called; confirm with callers
        res['ip_from'] = self.ssh_ip
        # consolidate results for all tools
        res['results'] = http_tool_res
        return res

    # Send a command on the ssh session
    def exec_command(self, cmd, timeout=30):
        """Run *cmd* over SSH; returns (status, stdout, stderr)."""
        (status, cmd_output, err) = self.ssh.execute(cmd, timeout=timeout)
        return (status, cmd_output, err)

    # Dispose the ssh session
    def dispose(self):
        """Close and clear the SSH session if one is open."""
        if self.ssh:
            self.ssh.close()
            self.ssh = None
View File

@ -1,189 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import log as logging
LOG = logging.getLogger(__name__)
# A base class for all tools that can be associated to an instance
class PerfTool(object):
    """Abstract base for per-instance benchmark tools (e.g. wrk).

    Subclasses must implement cmd_run_client, cmd_parser_run_client and
    consolidate_results; this base provides result formatting, process
    cleanup and a UDP bandwidth search helper.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, instance, tool_cfg):
        # tool name, owning PerfInstance, install path, and remote pid (if any)
        self.name = tool_cfg.name
        self.instance = instance
        self.dest_path = tool_cfg.dest_path
        self.pid = None

    # Terminate pid if started
    def dispose(self):
        """Kill the remote tool process if one was started."""
        if self.pid:
            # Terminate the iperf server
            LOG.kbdebug("[%s] Terminating %s" % (self.instance.vm_name,
                                                 self.name))
            self.instance.ssh.kill_proc(self.pid)
            self.pid = None

    def parse_error(self, msg):
        """Return a standard error result dict for this tool."""
        return {'error': msg, 'tool': self.name}

    def parse_results(self, protocol=None, throughput=None, lossrate=None, retrans=None,
                      rtt_ms=None, reverse_dir=False, msg_size=None, cpu_load=None,
                      http_total_req=None, http_rps=None, http_tp_kbytes=None,
                      http_sock_err=None, http_sock_timeout=None, http_err=None,
                      latency_stats=None):
        """Build a result dict including only the metrics that were provided."""
        res = {'tool': self.name}
        if throughput is not None:
            res['throughput_kbps'] = throughput
        if protocol is not None:
            res['protocol'] = protocol
        if 'vm_bandwidth' in self.instance.config:
            res['bandwidth_limit_kbps'] = self.instance.config.vm_bandwidth
        if lossrate is not None:
            res['loss_rate'] = lossrate
        if retrans:
            res['retrans'] = retrans
        if rtt_ms:
            res['rtt_ms'] = rtt_ms
        if reverse_dir:
            res['direction'] = 'reverse'
        if msg_size:
            res['pkt_size'] = msg_size
        if cpu_load:
            res['cpu_load'] = cpu_load
        if http_total_req:
            res['http_total_req'] = http_total_req
        if http_rps:
            res['http_rps'] = http_rps
        if http_tp_kbytes:
            res['http_throughput_kbytes'] = http_tp_kbytes
        if http_sock_err:
            res['http_sock_err'] = http_sock_err
        if http_sock_timeout:
            res['http_sock_timeout'] = http_sock_timeout
        if http_err:
            res['http_err'] = http_err
        if latency_stats:
            res['latency_stats'] = latency_stats
        return res

    @abc.abstractmethod
    def cmd_run_client(**kwargs):
        # must be implemented by sub classes
        return None

    @abc.abstractmethod
    def cmd_parser_run_client(self, status, stdout, stderr):
        # must be implemented by sub classes
        return None

    @staticmethod
    @abc.abstractmethod
    def consolidate_results(results):
        # must be implemented by sub classes
        return None

    def find_udp_bdw(self, pkt_size, target_ip):
        '''Find highest UDP bandwidth within max loss rate for given packet size
        :return: a dictionary describing the optimal bandwidth (see parse_results())
        '''
        # we use a binary search to converge to the optimal throughput
        # start with 5Gbps - mid-range between 1 and 10Gbps
        # Convergence can be *very* tricky because UDP throughput behavior
        # can vary dramatically between host runs and guest runs.
        # The packet rate limitation is going to dictate the effective
        # send rate, meaning that small packet sizes will yield the worst
        # throughput.
        # The measured throughput can be vastly smaller than the requested
        # throughput even when the loss rate is zero when the sender cannot
        # send fast enough to fill the network, in that case increasing the
        # requested rate will not make it any better
        # Examples:
        # 1. too much difference between requested/measured bw - regardless of loss rate
        #    => retry with bw mid-way between the requested bw and the measured bw
        # /tmp/nuttcp-7.3.2 -T2 -u -l128 -R5000000K -p5001 -P5002 -fparse 192.168.1.2
        # megabytes=36.9785 real_seconds=2.00 rate_Mbps=154.8474 tx_cpu=23 rx_cpu=32
        # drop=78149 pkt=381077 data_loss=20.50746
        # /tmp/nuttcp-7.3.2 -T2 -u -l128 -R2500001K -p5001 -P5002 -fparse 192.168.1.2
        # megabytes=47.8063 real_seconds=2.00 rate_Mbps=200.2801 tx_cpu=24 rx_cpu=34
        # drop=0 pkt=391629 data_loss=0.00000
        # 2. measured and requested bw are very close :
        #    if loss_rate is too low
        #       increase bw mid-way between requested and last max bw
        #    if loss rate is too high
        #       decrease bw mid-way between the measured bw and the last min bw
        #    else stop iteration (converged)
        # /tmp/nuttcp-7.3.2 -T2 -u -l8192 -R859376K -p5001 -P5002 -fparse 192.168.1.2
        # megabytes=204.8906 real_seconds=2.00 rate_Mbps=859.2992 tx_cpu=99 rx_cpu=10
        # drop=0 pkt=26226 data_loss=0.00000
        min_kbps = 1
        max_kbps = 10000000
        kbps = 5000000
        min_loss_rate = self.instance.config.udp_loss_rate_range[0]
        max_loss_rate = self.instance.config.udp_loss_rate_range[1]
        # stop if the remaining range to cover is less than 5%
        while (min_kbps * 100 / max_kbps) < 95:
            res_list = self.run_client_dir(target_ip, 0, bandwidth_kbps=kbps,
                                           udp=True, length=pkt_size,
                                           no_cpu_timed=1)
            # always pick the first element in the returned list of dict(s)
            # should normally only have 1 element
            res = res_list[0]
            if 'error' in res:
                return res
            loss_rate = res['loss_rate']
            measured_kbps = res['throughput_kbps']
            LOG.kbdebug(
                "[%s] pkt-size=%d throughput=%d<%d/%d<%d Kbps loss-rate=%d" %
                (self.instance.vm_name, pkt_size, min_kbps, measured_kbps,
                 kbps, max_kbps, loss_rate))
            # expected rate must be at least 80% of the requested rate
            if (measured_kbps * 100 / kbps) < 80:
                # the measured bw is too far away from the requested bw
                # take half the distance or 3x the measured bw whichever is lowest
                kbps = measured_kbps + (kbps - measured_kbps) / 2
                if measured_kbps:
                    kbps = min(kbps, measured_kbps * 3)
                max_kbps = kbps
                continue
            # The measured bw is within striking distance from the requested bw
            # increase bw if loss rate is too small
            if loss_rate < min_loss_rate:
                # undershot
                if measured_kbps > min_kbps:
                    min_kbps = measured_kbps
                else:
                    # to make forward progress we need to increase min_kbps
                    # and try a higher bw since the loss rate is too low
                    min_kbps = int((max_kbps + min_kbps) / 2)
                kbps = int((max_kbps + min_kbps) / 2)
                # LOG.info("    undershot, min=%d kbps=%d max=%d" % (min_kbps, kbps, max_kbps))
            elif loss_rate > max_loss_rate:
                # overshot
                max_kbps = kbps
                if measured_kbps < kbps:
                    kbps = measured_kbps
                else:
                    kbps = int((max_kbps + min_kbps) / 2)
                # LOG.info("    overshot, min=%d kbps=%d max=%d" % (min_kbps, kbps, max_kbps))
            else:
                # converged within loss rate bracket
                break
        return res

View File

@ -1,668 +0,0 @@
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""High level ssh library.
Usage examples:
Execute command and get output:
ssh = sshclient.SSH('root', 'example.com', port=33)
status, stdout, stderr = ssh.execute('ps ax')
if status:
raise Exception('Command failed with non-zero status.')
print stdout.splitlines()
Execute command with huge output:
class PseudoFile(object):
def write(chunk):
if 'error' in chunk:
email_admin(chunk)
ssh = sshclient.SSH('root', 'example.com')
ssh.run('tail -f /var/log/syslog', stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH('user', 'example.com')
status, out, err = ssh.execute('/bin/sh -s arg1 arg2',
stdin=open('~/myscript.sh', 'r'))
Upload file:
ssh = sshclient.SSH('user', 'example.com')
ssh.run('cat > ~/upload/file.gz', stdin=open('/store/file.gz', 'rb'))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
    sshclient = eventlet.import_patched("openstack.common.sshclient")
"""
import re
import select
import socket
import StringIO
import sys
import time
import log as logging
import paramiko
import scp
LOG = logging.getLogger(__name__)
class SSHError(Exception):
    """Base exception for SSH connection and command execution failures."""
    pass
class SSHTimeout(SSHError):
    """Raised when an SSH command or connection wait exceeds its timeout."""
    pass
# Check IPv4 address syntax - not completely fool proof but will catch
# some invalid formats
def is_ipv4(address):
    """Return True if *address* is accepted by inet_aton as an IPv4 address."""
    try:
        socket.inet_aton(address)
        return True
    except socket.error:
        return False
class SSHAccess(object):
    '''
    A class to contain all the information needed to access a host
    (native or virtual) using SSH
    '''
    def __init__(self, arg_value=None):
        '''
        decode user@host[:pwd]
        'hugo@1.1.1.1:secret' -> ('hugo', '1.1.1.1', 'secret', None)
        'huggy@2.2.2.2' -> ('huggy', '2.2.2.2', None, None)
        None ->(None, None, None, None)
        Examples of fatal errors (will call exit):
        'hutch@q.1.1.1' (invalid IP)
        '@3.3.3.3' (missing username)
        'hiro@' or 'buggy' (missing host IP)
        The error field will be None in case of success or will
        contain a string describing the error
        '''
        self.username = None
        self.host = None
        self.password = None
        # name of the file that contains the private key
        self.private_key_file = None
        # this is the private key itself (a long string starting with
        # -----BEGIN RSA PRIVATE KEY-----
        # used when the private key is not saved in any file
        self.private_key = None
        self.public_key_file = None
        self.port = 22
        self.error = None
        if not arg_value:
            return
        match = re.search(r'^([^@]+)@([0-9\.]+):?(.*)$', arg_value)
        if not match:
            self.error = 'Invalid argument: ' + arg_value
            return
        if not is_ipv4(match.group(2)):
            self.error = 'Invalid IPv4 address ' + match.group(2)
            return
        (self.username, self.host, password) = match.groups()
        # The third regex group is an empty string when no ':pwd' suffix is
        # given; normalize to None to match the contract documented above
        self.password = password if password else None

    def copy_from(self, ssh_access):
        """Copy all access fields (credentials, keys, port) from *ssh_access*."""
        self.username = ssh_access.username
        self.host = ssh_access.host
        self.port = ssh_access.port
        self.password = ssh_access.password
        self.private_key = ssh_access.private_key
        self.public_key_file = ssh_access.public_key_file
        self.private_key_file = ssh_access.private_key_file
class SSH(object):
"""Represent ssh connection."""
    def __init__(self, ssh_access,
                 connect_timeout=60,
                 connect_retry_count=30,
                 connect_retry_wait_sec=2):
        """Initialize SSH client.

        :param ssh_access: SSHAccess object holding user, host, port,
                           password and/or private key
        :param connect_timeout: timeout when connecting ssh
        :param connect_retry_count: how many times to retry connecting
        :param connect_retry_wait_sec: seconds to wait between retries

        NOTE: construction connects to the host right away (via
        __get_distro) to detect the remote Linux distribution.
        """
        self.ssh_access = ssh_access
        if ssh_access.private_key:
            # convert the text key into a paramiko key object
            self.pkey = self._get_pkey(ssh_access.private_key)
        else:
            self.pkey = None
        # False means "no cached paramiko client yet"
        self._client = False
        self.connect_timeout = connect_timeout
        self.connect_retry_count = connect_retry_count
        self.connect_retry_wait_sec = connect_retry_wait_sec
        self.distro_id = None
        self.distro_id_like = None
        self.distro_version = None
        self.__get_distro()
    def _get_pkey(self, key):
        '''Get the binary form of the private key
        from the text form

        :param key: private key text (str) or an open file-like object
        :return: a paramiko RSAKey or DSSKey instance
        :raises SSHError: if the key cannot be parsed as RSA or DSS
        '''
        if isinstance(key, basestring):
            key = StringIO.StringIO(key)
        errors = []
        # try RSA first, then DSS; collect errors for the final message
        for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
            try:
                return key_class.from_private_key(key)
            except paramiko.SSHException as exc:
                errors.append(exc)
        raise SSHError('Invalid pkey: %s' % (errors))
    def _get_client(self):
        """Return a connected paramiko client, creating and caching it on
        first use.

        Retries the connection up to connect_retry_count times, sleeping
        connect_retry_wait_sec between attempts.

        :raises SSHError: when all connection attempts fail
        """
        if self._client:
            return self._client
        self._client = paramiko.SSHClient()
        self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        for _ in range(self.connect_retry_count):
            try:
                self._client.connect(self.ssh_access.host,
                                     username=self.ssh_access.username,
                                     port=self.ssh_access.port,
                                     pkey=self.pkey,
                                     key_filename=self.ssh_access.private_key_file,
                                     password=self.ssh_access.password,
                                     timeout=self.connect_timeout)
                return self._client
            except (paramiko.AuthenticationException,
                    paramiko.BadHostKeyException,
                    paramiko.SSHException,
                    socket.error):
                time.sleep(self.connect_retry_wait_sec)
        # all retries exhausted: reset the cached client and report failure
        self._client = None
        msg = '[%s] SSH Connection failed after %s attempts' % (self.ssh_access.host,
                                                                self.connect_retry_count)
        raise SSHError(msg)
    def close(self):
        """Close the cached paramiko client and mark the connection as unset."""
        self._client.close()
        self._client = False
    def run(self, cmd, stdin=None, stdout=None, stderr=None,
            raise_on_error=True, timeout=3600):
        """Execute specified command on the server.

        :param cmd: Command to be executed.
        :param stdin: Open file or string to pass to stdin.
        :param stdout: Open file to connect to stdout.
        :param stderr: Open file to connect to stderr.
        :param raise_on_error: If False then exit code will be returned. If True
                               then exception will be raised if non-zero code.
        :param timeout: Timeout in seconds for command execution.
                        Default 1 hour. No timeout if set to 0.
        """
        client = self._get_client()
        # allow passing stdin as a plain string for convenience
        if isinstance(stdin, basestring):
            stdin = StringIO.StringIO(stdin)
        return self._run(client, cmd, stdin=stdin, stdout=stdout,
                         stderr=stderr, raise_on_error=raise_on_error,
                         timeout=timeout)
    def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
             raise_on_error=True, timeout=3600):
        """Low-level command execution over an open paramiko client.

        Pumps stdin/stdout/stderr through a select() loop until the remote
        command exits or *timeout* seconds elapse.

        :return: the remote exit status (when raise_on_error is False)
        :raises SSHTimeout: when the command exceeds *timeout*
        :raises SSHError: on non-zero exit status when raise_on_error is True
        """
        transport = client.get_transport()
        session = transport.open_session()
        session.exec_command(cmd)
        start_time = time.time()
        data_to_send = ''
        stderr_data = None
        # If we have data to be sent to stdin then `select' should also
        # check for stdin availability.
        if stdin and not stdin.closed:
            writes = [session]
        else:
            writes = []
        while True:
            # Block until data can be read/write (1s poll so the timeout
            # check below still runs with an idle session).
            select.select([session], writes, [session], 1)
            if session.recv_ready():
                data = session.recv(4096)
                if stdout is not None:
                    stdout.write(data)
                continue
            if session.recv_stderr_ready():
                stderr_data = session.recv_stderr(4096)
                if stderr is not None:
                    stderr.write(stderr_data)
                continue
            if session.send_ready():
                if stdin is not None and not stdin.closed:
                    if not data_to_send:
                        data_to_send = stdin.read(4096)
                        if not data_to_send:
                            # end of input: close the write side so the
                            # remote command sees EOF on its stdin
                            stdin.close()
                            session.shutdown_write()
                            writes = []
                            continue
                    sent_bytes = session.send(data_to_send)
                    data_to_send = data_to_send[sent_bytes:]
            if session.exit_status_ready():
                break
            # equivalent to: elapsed time > timeout
            if timeout and (time.time() - timeout) > start_time:
                args = {'cmd': cmd, 'host': self.ssh_access.host}
                raise SSHTimeout(('Timeout executing command '
                                  '"%(cmd)s" on host %(host)s') % args)
            # if e:
            #     raise SSHError('Socket error.')
        exit_status = session.recv_exit_status()
        if 0 != exit_status and raise_on_error:
            fmt = ('Command "%(cmd)s" failed with exit_status %(status)d.')
            details = fmt % {'cmd': cmd, 'status': exit_status}
            if stderr_data:
                details += (' Last stderr data: "%s".') % stderr_data
            raise SSHError(details)
        return exit_status
    def execute(self, cmd, stdin=None, timeout=3600):
        """Execute the specified command on the server.

        :param cmd: Command to be executed.
        :param stdin: Open file to be sent on process stdin.
        :param timeout: Timeout for execution of the command.

        :return: tuple (exit_status, stdout, stderr) where stdout/stderr
                 are the full captured output strings
        """
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        exit_status = self.run(cmd, stderr=stderr,
                               stdout=stdout, stdin=stdin,
                               timeout=timeout, raise_on_error=False)
        stdout.seek(0)
        stderr.seek(0)
        return (exit_status, stdout.read(), stderr.read())
    def wait(self, timeout=120, interval=1):
        """Wait until the host is reachable via ssh.

        Repeatedly runs 'uname' every *interval* seconds and returns its
        (status, stdout, stderr) on first success.

        :raises SSHTimeout: when the host is still unreachable after *timeout*
        """
        start_time = time.time()
        while True:
            try:
                return self.execute('uname')
            except (socket.error, SSHError):
                time.sleep(interval)
            if time.time() > (start_time + timeout):
                raise SSHTimeout(('Timeout waiting for "%s"') % self.ssh_access.host)
def __extract_property(self, name, input_str):
expr = name + r'="?([\w\.]*)"?'
match = re.search(expr, input_str)
if match:
return match.group(1)
return 'Unknown'
    # Get the linux distro
    def __get_distro(self):
        '''cat /etc/*-release | grep ID
        Ubuntu:
            DISTRIB_ID=Ubuntu
            ID=ubuntu
            ID_LIKE=debian
            VERSION_ID="14.04"
        RHEL:
            ID="rhel"
            ID_LIKE="fedora"
            VERSION_ID="7.0"

        Populates self.distro_id, self.distro_id_like and
        self.distro_version ('Unknown' when a field is absent).
        '''
        distro_cmd = "grep ID /etc/*-release"
        (status, distro_out, _) = self.execute(distro_cmd)
        if status:
            # command failed: fall through with empty output so all
            # fields end up as 'Unknown'
            distro_out = ''
        self.distro_id = self.__extract_property('ID', distro_out)
        self.distro_id_like = self.__extract_property('ID_LIKE', distro_out)
        self.distro_version = self.__extract_property('VERSION_ID', distro_out)
def pidof(self, proc_name):
'''
Return a list containing the pids of all processes of a given name
the list is empty if there is no pid
'''
# the path update is necessary for RHEL
cmd = "PATH=$PATH:/usr/sbin pidof " + proc_name
(status, cmd_output, _) = self.execute(cmd)
if status:
return []
cmd_output = cmd_output.strip()
result = cmd_output.split()
return result
    # kill pids in the given list of pids
    def kill_proc(self, pid_list):
        """Send SIGKILL to every pid in *pid_list* with a single remote command."""
        cmd = "kill -9 " + ' '.join(pid_list)
        self.execute(cmd)
# check stats for a given path
def stat(self, path):
(status, cmd_output, _) = self.execute('stat ' + path)
if status:
return None
return cmd_output
def ping_check(self, target_ip, ping_count=2, pass_threshold=80):
'''helper function to ping from one host to an IP address,
for a given count and pass_threshold;
Steps:
ssh to the host and then ping to the target IP
then match the output and verify that the loss% is
less than the pass_threshold%
Return 1 if the criteria passes
Return 0, if it fails
'''
cmd = "ping -c " + str(ping_count) + " " + str(target_ip)
(_, cmd_output, _) = self.execute(cmd)
match = re.search(r'(\d*)% packet loss', cmd_output)
pkt_loss = match.group(1)
if int(pkt_loss) < int(pass_threshold):
return 1
else:
LOG.error('Ping to %s failed: %s' % (target_ip, cmd_output))
return 0
def get_file_from_host(self, from_path, to_path):
'''
A wrapper api on top of paramiko scp module, to scp
a local file to the host.
'''
sshcon = self._get_client()
scpcon = scp.SCPClient(sshcon.get_transport())
try:
scpcon.get(from_path, to_path)
except scp.SCPException as exp:
LOG.error("Send failed: [%s]", exp)
return 0
return 1
def read_remote_file(self, from_path):
'''
Read a remote file and save it to a buffer.
'''
cmd = "cat " + from_path
(status, cmd_output, _) = self.execute(cmd)
if status:
return None
return cmd_output
    def get_host_os_version(self):
        '''
        Identify the host distribution/release.

        Tries /etc/os-release first (NAME + VERSION_ID), then falls back
        to /etc/system-release (Red Hat style). Returns a display string
        like "Ubuntu 14.04", or None when neither file can be read.
        '''
        os_release_file = "/etc/os-release"
        sys_release_file = "/etc/system-release"
        name = ""
        version = ""
        if self.stat(os_release_file):
            data = self.read_remote_file(os_release_file)
            if data is None:
                LOG.error("ERROR:Failed to read file %s" % os_release_file)
                return None
            for line in data.splitlines():
                mobj = re.match(r'NAME=(.*)', line)
                if mobj:
                    name = mobj.group(1).strip("\"")
                mobj = re.match(r'VERSION_ID=(.*)', line)
                if mobj:
                    version = mobj.group(1).strip("\"")
            os_name = name + " " + version
            return os_name
        if self.stat(sys_release_file):
            data = self.read_remote_file(sys_release_file)
            if data is None:
                LOG.error("ERROR:Failed to read file %s" % sys_release_file)
                return None
            for line in data.splitlines():
                mobj = re.match(r'Red Hat.*', line)
                if mobj:
                    return mobj.group(0)
        return None
    def check_rpm_package_installed(self, rpm_pkg):
        '''
        Given a host and a package name, check if it is installed on the
        system.

        Returns the matching rpm line when found, None otherwise.
        '''
        check_pkg_cmd = "rpm -qa | grep " + rpm_pkg
        (status, cmd_output, _) = self.execute(check_pkg_cmd)
        if status:
            return None
        pkg_pattern = ".*" + rpm_pkg + ".*"
        rpm_pattern = re.compile(pkg_pattern, re.IGNORECASE)
        for line in cmd_output.splitlines():
            mobj = rpm_pattern.match(line)
            if mobj:
                return mobj.group(0)
        # NOTE(review): this print only runs when NO matching package line
        # was found, so the message text looks inverted — confirm intent
        print "%s pkg installed " % rpm_pkg
        return None
def get_openstack_release(self, ver_str):
'''
Get the release series name from the package version
Refer to here for release tables:
https://wiki.openstack.org/wiki/Releases
'''
ver_table = {"2015.1": "Kilo",
"2014.2": "Juno",
"2014.1": "Icehouse",
"2013.2": "Havana",
"2013.1": "Grizzly",
"2012.2": "Folsom",
"2012.1": "Essex",
"2011.3": "Diablo",
"2011.2": "Cactus",
"2011.1": "Bexar",
"2010.1": "Austin"}
ver_prefix = re.search(r"20\d\d\.\d", ver_str).group(0)
if ver_prefix in ver_table:
return ver_table[ver_prefix]
else:
return "Unknown"
    def check_openstack_version(self):
        '''
        Identify the openstack version running on the controller.

        Returns "<release> (<version>)" e.g. "Juno (2014.2.1)", or
        "Unknown" when nova-manage is not available.
        '''
        nova_cmd = "nova-manage --version"
        (status, _, err_output) = self.execute(nova_cmd)
        if status:
            return "Unknown"
        # the version string is read from stderr (nova-manage apparently
        # writes it there)
        ver_str = err_output.strip()
        release_str = self.get_openstack_release(err_output)
        return release_str + " (" + ver_str + ")"
    def get_cpu_info(self):
        '''
        Get the CPU info of the controller.

        Note: Here we are assuming the controller node has the exact
        hardware as the compute nodes.

        Returns "<core count> * <model name>" or "Unknown" on failure.
        '''
        cmd = 'cat /proc/cpuinfo | grep -m1 "model name"'
        (status, std_output, _) = self.execute(cmd)
        if status:
            return "Unknown"
        model_name = re.search(r":\s(.*)", std_output).group(1)
        # count the "model name" lines to get the logical core count
        cmd = 'cat /proc/cpuinfo | grep "model name" | wc -l'
        (status, std_output, _) = self.execute(cmd)
        if status:
            return "Unknown"
        cores = std_output.strip()
        return (cores + " * " + model_name)
def get_nic_name(self, agent_type, encap, internal_iface_dict):
    '''
    Get the NIC info of the controller.
    Note: Here we are assuming the controller node has the exact
    hardware as the compute nodes.

    :param agent_type: neutron L2 agent type string, e.g. "Open vSwitch agent"
    :param encap: tenant network encapsulation ('vlan', 'vxlan' or 'gre')
    :param internal_iface_dict: hostname -> internal interface name mapping
    :return: NIC model string reported by lspci, or "Unknown" on any failure
    '''
    # The internal_iface_dict is a dictionary containing the mapping between
    # hostname and the internal interface name like below:
    # {u'hh23-4': u'eth1', u'hh23-5': u'eth1', u'hh23-6': u'eth1'}
    cmd = "hostname"
    (status, std_output, _) = self.execute(cmd)
    if status:
        return "Unknown"
    hostname = std_output.strip()
    if hostname in internal_iface_dict:
        iface = internal_iface_dict[hostname]
    else:
        return "Unknown"
    # Figure out which interface is for internal traffic
    if 'Linux bridge' in agent_type:
        ifname = iface
    elif 'Open vSwitch' in agent_type:
        if encap == 'vlan':
            # The physical port is a member of the OVS bridge; the grep
            # filters out the patch port (phy-*):
            # [root@hh23-10 ~]# ovs-vsctl list-ports br-inst
            # eth1
            # phy-br-inst
            cmd = 'ovs-vsctl list-ports ' + iface + ' | grep -E "^[^phy].*"'
            (status, std_output, _) = self.execute(cmd)
            if status:
                return "Unknown"
            ifname = std_output.strip()
        elif encap == 'vxlan' or encap == 'gre':
            # This is complicated. We need to first get the local IP address on
            # br-tun, then do a reverse lookup to get the physical interface.
            #
            # [root@hh23-4 ~]# ip addr show to "23.23.2.14"
            # 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
            #     inet 23.23.2.14/24 brd 23.23.2.255 scope global eth1
            #        valid_lft forever preferred_lft forever
            cmd = "ip addr show to " + iface + " | awk -F: '{print $2}'"
            (status, std_output, _) = self.execute(cmd)
            if status:
                return "Unknown"
            ifname = std_output.strip()
        # NOTE(review): with an OVS agent and an encap other than
        # vlan/vxlan/gre, ifname stays unassigned and the ethtool step
        # below raises NameError — confirm intended encap values.
    else:
        return "Unknown"
    # Map interface name -> PCI bus info -> lspci description
    cmd = 'ethtool -i ' + ifname + ' | grep bus-info'
    (status, std_output, _) = self.execute(cmd)
    if status:
        return "Unknown"
    bus_info = re.search(r":\s(.*)", std_output).group(1)
    cmd = 'lspci -s ' + bus_info
    (status, std_output, _) = self.execute(cmd)
    if status:
        return "Unknown"
    nic_name = re.search(r"Ethernet controller:\s(.*)", std_output).group(1)
    return (nic_name)
def get_l2agent_version(self, agent_type):
    '''
    Get the L2 agent version of the controller.
    Note: Here we are assuming the controller node has the exact
    hardware as the compute nodes.

    :param agent_type: neutron L2 agent type string
    :return: "Linux Bridge x.y" / "OVS x.y", or "Unknown"
    '''
    if 'Linux bridge' in agent_type:
        prefix = "Linux Bridge "
        version_cmd = "brctl --version | awk -F',' '{print $2}'"
    elif 'Open vSwitch' in agent_type:
        prefix = "OVS "
        version_cmd = "ovs-vsctl --version | awk -F')' '{print $2}'"
    else:
        return "Unknown"
    rc, version_out, _ = self.execute(version_cmd)
    return "Unknown" if rc else prefix + version_out.strip()
##################################################
# Only invoke the module directly for test purposes. Should be
# invoked from pns script.
##################################################
def main():
    """Ad-hoc smoke test: SSH to a host and print distro / OpenStack info."""
    # As argument pass the SSH access string, e.g. "localadmin@1.1.1.1:secret"
    test_ssh = SSH(SSHAccess(sys.argv[1]))

    # Distro identification attributes populated by the SSH wrapper
    print 'ID=' + test_ssh.distro_id
    print 'ID_LIKE=' + test_ssh.distro_id_like
    print 'VERSION_ID=' + test_ssh.distro_version

    # ssh.wait()
    # print ssh.pidof('bash')
    # print ssh.stat('/tmp')

    print test_ssh.check_openstack_version()
    print test_ssh.get_cpu_info()
    print test_ssh.get_l2agent_version("Open vSwitch agent")

if __name__ == "__main__":
    main()

View File

@ -1,131 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystoneclient.openstack.common.apiclient.exceptions as keystone_exception
import log as logging
import users
LOG = logging.getLogger(__name__)
class Tenant(object):
    """
    Holds the tenant resources.

    1. Provides ability to create users in a tenant
    2. Uses the User class to perform all user resource creation and deletion
    """

    def __init__(self, tenant_name, kloud, tenant_quota, reusing_users=None):
        """
        Store the tenant name, tenant id and keystone client.

        :param tenant_name: name of the tenant to create or reuse
        :param kloud: owning Kloud instance (provides keystone client,
                      scale config and reuse flags)
        :param tenant_quota: per-service quota dict ('nova'/'cinder'/'neutron')
        :param reusing_users: optional list of existing user credential dicts
                              ({'username': ..., 'password': ...}) to reuse
                              instead of creating new users
        """
        self.kloud = kloud
        self.tenant_name = tenant_name
        if not self.kloud.reusing_tenants:
            self.tenant_object = self._get_tenant()
            self.tenant_id = self.tenant_object.id
        else:
            LOG.info("Using tenant: " + self.tenant_name)
            # Only admin can retrieve the tenant object via Keystone API
            self.tenant_object = None
            try:
                # Try to see if we have the admin access to retrieve the tenant id
                # using the tenant name directly from keystone
                self.tenant_id = self.kloud.keystone.tenants.find(name=self.tenant_name).id
            except Exception:
                # Fall back to the tenant id of the current session
                self.tenant_id = self.kloud.keystone.tenant_id
        self.tenant_quota = tenant_quota
        self.reusing_users = reusing_users
        # Contains a list of user instance objects
        self.user_list = []

    def _get_tenant(self):
        '''
        Create or reuse a tenant object of a given name.

        :return: the keystone tenant object (created, or found by name if it
                 already exists)
        '''
        try:
            LOG.info("Creating tenant: " + self.tenant_name)
            tenant_object = \
                self.kloud.keystone.tenants.create(tenant_name=self.tenant_name,
                                                   description="KloudBuster tenant",
                                                   enabled=True)
            return tenant_object
        except keystone_exception.Conflict as exc:
            # Most likely the entry already exists:
            # Conflict: Conflict occurred attempting to store project - Duplicate Entry (HTTP 409)
            if exc.http_status != 409:
                raise exc
            LOG.info("Tenant %s already present, reusing it" % self.tenant_name)
            return self.kloud.keystone.tenants.find(name=self.tenant_name)

        # Should never come here
        raise Exception()

    def create_resources(self):
        """
        Creates all the entities associated with a user;
        offloads the per-user work to the User class.
        """
        if self.reusing_users:
            # Wrap the pre-existing users instead of creating new ones
            for user_info in self.reusing_users:
                user_name = user_info['username']
                password = user_info['password']
                user_instance = users.User(user_name, password, self, '_member_')
                self.user_list.append(user_instance)
        else:
            # Loop over the required number of users and create resources
            for user_count in xrange(self.kloud.scale_cfg['users_per_tenant']):
                user_name = self.tenant_name + "-U" + str(user_count)
                user_instance = users.User(user_name, user_name, self,
                                           self.kloud.scale_cfg['keystone_admin_role'])
                # Global list with all user instances
                self.user_list.append(user_instance)

        for user_instance in self.user_list:
            # Now create the user resources like routers which in turn trigger
            # network and vm creation
            user_instance.create_resources()

    def get_first_network(self):
        # Returns the first network of the first user, or None if empty
        if self.user_list:
            return self.user_list[0].get_first_network()
        return None

    def get_all_instances(self):
        # Flat list of every VM instance across all users of this tenant
        all_instances = []
        for user in self.user_list:
            all_instances.extend(user.get_all_instances())
        return all_instances

    def get_prefix(self):
        # NOTE(review): self.prefix is never assigned anywhere in this class,
        # so this raises AttributeError unless a subclass or caller sets it
        # — confirm against callers.
        return self.kloud.get_prefix() + '_' + self.prefix

    def delete_resources(self):
        """
        Delete all user resources and then delete the tenant itself
        (the tenant is kept when running on reused users).
        """
        # Delete all the users in the tenant along with network and compute elements
        for user in self.user_list:
            user.delete_resources()

        if not self.reusing_users:
            # Delete the tenant (self)
            self.kloud.keystone.tenants.delete(self.tenant_id)

View File

@ -1,236 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base_compute
import base_network
from cinderclient.v2 import client as cinderclient
import keystoneclient.openstack.common.apiclient.exceptions as keystone_exception
import log as logging
from neutronclient.v2_0 import client as neutronclient
from novaclient.client import Client
LOG = logging.getLogger(__name__)
class KBFlavorCheckException(Exception):
    """Raised when no existing flavor meets the minimum requirements to instantiate VMs."""
    pass
class KBQuotaCheckException(Exception):
    """Raised when a nova/cinder/neutron quota is smaller than the configured requirement."""
    pass
class User(object):
    """
    User class that stores the router list.
    Creates and deletes N routers based on the configured number of routers.
    """

    def __init__(self, user_name, password, tenant, user_role):
        """
        Store all resources:
        1. Keystone client object
        2. Tenant and User information
        3. nova and neutron clients
        4. router list

        :param user_name: keystone user name to create or reuse
        :param password: password for that user
        :param tenant: owning Tenant instance
        :param user_role: keystone role name to grant to the user
        """
        self.user_name = user_name
        self.password = password
        self.tenant = tenant
        self.router_list = []
        # Store the nova, neutron and cinder client
        self.nova_client = None
        self.neutron_client = None
        self.cinder_client = None
        # Each user is associated to 1 key pair at most
        self.key_pair = None
        self.key_name = None

        # Create the user within the given tenant and associate
        # the admin role with the user. We need admin role for user
        # since we perform VM placement in future
        #
        # If running on top of existing tenants/users, skip
        # the step for admin role association.
        if not self.tenant.reusing_users:
            self.user = self._get_user()
            current_role = self.tenant.kloud.keystone.roles.find(name=user_role)
            self.tenant.kloud.keystone.roles.add_user_role(self.user,
                                                           current_role,
                                                           tenant.tenant_id)
        else:
            # Only admin can retrieve the object via Keystone API
            self.user = None
            LOG.info("Using user: " + self.user_name)

    def _create_user(self):
        # Create the keystone user inside this tenant
        LOG.info("Creating user: " + self.user_name)
        return self.tenant.kloud.keystone.users.create(name=self.user_name,
                                                       password=self.password,
                                                       email="kloudbuster@localhost",
                                                       tenant_id=self.tenant.tenant_id)

    def _get_user(self):
        '''
        Create a new user, or if one with the same name already exists
        (leftover on a different tenant), delete the stale user and
        create a fresh one.
        '''
        try:
            user = self._create_user()
            return user
        except keystone_exception.Conflict as exc:
            # Most likely the entry already exists (leftover from past failed runs):
            # Conflict: Conflict occurred attempting to store user - Duplicate Entry (HTTP 409)
            if exc.http_status != 409:
                raise exc
            # Try to repair keystone by removing that user
            LOG.warn("User creation failed due to stale user with same name: " +
                     self.user_name)
            user = self.tenant.kloud.keystone.users.find(name=self.user_name)
            LOG.info("Deleting stale user with name: " + self.user_name)
            self.tenant.kloud.keystone.users.delete(user)
            return self._create_user()

        # Should never come here
        raise Exception()

    def delete_resources(self):
        """Delete the key pair, all routers (with their networks/VMs) and,
        unless users are reused, the keystone user itself."""
        LOG.info("Deleting all user resources for user %s" % self.user_name)

        # Delete key pair
        if self.key_pair:
            self.key_pair.remove_public_key()

        # Delete all user routers
        for router in self.router_list:
            router.delete_router()

        if not self.tenant.reusing_users:
            # Finally delete the user
            self.tenant.kloud.keystone.users.delete(self.user.id)

    def update_tenant_quota(self, tenant_quota):
        # Push the configured nova/cinder/neutron quotas for this tenant
        nova_quota = base_compute.NovaQuota(self.nova_client, self.tenant.tenant_id)
        nova_quota.update_quota(**tenant_quota['nova'])

        cinder_quota = base_compute.CinderQuota(self.cinder_client, self.tenant.tenant_id)
        cinder_quota.update_quota(**tenant_quota['cinder'])

        neutron_quota = base_network.NeutronQuota(self.neutron_client, self.tenant.tenant_id)
        neutron_quota.update_quota(tenant_quota['neutron'])

    def check_resources_quota(self):
        """Verify a usable flavor exists and that the existing tenant quotas
        are at least as large as the configured requirements.

        :raises KBFlavorCheckException: no flavor meets the minimum specs
        :raises KBQuotaCheckException: a quota is below the requirement
        """
        # Flavor check: pick the first flavor with >= 1 vcpu, 1024 MB RAM, 10 GB disk
        flavor_manager = base_compute.Flavor(self.nova_client)
        flavor_to_use = None
        for flavor in flavor_manager.list():
            flavor = flavor.__dict__
            if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
                continue
            flavor_to_use = flavor
            break
        if flavor_to_use:
            LOG.info('Automatically selects flavor %s to instantiate VMs.' %
                     (flavor_to_use['name']))
            self.tenant.kloud.flavor_to_use = flavor_to_use['name']
        else:
            LOG.error('Cannot find a flavor which meets the minimum '
                      'requirements to instantiate VMs.')
            raise KBFlavorCheckException()

        # Nova/Cinder/Neutron quota check
        tenant_id = self.tenant.tenant_id
        meet_quota = True
        for quota_type in ['nova', 'cinder', 'neutron']:
            if quota_type == 'nova':
                quota_manager = base_compute.NovaQuota(self.nova_client, tenant_id)
            elif quota_type == 'cinder':
                quota_manager = base_compute.CinderQuota(self.cinder_client, tenant_id)
            else:
                quota_manager = base_network.NeutronQuota(self.neutron_client, tenant_id)

            meet_quota = True
            quota = quota_manager.get()
            for key, value in self.tenant.tenant_quota[quota_type].iteritems():
                if quota[key] < value:
                    meet_quota = False
                    break
            if not meet_quota:
                LOG.error('%s quota is too small. Minimum requirement: %s.' %
                          (quota_type, self.tenant.tenant_quota[quota_type]))
                raise KBQuotaCheckException()

    def create_resources(self):
        """
        Creates all the User elements associated with a User:
        1. Creates the routers
        2. Creates the neutron and nova client objects
        """
        # Create a new neutron client for this User with correct credentials
        creden = {}
        creden['username'] = self.user_name
        creden['password'] = self.password
        creden['auth_url'] = self.tenant.kloud.auth_url
        creden['tenant_name'] = self.tenant.tenant_name

        # Create the neutron client to be used for all operations
        self.neutron_client = neutronclient.Client(**creden)

        # Create a new nova and cinder client for this User with correct credentials
        creden_nova = {}
        creden_nova['username'] = self.user_name
        creden_nova['api_key'] = self.password
        creden_nova['auth_url'] = self.tenant.kloud.auth_url
        creden_nova['project_id'] = self.tenant.tenant_name
        creden_nova['version'] = 2
        self.nova_client = Client(**creden_nova)
        self.cinder_client = cinderclient.Client(**creden_nova)

        # On reused tenants we can only check quotas; otherwise set them
        if self.tenant.kloud.reusing_tenants:
            self.check_resources_quota()
        else:
            self.update_tenant_quota(self.tenant.tenant_quota)

        config_scale = self.tenant.kloud.scale_cfg

        # Create the user's keypair if configured
        if config_scale.public_key_file:
            self.key_pair = base_compute.KeyPair(self.nova_client)
            self.key_name = self.user_name + '-K'
            self.key_pair.add_public_key(self.key_name, config_scale.public_key_file)

        # Find the external network that routers need to attach to
        external_network = base_network.find_external_network(self.neutron_client)

        # Create the required number of routers and append them to router list
        LOG.info("Creating routers and networks for user %s" % self.user_name)
        for router_count in range(config_scale['routers_per_user']):
            router_instance = base_network.Router(self)
            self.router_list.append(router_instance)
            router_name = self.user_name + "-R" + str(router_count)
            # Create the router and also attach it to external network
            router_instance.create_router(router_name, external_network)
            # Now create the network resources inside the router
            router_instance.create_network_resources(config_scale)

    def get_first_network(self):
        # Returns the first network of the first router, or None if empty
        if self.router_list:
            return self.router_list[0].get_first_network()
        return None

    def get_all_instances(self):
        # Flat list of every VM instance across all routers of this user
        all_instances = []
        for router in self.router_list:
            all_instances.extend(router.get_all_instances())
        return all_instances

View File

@ -1,149 +0,0 @@
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
from perf_tool import PerfTool
from hdrh.histogram import HdrHistogram
import log as logging
LOG = logging.getLogger(__name__)
class WrkTool(PerfTool):
    """Driver for the wrk2 HTTP benchmarking tool: builds the client command
    line, parses its JSON output, and consolidates results across VMs."""

    def __init__(self, instance, cfg_http_tool):
        PerfTool.__init__(self, instance, cfg_http_tool)

    def cmd_run_client(self, target_url, threads, connections,
                       rate_limit=0, timeout=5, connetion_type='Keep-alive'):
        '''
        Return the command for running the benchmarking tool.

        :param target_url: URL the client will load
        :param threads: number of wrk2 threads (-t)
        :param connections: number of concurrent connections (-c)
        :param rate_limit: requests/sec cap (-R); 0 means effectively
                           uncapped (65535)
        :param timeout: per-request timeout in seconds
        :param connetion_type: unused here (sic — parameter name kept for
                               interface compatibility)
        :return: the full wrk2 command string
        '''
        duration_sec = self.instance.config.http_tool_configs.duration
        if not rate_limit:
            rate_limit = 65535

        cmd = '%s -t%d -c%d -R%d -d%ds --timeout %ds -D2 -j %s' % \
            (self.dest_path, threads, connections, rate_limit,
             duration_sec, timeout, target_url)
        LOG.kbdebug("[%s] %s" % (self.instance.vm_name, cmd))
        return cmd

    def cmd_parser_run_client(self, status, stdout, stderr):
        """Parse the wrk2 JSON output into the normalized result dict;
        on any failure return a parse-error result instead."""
        if status:
            return self.parse_error(stderr)

        # Sample Output:
        # {
        #   "seq": 1,
        #   "latency": {
        #     "min": 509440, "max": 798720,
        #     "counters": [
        #        8, [1990, 1, 2027, 1],
        #        9, [1032, 1, 1058, 1, 1085, 1, 1093, 1, 1110, 1, 1111, 1,
        #            1128, 1, 1129, 1, 1146, 1, 1147, 1, 1148, 1, 1165, 1, 1166, 1, 1169, 1,
        #            1172, 1, 1182, 1, 1184, 1, 1187, 1, 1191, 1, 1201, 1, 1203, 1, 1206, 1,
        #            1209, 1, 1219, 1, 1221, 1, 1223, 1, 1235, 1, 1237, 1, 1239, 1, 1242, 1,
        #            1255, 1, 1257, 1, 1260, 1, 1276, 1, 1282, 1, 1286, 1, 1294, 1, 1308, 1,
        #            1312, 1, 1320, 1, 1330, 1, 1334, 1, 1346, 1, 1349, 1, 1352, 1, 1364, 1,
        #            1374, 1, 1383, 1, 1401, 1, 1427, 1, 1452, 1, 1479, 1, 1497, 1, 1523, 1,
        #            1541, 1, 1560, 1]
        #     ]
        #   },
        #   "errors": {"read": 1},
        #   "total_req": 58, "rps": 28.97, "rx_bps": "1.48MB"
        # }
        try:
            result = json.loads(stdout)
            http_total_req = int(result['total_req'])
            http_rps = float(result['rps'])
            http_tp_kbytes = result['rx_bps']
            # Normalize throughput to KBytes: the 'rx_bps' suffix letter
            # K/M/G maps to 1024**0/1/2 respectively
            ex_unit = 'KMG'.find(http_tp_kbytes[-2])
            if ex_unit == -1:
                raise ValueError
            val = float(http_tp_kbytes[0:-2])
            http_tp_kbytes = float(val * (1024 ** (ex_unit)))

            if 'errors' in result:
                # Socket errors (connect/read/write), timeouts and HTTP
                # status errors are reported separately
                errs = []
                for key in ['connect', 'read', 'write', 'timeout', 'http_error']:
                    if key in result['errors']:
                        errs.append(int(result['errors'][key]))
                    else:
                        errs.append(0)
                http_sock_err = errs[0] + errs[1] + errs[2]
                http_sock_timeout = errs[3]
                http_err = errs[4]
            else:
                http_sock_err = 0
                http_sock_timeout = 0
                http_err = 0

            latency_stats = result['latency']
        except Exception:
            return self.parse_error('Could not parse: "%s"' % (stdout))

        return self.parse_results(http_total_req=http_total_req,
                                  http_rps=http_rps,
                                  http_tp_kbytes=http_tp_kbytes,
                                  http_sock_err=http_sock_err,
                                  http_sock_timeout=http_sock_timeout,
                                  http_err=http_err,
                                  latency_stats=latency_stats)

    @staticmethod
    def consolidate_results(results):
        """Sum the counters of all per-VM results and merge the latency
        histograms into one percentile list.

        :param results: list of dicts each carrying a 'results' sub-dict
        :return: a single consolidated result dict (tool name 'wrk2')
        """
        all_res = {'tool': 'wrk2'}
        total_count = len(results)
        if not total_count:
            return all_res

        for key in ['http_rps', 'http_total_req', 'http_sock_err',
                    'http_sock_timeout', 'http_throughput_kbytes']:
            all_res[key] = 0
            for item in results:
                if (key in item['results']):
                    all_res[key] += item['results'][key]
            all_res[key] = int(all_res[key])

        if 'latency_stats' in results[0]['results']:
            # for item in results:
            #     print item['results']['latency_stats']
            all_res['latency_stats'] = []
            # Merge all per-VM HdrHistogram bucket counts, then extract
            # the standard percentile set from the combined histogram
            histogram = HdrHistogram(1, 3600 * 1000 * 1000, 2)
            for item in results:
                histogram.add_bucket_counts(item['results']['latency_stats'])
            perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
            latency_dict = histogram.get_percentile_to_value_dict(perc_list)
            for key, value in latency_dict.iteritems():
                all_res['latency_stats'].append([key, value])
            all_res['latency_stats'].sort()
        return all_res

    @staticmethod
    def consolidate_samples(results, vm_count):
        """Consolidate periodic samples and average the rate counters over
        the number of sample rounds (len(results) / vm_count).

        Note: integer division is intentional under Python 2.
        """
        all_res = WrkTool.consolidate_results(results)
        total_count = len(results) / vm_count
        if not total_count:
            return all_res

        all_res['http_rps'] = all_res['http_rps'] / total_count
        all_res['http_throughput_kbytes'] = all_res['http_throughput_kbytes'] / total_count
        return all_res