
# KloudBuster Default configuration file

# Name of the image to use for all test VMs (client, server and proxy)
# The image name must exist in OpenStack and must be built with the appropriate
# packages
image_name: 'Scale Image v8'

# Config options common to client and server side
keystone_admin_role: "admin"

# Cleanup all kloudbuster resources upon exit
cleanup_resources: true

# VM creation concurrency
vm_creation_concurrency: 10

#
# ssh access to the test VMs launched by kloudbuster is not required
# but can be handy if the user wants to ssh manually to any of them (for example
# to debug)
# public key to use to access all test VMs
# if empty will default to the user's public key (~/.ssh/id_rsa.pub) if it
# exists, otherwise will not provision any public key.
# If configured or available, a key pair will be added for each
# configured user.
#
public_key_file:

# SERVER SIDE CONFIG OPTIONS
server:
  # Flavor to use for the test images
  flavor:
    # Number of vCPUs for the flavor
    vcpus: 1
    # Memory for the flavor in MB
    ram: 2048
    # Size of local disk in GB
    disk: 20

  # Number of tenants to be created on the cloud
  number_tenants: 1

  # Number of Users to be created inside the tenant
  users_per_tenant: 1

  # Number of routers to be created within the context of each User
  # For now support only 1 router per user
  routers_per_user: 1

  # Number of networks to be created within the context of each Router
  # Assumes 1 subnet per network
  networks_per_router: 1

  # Number of VM instances to be created within the context of each Network
  vms_per_network: 2

  # Number of security groups per network
  secgroups_per_network: 1

  # Assign floating IP for every VM
  use_floatingip: false

  # Placement hint
  # Availability zone to use for servers in the server cloud
  # Leave empty if you prefer to have the Nova scheduler place the server VMs
  # If you want to pick a particular AZ, put that AZ name (e.g. nova)
  # If you want a particular compute host, put the AZ and compute host names
  # separated by ':' (e.g. nova:tme100)
  availability_zone:

# CLIENT SIDE CONFIG OPTIONS
client:
  # Assign floating IP for every VM
  use_floatingip: false

  # Flavor to use for the test images
  flavor:
    # Number of vCPUs for the flavor
    vcpus: 1
    # Memory for the flavor in MB
    ram: 2048
    # Size of local disk in GB
    disk: 20

  # Placement hint
  # Availability zone to use for clients in the client cloud
  # Leave empty if you prefer to have the Nova scheduler place the client VMs
  # If you want to pick a particular AZ, put that AZ name (e.g. nova)
  # If you want a particular compute host, put the AZ and compute host names
  # separated by ':' (e.g. nova:tme100)
  availability_zone:

  # Interval for polling status from all VMs
  polling_interval: 5

  # Tooling
  tp_tool:
    name: 'nuttcp'
    dest_path: '/var/tmp/nuttcp-7.3.2'
  http_tool:
    name: 'wrk'
    dest_path: '/var/tmp/wrk2-3.1.1'

  # HTTP tool specific configs (per VM)
  http_tool_configs:
    # Threads to run tests
    threads: 1
    # Connections to be kept concurrently per VM
    connections: 1000
    # Rate limit in RPS per client (0 for unlimited)
    rate_limit: 500
    # Timeout for HTTP requests
    timeout: 5
    # Connection Type: "Keep-alive", "New"
    connection_type: 'Keep-alive'
    # Duration of testing tools (seconds)
    duration: 30

  # Prompt before running benchmarking tools
  prompt_before_run: false