add regtest script and conf
Change-Id: Ieb82235bd6b436b6716992c1b45698b083d03d86 remove pre-existing chef files before uploading new chef files. having legacy stale chef roles/cookbooks/databags in the knife source directory will result in possible failure of dependency check, so gurantee we have the latest code in that directory. clean installation logs when refresh database. Change-Id: I6609e90e51d8ca3560f1848f2de62e148f0c5b49
This commit is contained in:
parent
93944da8ee
commit
c4674360ef
155
bin/client.py
155
bin/client.py
@ -132,7 +132,7 @@ flags.add('hostnames',
|
||||
flags.add('host_networks',
|
||||
help=(
|
||||
'semicomma seperated host name and its networks '
|
||||
'<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscous>,...'
|
||||
'<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
|
||||
),
|
||||
default='')
|
||||
flags.add('partitions',
|
||||
@ -150,7 +150,13 @@ flags.add('network_mapping',
|
||||
flags.add('host_roles',
|
||||
help=(
|
||||
'semicomma separated host roles '
|
||||
'<hostname>=<comma separated roles>',
|
||||
'<hostname>=<comma separated roles>'
|
||||
),
|
||||
default='')
|
||||
flags.add('default_roles',
|
||||
help=(
|
||||
'comma seperated default roles '
|
||||
'<rolename>'
|
||||
),
|
||||
default='')
|
||||
flags.add('deployment_timeout',
|
||||
@ -174,13 +180,13 @@ def _get_client():
|
||||
|
||||
def _login(client):
|
||||
"""get apiclient token."""
|
||||
status, token = client.login(
|
||||
status, resp = client.get_token(
|
||||
flags.OPTIONS.compass_user_email,
|
||||
flags.OPTIONS.compass_user_password
|
||||
)
|
||||
logging.info(
|
||||
'login status: %s, token: %s',
|
||||
status, token
|
||||
'login status: %s, resp: %s',
|
||||
status, resp
|
||||
)
|
||||
if status >= 400:
|
||||
raise Exception(
|
||||
@ -188,7 +194,7 @@ def _login(client):
|
||||
flags.OPTIONS.compass_server,
|
||||
flags.OPTIONS.compass_user_email
|
||||
)
|
||||
return token
|
||||
return resp['token']
|
||||
|
||||
|
||||
def _get_machines(client):
|
||||
@ -364,22 +370,36 @@ def _get_adapter(client):
|
||||
|
||||
|
||||
def _add_subnets(client):
|
||||
status, resp = client.list_subnets()
|
||||
logging.info('get all subnets status: %s resp: %s', status, resp)
|
||||
if status >= 400:
|
||||
msg = 'failed to get subnets'
|
||||
raise Exception(msg)
|
||||
|
||||
all_subnets = {}
|
||||
for subnet in resp:
|
||||
all_subnets[subnet['subnet']] = subnet
|
||||
|
||||
subnets = [
|
||||
subnet for subnet in flags.OPTIONS.subnets.split(',')
|
||||
if subnet
|
||||
]
|
||||
if not subnets:
|
||||
raise Exception(
|
||||
'there is no subnets added')
|
||||
subnet_mapping = {}
|
||||
for subnet in subnets:
|
||||
status, resp = client.add_subnet(subnet)
|
||||
logging.info('add subnet %s status %s response %s',
|
||||
subnet, status, resp)
|
||||
if status >= 400:
|
||||
msg = 'failed to add subnet %s' % subnet
|
||||
raise Exception(msg)
|
||||
subnet_mapping[resp['subnet']] = resp['id']
|
||||
if subnet not in all_subnets:
|
||||
status, resp = client.add_subnet(subnet)
|
||||
logging.info('add subnet %s status %s response %s',
|
||||
subnet, status, resp)
|
||||
if status >= 400:
|
||||
msg = 'failed to add subnet %s' % subnet
|
||||
raise Exception(msg)
|
||||
subnet_mapping[resp['subnet']] = resp['id']
|
||||
else:
|
||||
subnet_mapping[subnet] = all_subnets[subnet]['id']
|
||||
if not subnet_mapping:
|
||||
raise Exception(
|
||||
'there is not subnets found'
|
||||
)
|
||||
return subnet_mapping
|
||||
|
||||
|
||||
@ -401,6 +421,14 @@ def _add_cluster(client, adapter_id, os_id, flavor_id, machines):
|
||||
|
||||
cluster = resp
|
||||
cluster_id = cluster['id']
|
||||
flavor = cluster['flavor']
|
||||
roles = flavor['roles']
|
||||
role_mapping = {}
|
||||
for role in roles:
|
||||
if role.get('optional', False):
|
||||
role_mapping[role['name']] = 1
|
||||
else:
|
||||
role_mapping[role['name']] = 0
|
||||
hostnames = [
|
||||
hostname for hostname in flags.OPTIONS.hostnames.split(',')
|
||||
if hostname
|
||||
@ -434,7 +462,7 @@ def _add_cluster(client, adapter_id, os_id, flavor_id, machines):
|
||||
msg = 'machines %s to add to the cluster %s while hosts %s' % (
|
||||
machines, cluster_name, host_mapping)
|
||||
raise Exception(msg)
|
||||
return (cluster_id, host_mapping)
|
||||
return (cluster_id, host_mapping, role_mapping)
|
||||
|
||||
|
||||
def _set_cluster_os_config(client, cluster_id, host_ips):
|
||||
@ -452,8 +480,7 @@ def _set_cluster_os_config(client, cluster_id, host_ips):
|
||||
]
|
||||
compass_name = socket.gethostname()
|
||||
compass_ip = socket.gethostbyname(compass_name)
|
||||
if http_proxy and not no_proxy:
|
||||
no_proxy = ['127.0.0.1', compass_name, compass_ip]
|
||||
if http_proxy:
|
||||
for hostname, ips in host_ips.items():
|
||||
no_proxy.append(hostname)
|
||||
no_proxy.extend(ips)
|
||||
@ -510,9 +537,14 @@ def _set_cluster_os_config(client, cluster_id, host_ips):
|
||||
}
|
||||
partitions = [
|
||||
partition for partition in flags.OPTIONS.partitions.split(',')
|
||||
if partition
|
||||
]
|
||||
os_config['partition'] = {}
|
||||
for partition in partitions:
|
||||
if '=' not in partition:
|
||||
raise Exception(
|
||||
'there is no = in partition %s' % partition
|
||||
)
|
||||
partition_name, partition_value = partition.split('=', 1)
|
||||
if not partition_name:
|
||||
raise Exception(
|
||||
@ -556,6 +588,12 @@ def _set_host_networking(client, host_mapping, subnet_mapping):
|
||||
interface, network_properties_str = network.split('=', 1)
|
||||
network_properties = network_properties_str.split('|')
|
||||
ip_addr = network_properties[0]
|
||||
if not ip_addr:
|
||||
raise Exception(
|
||||
'ip is not set for host %s interface %s' % (
|
||||
hostname, interface
|
||||
)
|
||||
)
|
||||
ip = netaddr.IPAddress(ip_addr)
|
||||
subnet_id = None
|
||||
for subnet_addr, subnetid in subnet_mapping.items():
|
||||
@ -658,7 +696,25 @@ def _set_cluster_package_config(client, cluster_id):
|
||||
raise Exception(msg)
|
||||
|
||||
|
||||
def _set_host_roles(client, cluster_id, host_mapping):
|
||||
def _set_host_roles(client, cluster_id, host_id, roles, role_mapping):
|
||||
status, response = client.update_cluster_host(
|
||||
cluster_id, host_id, roles=roles)
|
||||
logging.info(
|
||||
'set cluster %s host %s roles %s status %s: %s',
|
||||
cluster_id, host_id, roles, status, response
|
||||
)
|
||||
if status >= 400:
|
||||
raise Exception(
|
||||
'failed to set cluster %s host %s roles %s' % (
|
||||
cluster_id, host_id, roles
|
||||
)
|
||||
)
|
||||
for role in roles:
|
||||
if role in role_mapping and role_mapping[role] > 0:
|
||||
role_mapping[role] -= 1
|
||||
|
||||
|
||||
def _set_hosts_roles(client, cluster_id, host_mapping, role_mapping):
|
||||
host_roles = {}
|
||||
for host_str in flags.OPTIONS.host_roles.split(';'):
|
||||
if not host_str:
|
||||
@ -672,19 +728,50 @@ def _set_host_roles(client, cluster_id, host_mapping):
|
||||
)
|
||||
host_id = host_mapping[hostname]
|
||||
roles = [role for role in roles_str.split(',') if role]
|
||||
status, response = client.update_cluster_host(
|
||||
cluster_id, host_id, roles=roles)
|
||||
logging.info(
|
||||
'set cluster %s host %s roles %s status %s: %s',
|
||||
cluster_id, hostname, roles, status, response
|
||||
)
|
||||
if status >= 400:
|
||||
raise Exception(
|
||||
'failed to set cluster %s host %s roles %s' % (
|
||||
cluster_id, host_id, roles
|
||||
)
|
||||
)
|
||||
_set_host_roles(client, cluster_id, host_id, roles, role_mapping)
|
||||
host_roles[hostname] = roles
|
||||
|
||||
# assign unassigned roles to unassigned hosts
|
||||
unassigned_hostnames = []
|
||||
for hostname, _ in host_mapping.items():
|
||||
if hostname not in host_roles:
|
||||
unassigned_hostnames.append(hostname)
|
||||
unassigned_roles = []
|
||||
for role, count in role_mapping.items():
|
||||
if count > 0:
|
||||
unassigned_roles.apend(role)
|
||||
if len(unassigned_hostnames) < len(unassigned_roles):
|
||||
raise Exception(
|
||||
'there is no enough hosts %s to assign roles %s' % (
|
||||
unassigned_hostnames, unassigned_roles
|
||||
)
|
||||
)
|
||||
for offset, role in enumerate(unassigned_roles):
|
||||
hostname = unassigned_hostnames[offset]
|
||||
host_id = host_mapping[hostname]
|
||||
roles = [role]
|
||||
_set_host_roles(client, cluster_id, host_id, roles, role_mapping)
|
||||
host_roles[hostname] = roles
|
||||
unassigned_hostnames = unassigned_hostnames[len(unassigned_roles):]
|
||||
unassigned_roles = []
|
||||
|
||||
# assign default roles to unassigned hosts
|
||||
default_roles = [
|
||||
role for role in flags.OPTIONS.default_roles.split(',')
|
||||
if role
|
||||
]
|
||||
if not default_roles and unassigned_hostnames:
|
||||
raise Exception(
|
||||
'hosts %s do not have roles set' % unassigned_hostnames
|
||||
)
|
||||
for hostname in unassigned_hostnames:
|
||||
host_id = host_mapping[hostname]
|
||||
roles = [default_roles[0]]
|
||||
_set_host_roles(client, cluster_id, host_id, roles, role_mapping)
|
||||
host_roles[hostname] = roles
|
||||
default_roles = default_roles[1:]
|
||||
default_roles.extend(roles)
|
||||
|
||||
return host_roles
|
||||
|
||||
|
||||
@ -825,14 +912,14 @@ def main():
|
||||
machines = _get_machines(client)
|
||||
subnet_mapping = _add_subnets(client)
|
||||
adapter_id, os_id, flavor_id = _get_adapter(client)
|
||||
cluster_id, host_mapping = _add_cluster(
|
||||
cluster_id, host_mapping, role_mapping = _add_cluster(
|
||||
client, adapter_id, os_id, flavor_id, machines)
|
||||
host_ips = _set_host_networking(
|
||||
client, host_mapping, subnet_mapping
|
||||
)
|
||||
_set_cluster_os_config(client, cluster_id, host_ips)
|
||||
_set_cluster_package_config(client, cluster_id)
|
||||
_set_host_roles(client, cluster_id, host_mapping)
|
||||
_set_hosts_roles(client, cluster_id, host_mapping, role_mapping)
|
||||
_deploy_clusters(client, cluster_id, host_mapping)
|
||||
_get_installing_progress(client, cluster_id, host_mapping)
|
||||
_check_dashboard_links(client, cluster_id)
|
||||
|
@ -32,6 +32,8 @@ from compass.actions import deploy
|
||||
from compass.actions import reinstall
|
||||
from compass.api import app
|
||||
from compass.db.api import database
|
||||
from compass.db.api import switch as switch_api
|
||||
from compass.db.api import user as user_api
|
||||
from compass.tasks.client import celery
|
||||
from compass.utils import flags
|
||||
from compass.utils import logsetting
|
||||
@ -122,6 +124,44 @@ def dropdb():
|
||||
database.drop_db()
|
||||
|
||||
|
||||
@app_manager.command
|
||||
def set_switch_machines():
|
||||
"""Set switches and machines.
|
||||
|
||||
.. note::
|
||||
--switch_machines_file is the filename which stores all switches
|
||||
and machines information.
|
||||
each line in fake_switches_files presents one machine.
|
||||
the format of each line machine,<switch_ip>,<switch_port>,<vlan>,<mac>
|
||||
or switch,<switch_ip>,<switch_vendor>,<switch_version>,
|
||||
<switch_community>,<switch_state>
|
||||
"""
|
||||
if not flags.OPTIONS.switch_machines_file:
|
||||
print 'flag --switch_machines_file is missing'
|
||||
return
|
||||
database.init()
|
||||
switches, switch_machines = util.get_switch_machines_from_file(
|
||||
flags.OPTIONS.switch_machines_file)
|
||||
user = user_api.get_user_object(
|
||||
setting.COMPASS_ADMIN_EMAIL
|
||||
)
|
||||
switch_mapping = {}
|
||||
for switch in switches:
|
||||
added_switch = switch_api.add_switch(
|
||||
user, False, **switch
|
||||
)
|
||||
switch_mapping[switch['ip']] = added_switch['id']
|
||||
for switch_ip, machines in switch_machines.items():
|
||||
if switch_ip not in switch_mapping:
|
||||
print 'switch ip %s not found' % switch_ip
|
||||
sys.exit(1)
|
||||
switch_id = switch_mapping[switch_ip]
|
||||
for machine in machines:
|
||||
switch_api.add_switch_machine(
|
||||
user, switch_id, False, **machine
|
||||
)
|
||||
|
||||
|
||||
@app_manager.command
|
||||
def reinstall_clusters():
|
||||
"""Reinstall hosts in clusters.
|
||||
|
@ -49,7 +49,7 @@ flags.add('run_interval', type='int',
|
||||
def progress_update():
|
||||
"""entry function."""
|
||||
if flags.OPTIONS.async:
|
||||
celery.send_task('compass.tasks.update_progress')
|
||||
celery.send_task('compass.tasks.update_progress', ())
|
||||
else:
|
||||
try:
|
||||
update_progress.update_progress()
|
||||
|
@ -6,7 +6,7 @@ echo "You may run '/opt/compass/bin/clean_nodes.sh' to clean nodes on chef serve
|
||||
echo "You may run '/opt/compass/bin/clean_clients.sh' to clean clients on chef server"
|
||||
echo "you may run '/opt/compass/bin/clean_environments.sh' to clean environments on chef server"
|
||||
echo "you may run '/opt/compass/bin/remove_systems.sh' to clean systems on cobbler"
|
||||
#/opt/compass/bin/clean_installation_logs.py
|
||||
/opt/compass/bin/clean_installation_logs.py
|
||||
service httpd restart
|
||||
service rsyslog restart
|
||||
service redis restart
|
||||
|
@ -97,9 +97,9 @@ for v in PRESET_VALUES:
|
||||
client = Client(COMPASS_SERVER_URL)
|
||||
|
||||
# login
|
||||
status, token = client.login(COMPASS_LOGIN_EMAIL, COMPASS_LOGIN_PASSWORD)
|
||||
status, response = client.login(COMPASS_LOGIN_EMAIL, COMPASS_LOGIN_PASSWORD)
|
||||
print '============================================================'
|
||||
print 'login status: %s token: %s' % (status, token)
|
||||
print 'login status: %s response: %s' % (status, response)
|
||||
if status >= 400:
|
||||
sys.exit(1)
|
||||
|
||||
|
@ -24,12 +24,15 @@ class Client(object):
|
||||
"""compass restful api wrapper"""
|
||||
|
||||
def __init__(self, url, headers=None, proxies=None, stream=None):
|
||||
|
||||
logging.info('create api client %s', url)
|
||||
self.url_ = url
|
||||
self.session_ = requests.Session()
|
||||
|
||||
if headers:
|
||||
self.session_.headers = headers
|
||||
self.session_.headers.update(headers)
|
||||
self.session_.headers.update({
|
||||
'Accept': 'application/json'
|
||||
})
|
||||
|
||||
if proxies is not None:
|
||||
self.session_.proxies = proxies
|
||||
@ -56,6 +59,7 @@ class Client(object):
|
||||
|
||||
def _get(self, req_url, data=None):
|
||||
url = '%s%s' % (self.url_, req_url)
|
||||
logging.debug('get %s with data %s', url, data)
|
||||
if data:
|
||||
resp = self.session_.get(url, params=data)
|
||||
else:
|
||||
@ -65,6 +69,7 @@ class Client(object):
|
||||
|
||||
def _post(self, req_url, data=None):
|
||||
url = '%s%s' % (self.url_, req_url)
|
||||
logging.debug('post %s with data %s', url, data)
|
||||
if data:
|
||||
resp = self.session_.post(url, json.dumps(data))
|
||||
else:
|
||||
@ -75,6 +80,7 @@ class Client(object):
|
||||
def _put(self, req_url, data=None):
|
||||
"""encapsulate put method."""
|
||||
url = '%s%s' % (self.url_, req_url)
|
||||
logging.debug('put %s with data %s', url, data)
|
||||
if data:
|
||||
resp = self.session_.put(url, json.dumps(data))
|
||||
else:
|
||||
@ -84,6 +90,7 @@ class Client(object):
|
||||
|
||||
def _patch(self, req_url, data=None):
|
||||
url = '%s%s' % (self.url_, req_url)
|
||||
logging.debug('patch %s with data %s', url, data)
|
||||
if data:
|
||||
resp = self.session_.patch(url, json.dumps(data))
|
||||
else:
|
||||
@ -93,17 +100,23 @@ class Client(object):
|
||||
|
||||
def _delete(self, req_url):
|
||||
url = '%s%s' % (self.url_, req_url)
|
||||
logging.debug('delete %s', url)
|
||||
return self._get_response(self.session_.delete(url))
|
||||
|
||||
def login(self, email, password):
|
||||
return self._login(email, password)
|
||||
|
||||
def _login(self, email, password):
|
||||
credential = {}
|
||||
credential['email'] = email
|
||||
credential['password'] = password
|
||||
token = self._post('/users/token', data=credential)
|
||||
return token
|
||||
return self._post('/users/login', data=credential)
|
||||
|
||||
def get_token(self, email, password):
|
||||
credential = {}
|
||||
credential['email'] = email
|
||||
credential['password'] = password
|
||||
status, resp = self._post('/users/token', data=credential)
|
||||
if status < 400:
|
||||
self.session_.headers.update({'X-Auth-Token': resp['token']})
|
||||
return status, resp
|
||||
|
||||
def get_users(self):
|
||||
users = self._get('/users')
|
||||
@ -112,8 +125,7 @@ class Client(object):
|
||||
def list_switches(
|
||||
self,
|
||||
switch_ips=None,
|
||||
switch_ip_networks=None,
|
||||
limit=None):
|
||||
switch_ip_networks=None):
|
||||
"""list switches."""
|
||||
params = {}
|
||||
if switch_ips:
|
||||
@ -122,9 +134,6 @@ class Client(object):
|
||||
if switch_ip_networks:
|
||||
params['switchIpNetwork'] = switch_ip_networks
|
||||
|
||||
if limit:
|
||||
params['limit'] = limit
|
||||
|
||||
switchlist = self._get('/switches', data=params)
|
||||
return switchlist
|
||||
|
||||
@ -371,13 +380,12 @@ class Client(object):
|
||||
return self._put('/switch-machines/%s' % switchmachine_id, data=data)
|
||||
|
||||
def patch_switchmachine(self, switchmachine_id,
|
||||
patched_vlans=None, raw_data=None):
|
||||
vlans=None, raw_data=None):
|
||||
data = {}
|
||||
if raw_data:
|
||||
data = raw_data
|
||||
|
||||
elif patched_vlans:
|
||||
data['patched_vlans'] = patched_vlans
|
||||
elif vlans:
|
||||
data['vlans'] = vlans
|
||||
|
||||
return self._patch('/switch-machines/%s' % switchmachine_id, data=data)
|
||||
|
||||
@ -389,37 +397,18 @@ class Client(object):
|
||||
if mac:
|
||||
data['mac'] = mac
|
||||
|
||||
if tag:
|
||||
data['tag'] = mac
|
||||
|
||||
if tag:
|
||||
data['location'] = location
|
||||
|
||||
return self._get('/machines', data=data)
|
||||
|
||||
def get_machine(self, machine_id, id=None, mac=None, ipmi_credentials=None,
|
||||
tag=None, location=None, created_at=None, updated_at=None):
|
||||
data = {}
|
||||
if id:
|
||||
data['id'] = id
|
||||
|
||||
if mac:
|
||||
data['mac'] = mac
|
||||
|
||||
if ipmi_credentials:
|
||||
data['ipmi_credentials'] = ipmi_credentials
|
||||
|
||||
if tag:
|
||||
data['tag'] = tag
|
||||
|
||||
if location:
|
||||
data['location'] = location
|
||||
|
||||
if created_at:
|
||||
data['created_at'] = created_at
|
||||
return self._get('/machines', data=data)
|
||||
|
||||
if updated_at:
|
||||
data['updated_at'] = updated_at
|
||||
def get_machine(self, machine_id):
|
||||
data = {}
|
||||
if id:
|
||||
data['id'] = id
|
||||
|
||||
return self._get('/machines/%s' % machine_id, data=data)
|
||||
|
||||
@ -440,21 +429,21 @@ class Client(object):
|
||||
|
||||
return self._put('/machines/%s' % machine_id, data=data)
|
||||
|
||||
def patch_machine(self, machine_id, patched_ipmi_credentials=None,
|
||||
patched_tag=None, patched_location=None,
|
||||
def patch_machine(self, machine_id, ipmi_credentials=None,
|
||||
tag=None, location=None,
|
||||
raw_data=None):
|
||||
data = {}
|
||||
if raw_data:
|
||||
data = raw_data
|
||||
else:
|
||||
if patched_ipmi_credentials:
|
||||
data['patched_ipmi_credentials'] = patched_ipmi_credentials
|
||||
if ipmi_credentials:
|
||||
data['ipmi_credentials'] = ipmi_credentials
|
||||
|
||||
if patched_tag:
|
||||
data['patched_tag'] = patched_tag
|
||||
if tag:
|
||||
data['tag'] = tag
|
||||
|
||||
if patched_location:
|
||||
data['patched_location'] = patched_location
|
||||
if location:
|
||||
data['location'] = location
|
||||
|
||||
return self._patch('/machines/%s' % machine_id, data=data)
|
||||
|
||||
|
@ -218,24 +218,43 @@ def pretty_print(*contents):
|
||||
print "\n".join(content for content in contents)
|
||||
|
||||
|
||||
def get_clusters_from_str(clusters_str):
|
||||
"""get clusters from string."""
|
||||
clusters = {}
|
||||
for cluster_and_hosts in clusters_str.split(';'):
|
||||
if not cluster_and_hosts:
|
||||
continue
|
||||
def get_switch_machines_from_file(filename):
|
||||
"""get switch machines from file."""
|
||||
switches = []
|
||||
switch_machines = {}
|
||||
with open(filename) as switch_file:
|
||||
for line in switch_file:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
# ignore empty line
|
||||
continue
|
||||
|
||||
if ':' in cluster_and_hosts:
|
||||
cluster_str, hosts_str = cluster_and_hosts.split(
|
||||
':', 1)
|
||||
else:
|
||||
cluster_str = cluster_and_hosts
|
||||
hosts_str = ''
|
||||
if line.startswith('#'):
|
||||
# ignore comments
|
||||
continue
|
||||
|
||||
hosts = [
|
||||
host for host in hosts_str.split(',')
|
||||
if host
|
||||
]
|
||||
clusters[cluster_str] = hosts
|
||||
columns = [column for column in line.split(',')]
|
||||
if not columns:
|
||||
# ignore empty line
|
||||
continue
|
||||
|
||||
return clusters
|
||||
if columns[0] == 'switch':
|
||||
(switch_ip, switch_vendor, switch_version,
|
||||
switch_community, switch_state) = columns[1:]
|
||||
switches.append({
|
||||
'ip': switch_ip,
|
||||
'vendor': switch_vendor,
|
||||
'credentials': {
|
||||
'version': switch_version,
|
||||
'community': switch_community,
|
||||
},
|
||||
'state': switch_state,
|
||||
})
|
||||
elif columns[0] == 'machine':
|
||||
switch_ip, switch_port, mac = columns[1:]
|
||||
switch_machines.setdefault(switch_ip, []).append({
|
||||
'mac': mac,
|
||||
'port': switch_port,
|
||||
})
|
||||
|
||||
return (switches, switch_machines)
|
||||
|
@ -25,6 +25,7 @@ if [[ "$?" != "0" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo rm -rf /var/chef
|
||||
sudo mkdir -p /var/chef/cookbooks/
|
||||
sudo cp -r $ADAPTERS_HOME/chef/cookbooks/* /var/chef/cookbooks/
|
||||
if [ $? -ne 0 ]; then
|
||||
|
@ -108,7 +108,7 @@ sudo cp -rf $ADAPTERS_HOME/cobbler/kickstarts/default.ks /var/lib/cobbler/kickst
|
||||
sudo cp -rf $ADAPTERS_HOME/cobbler/kickstarts/default.seed /var/lib/cobbler/kickstarts/
|
||||
sudo chmod 666 /var/lib/cobbler/kickstarts/default.ks
|
||||
sudo chmod 666 /var/lib/cobbler/kickstarts/default.seed
|
||||
sudo mkdir /var/www/cblr_ks
|
||||
sudo mkdir -p /var/www/cblr_ks
|
||||
sudo chmod 755 /var/www/cblr_ks
|
||||
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/cobbler.conf /etc/httpd/conf.d/
|
||||
chmod 644 /etc/httpd/conf.d/cobbler.conf
|
||||
|
@ -115,6 +115,10 @@ if [[ "$?" != "0" ]]; then
|
||||
else
|
||||
echo "compassed service is refreshed"
|
||||
fi
|
||||
/opt/compass/bin/clean_nodes.sh
|
||||
/opt/compass/bin/clean_clients.sh
|
||||
/opt/compass/bin/clean_environments.sh
|
||||
/opt/compass/bin/remove_systems.sh
|
||||
|
||||
sudo service httpd status
|
||||
if [[ "$?" != "0" ]]; then
|
||||
|
@ -3,54 +3,117 @@ export VIRT_NUM=${VIRT_NUM:-'1'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'10'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'8192'}
|
||||
export VIRT_DISK=${VIRT_DISK:-'30G'}
|
||||
export CLEAN_OLD_DATA=${CLEAN_OLD_DATA:-true}
|
||||
|
||||
export COMPASS_SERVER_URL=${COMPASS_SERVER_URL:-"http://$ipaddr/api"}
|
||||
export COMPASS_USER_EMAIL=${COMPASS_USER_EMAIL:-'admin@huawei.com'}
|
||||
export COMPASS_USER_PASSWORD=${COMPASS_USER_PASSWORD:-'admin'}
|
||||
export CLUSTER_NAME=${CLUSTER_NAME:-'test_cluster'}
|
||||
export SWITCH_IPS=${SWITCH_IPS:-'10.145.81.219'}
|
||||
export SWITCH_VERSION=${SWITCH_VERSION:-'2c'}
|
||||
export SWITCH_COMMUNITY=${SWITCH_COMMUNITY:-'public'}
|
||||
export SWITCH_CREDENTIAL=${SWITCH_CREDENTIAL:-"version=${SWITCH_VERSION},community=${SWITCH_COMMUNITY}"}
|
||||
export USE_POLL_SWITCHES=${USE_POLL_SWITCHES:-true}
|
||||
export USE_POLL_SWITCHES=${USE_POLL_SWITCHES:-false}
|
||||
|
||||
export HOST_ROLES=${HOST_ROLES:-''}
|
||||
export LANGUAGE=${LANGUAGE:-'EN'}
|
||||
export TIMEZONE=${TIMEZONE:-'America/Los_Angeles'}
|
||||
export HOSTNAMES=${HOSTNAMES:-'allinone'}
|
||||
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)centos.*'}
|
||||
export ADAPTER_TARGET_SYSTEM_PATTERN=${ADAPTER_TARGET_SYSTEM_PATTERN:-'openstack.*'}
|
||||
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'allinone'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'allinone=allinone-compute'}
|
||||
export DEFAULT_ROLES=${DEFAULT_ROLES:-'allinone-compute'}
|
||||
|
||||
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.50}
|
||||
export MANAGEMENT_IP_END=${MANAGEMENT_IP_END:-`echo $ipaddr |cut -d. -f'1 2 3'`.100}
|
||||
export MANAGEMENT_NETMASK=${MANAGEMENT_NETMASK:-'255.255.255.0'}
|
||||
export MANAGEMENT_NIC=${MANAGEMENT_NIC:-'eth0'}
|
||||
export MANAGEMENT_PROMISC=${MANAGEMENT_PROMISC:-'0'}
|
||||
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.1'}
|
||||
export TENANT_IP_END=${TENANT_IP_END:-'172.16.2.254'}
|
||||
export TENANT_NETMASK=${TENANT_NETMASK:-'255.255.255.0'}
|
||||
export TENANT_NIC=${TENANT_NIC:-'eth1'}
|
||||
export TENANT_PROMISC=${TENANT_PROMISC:-'0'}
|
||||
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.1'}
|
||||
export PUBLIC_IP_END=${PUBLIC_IP_END:-'172.16.3.254'}
|
||||
export PUBLIC_NETMASK=${PUBLIC_NETMASK:-'255.255.255.0'}
|
||||
export PUBLIC_NIC=${PUBLIC_NIC:-'eth2'}
|
||||
export PUBLIC_PROMISC=${PUBLIC_PROMISC:-'1'}
|
||||
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.1'}
|
||||
export STORAGE_IP_END=${STORAGE_IP_END:-'172.16.4.254'}
|
||||
export STORAGE_NETMASK=${STORAGE_NETMASK:-'255.255.255.0'}
|
||||
export STORAGE_NIC=${STORAGE_NIC:-'eth3'}
|
||||
export STORAGE_PROMISC=${STORAGE_PROMISC:-'0'}
|
||||
export NAMESERVERS=${NAMESERVERS:-$ipaddr}
|
||||
export NTP_SERVER=${NTP_SERVER:-$ipaddr}
|
||||
export GATEWAY=${GATEWAY:-$ipaddr}
|
||||
export PROXY=${PROXY:-http://$ipaddr:3128}
|
||||
export SEARCH_PATH=${SEARCH_PATH:-'ods.com'}
|
||||
export HA_VIP=${HA_VIP:-''}
|
||||
export NETWORKING=${NETWORKING:-"nameservers=$NAMESERVERS;search_path=$SEARCH_PATH;gateway=$GATEWAY;proxy=$PROXY;ntp_server=$NTP_SERVER;ha_vip=$HA_VIP;management_ip_start=$MANAGEMENT_IP_START;management_ip_end=$MANAGEMENT_IP_END;management_netmask=$MANAGEMENT_NETMASK;management_gateway=;management_nic=$MANAGEMENT_NIC;management_promisc=$MANAGEMENT_PROMISC;tenant_ip_start=$TENANT_IP_START;tenant_ip_end=$TENANT_IP_END;tenant_netmask=$TENANT_NETMASK;tenant_gateway=;tenant_nic=$TENANT_NIC;tenant_promisc=$TENANT_PROMISC;public_ip_start=$PUBLIC_IP_START;public_ip_end=$PUBLIC_IP_END;public_netmask=$PUBLIC_NETMASK;public_gateway=;public_nic=$PUBLIC_NIC;public_promisc=$PUBLIC_PROMISC;storage_ip_start=$STORAGE_IP_START;storage_ip_end=$STORAGE_IP_END;storage_netmask=$STORAGE_NETMASK;storage_gateway=;storage_nic=$STORAGE_NIC;storage_promisc=$STORAGE_PROMISC"}
|
||||
export IGNORE_PROXY=${IGNORE_PROXY:-"127.0.0.1,localhost,$ipaddr,$HOSTNAME"}
|
||||
export DOMAIN=${DOMAIN:-'ods.com'}
|
||||
export SEARCH_PATH=${SEARCH_PATH:-${DOMAIN}}
|
||||
|
||||
export HOME_PERCENTAGE=${HOME_PERCENTAGE:-'5'}
|
||||
export TMP_PERCENTAGE=${TMP_PERCENTAGE:-'5'}
|
||||
export VAR_PERCENTAGE=${VAR_PERCENTAGE:-'10'}
|
||||
export PARTITION=${PARTITION:-"home:percentage=${HOME_PERCENTAGE},tmp:percentage=${TMP_PERCENTAGE},var:percentage=${VAR_PERCENTAGE}"}
|
||||
export PARTITION=${PARTITION:-"/home=${HOME_PERCENTAGE}%,/tmp=${TMP_PERCENTAGE}%,/var=${VAR_PERCENTAGE}%"}
|
||||
|
||||
function ip_subnet {
|
||||
ip_addr=$1
|
||||
ip_base="$(echo $ip_addr | cut -d. -f'1 2 3')"
|
||||
echo "${ip_base}.0/24"
|
||||
}
|
||||
|
||||
if [ -z "$MANAGEMENT_SUBNET" ]; then
|
||||
export MANAGEMENT_SUBNET=$(ip_subnet $ipaddr)
|
||||
fi
|
||||
export TENANT_SUBNET=${TENANT_SUBNET:-'172.16.2.0/24'}
|
||||
export PUBLIC_SUBNET=${PUBLIC_SUBNET:-'172.16.3.0/24'}
|
||||
export STORAGE_SUBNET=${STORAGE_SUBNET:-'172.16.4.0/24'}
|
||||
export SUBNETS=${SUBNETS:-"${MANAGEMENT_SUBNET},${TENANT_SUBNET},${PUBLIC_SUBNET},${STORAGE_SUBNET}"}
|
||||
|
||||
export SERVER_USERNAME=${SERVER_USERNAME:-root}
|
||||
export SERVER_PASSWORD=${SERVER_PASSWORD:-root}
|
||||
export SERVER_CREDENTIAL=${SERVER_CREDENTIAL:-"${SERVER_USERNAME}=${SERVER_PASSWORD}"}
|
||||
export SERVICE_USERNAME=${SERVICE_USERNAME:-service}
|
||||
export SERVICE_PASSWORD=${SERVICE_PASSWORD:-service}
|
||||
export SERVICE_IMAGE_CREDENTIAL=${SERVICE_IMAGE_CREDENTIAL:-"image:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_COMPUTE_CREDENTIAL=${SERVICE_COMPUTE_CREDENTIAL:-"compute:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_DASHBOARD_CREDENTIAL=${SERVICE_DASHBOARD_CREDENTIAL:-"dashboard:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_IDENTITY_CREDENTIAL=${SERVICE_IDENTITY_CREDENTIAL:-"identity:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_METERING_CREDENTIAL=${SERVICE_METERING_CREDENTIAL:-"metering:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_RABBITMQ_CREDENTIAL=${SERVICE_RABBITMQ_CREDENTIAL:-"rabbitmq:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_VOLUME_CREDENTIAL=${SERVICE_VOLUME_CREDENTIAL:-"volume:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_MYSQL_CREDENTIAL=${SERVICE_MYSQL_CREDENTIAL:-"mysql:${SERVICE_USERNAME}=${SERVICE_PASSWORD}"}
|
||||
export SERVICE_CREDENTIALS=${SERVICE_CREDENTIALS:-"${SERVICE_IMAGE_CREDENTIAL},${SERVICE_COMPUTE_CREDENTIAL},${SERVICE_DASHBOARD_CREDENTIAL},${SERVICE_IDENTITY_CREDENTIAL},${SERVICE_METERING_CREDENTIAL},${SERVICE_RABBITMQ_CREDENTIAL},${SERVICE_VOLUME_CREDENTIAL},${SERVICE_MYSQL_CREDENTIAL}"}
|
||||
export CONSOLE_USERNAME=${CONSOLE_USERNAME:-console}
|
||||
export CONSOLE_PASSWORD=${CONSOLE_PASSWORD:-console}
|
||||
export SECURITY=${SECURITY:-"server:${SERVER_USERNAME}=${SERVER_PASSWORD},service:${SERVICE_USERNAME}=${SERVICE_PASSWORD},console:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_ADMIN_CREDENTIAL=${CONSOLE_ADMIN_CREDENTIAL:-"admin:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_COMPUTE_CREDENTIAL=${CONSOLE_COMPUTE_CREDENTIAL:-"compute:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_DASHBOARD_CREDENTIAL=${CONSOLE_DASHBOARD_CREDENTIAL:-"dashboard:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_IMAGE_CREDENTIAL=${CONSOLE_IMAGE_CREDENTIAL:-"image:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_METERING_CREDENTIAL=${CONSOLE_METERING_CREDENTIAL:-"metering:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_NETWORK_CREDENTIAL=${CONSOLE_NETWORK_CREDENTIAL:-"network:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_OBJECT_STORE_CREDENTIAL=${CONSOLE_OBJECT_STORE_CREDENTIAL:-"object-store:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_VOLUME_CREDENTIAL=${CONSOLE_VOLUME_CREDENTIAL:-"volume:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
|
||||
export CONSOLE_CREDENTIALS=${CONSOLE_CREDENTIALS:-"${CONSOLE_ADMIN_CREDENTIAL},${CONSOLE_COMPUTE_CREDENTIAL},${CONSOLE_DASHBOARD_CREDENTIAL},${CONSOLE_IMAGE_CREDENTIAL},${CONSOLE_METERING_CREDENTIAL},${CONSOLE_NETWORK_CREDENTIAL},${CONSOLE_OBJECT_STORE_CREDENTIAL},${CONSOLE_VOLUME_CREDENTIAL}"}
|
||||
|
||||
export DASHBOARD_ROLE=${DASHBOARD_ROLE:-"os-controller"}
|
||||
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.50}
|
||||
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.1'}
|
||||
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.1'}
|
||||
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.1'}
|
||||
export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0}
|
||||
export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1}
|
||||
export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3}
|
||||
export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2}
|
||||
|
||||
function next_ip {
|
||||
ip_addr=$1
|
||||
ip_base="$(echo $ip_addr | cut -d. -f'1 2 3')"
|
||||
ip_last="$(echo $ip_addr | cut -d. -f4)"
|
||||
let ip_last_next=$ip_last+1
|
||||
echo "${ip_base}.${ip_last_next}"
|
||||
}
|
||||
|
||||
if [ -z "$HOST_NETWORKS" ]; then
|
||||
IFS=, read -a HOSTNAME_LIST <<< "$HOSTNAMES"
|
||||
MANAGE_IP=${MANAGEMENT_IP_START}
|
||||
TENANT_IP=${TENANT_IP_START}
|
||||
PUBLIC_IP=${PUBLIC_IP_START}
|
||||
STORAGE_IP=${STORAGE_IP_START}
|
||||
for HOSTNAME in ${HOSTNAME_LIST[@]}; do
|
||||
if [ -z "$HOST_NETWORKS" ]; then
|
||||
HOST_NETWORKS="${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}"
|
||||
else
|
||||
HOST_NETWORKS="${HOST_NETWORKS};${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP},${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP},${STORAGE_INTERFACE}=${STORAGE_IP}"
|
||||
fi
|
||||
MANAGE_IP=$(next_ip ${MANAGE_IP})
|
||||
TENANT_IP=$(next_ip ${TENANT_IP})
|
||||
PUBLIC_IP=$(next_ip ${PUBLIC_IP})
|
||||
STORAGE_IP=$(next_ip ${STORAGE_IP})
|
||||
done
|
||||
export HOST_NETWORKS
|
||||
fi
|
||||
|
||||
export NETWORK_MAPPING=${NETWORK_MAPPING:-"management=${MANAGEMENT_INTERFACE},tenant=${TENANT_INTERFACE},storage=${STORAGE_INTERFACE},public=${PUBLIC_INTERFACE}"}
|
||||
export DEPLOYMENT_TIMEOUT=${DEPLOYMENT_TIMEOUT:-"90"}
|
||||
export DASHBOARD_URL=${DASHBOARD_URL:-"http://${MANAGEMENT_IP_START}"}
|
||||
|
@ -38,16 +38,8 @@ source ${REGTEST_DIR}/${REGTEST_CONF}
|
||||
source `which virtualenvwrapper.sh`
|
||||
workon compass-core
|
||||
|
||||
declare -A roles_list
|
||||
machines=''
|
||||
|
||||
for roles in ${HOST_ROLES//;/ }; do
|
||||
roles_list[${#roles_list[@]}]=${roles}
|
||||
done
|
||||
echo "role list: ${roles_list[@]}"
|
||||
roles_offset=0
|
||||
host_roles_list=''
|
||||
|
||||
tear_down_machines
|
||||
|
||||
echo "setup $VIRT_NUM virt machines"
|
||||
@ -112,23 +104,9 @@ for i in `seq $VIRT_NUM`; do
|
||||
else
|
||||
machines="${machines},${mac}"
|
||||
fi
|
||||
|
||||
if [ $roles_offset -lt ${#roles_list[@]} ]; then
|
||||
host_roles="host${i}=${roles_list[$roles_offset]}"
|
||||
roles_offset=$(expr $roles_offset + 1)
|
||||
else
|
||||
host_roles="host${i}="
|
||||
fi
|
||||
|
||||
if [ -z "$host_roles_list" ]; then
|
||||
host_roles_list="$host_roles"
|
||||
else
|
||||
host_roles_list="${host_roles_list};$host_roles"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "machines: $machines"
|
||||
echo "host roles: $host_roles_list"
|
||||
virsh list
|
||||
|
||||
# Avoid infinite relative symbolic links
|
||||
@ -139,10 +117,18 @@ if [[ ! -L compass_logs ]]; then
|
||||
ln -s /var/log/compass compass_logs
|
||||
fi
|
||||
CLIENT_SCRIPT=/opt/compass/bin/client.py
|
||||
/opt/compass/bin/refresh.sh
|
||||
if [[ "$?" != "0" ]]; then
|
||||
echo "failed to refresh"
|
||||
exit 1
|
||||
if [[ "$CLEAN_OLD_DATA" == "0" || "$CLEAN_OLD_DATA" == "false" ]]; then
|
||||
echo "keep old deployment data"
|
||||
else
|
||||
/opt/compass/bin/refresh.sh
|
||||
if [[ "$?" != "0" ]]; then
|
||||
echo "failed to refresh"
|
||||
exit 1
|
||||
fi
|
||||
/opt/compass/bin/clean_nodes.sh
|
||||
/opt/compass/bin/clean_clients.sh
|
||||
/opt/compass/bin/clean_environments.sh
|
||||
/opt/compass/bin/remove_systems.sh
|
||||
fi
|
||||
|
||||
if [[ "$USE_POLL_SWITCHES" == "0" || "$USE_POLL_SWITCHES" == "false" ]]; then
|
||||
@ -153,7 +139,7 @@ if [[ "$USE_POLL_SWITCHES" == "0" || "$USE_POLL_SWITCHES" == "false" ]]; then
|
||||
echo "switch,${switch_ip},huawei,${SWITCH_VERSION},${SWITCH_COMMUNITY},under_monitoring" >> ${TMP_SWITCH_MACHINE_FILE}
|
||||
switch_port=1
|
||||
for mac in ${machines//,/ }; do
|
||||
echo "machine,${switch_ip},${switch_port},1,${mac}" >> ${TMP_SWITCH_MACHINE_FILE}
|
||||
echo "machine,${switch_ip},${switch_port},${mac}" >> ${TMP_SWITCH_MACHINE_FILE}
|
||||
let switch_port+=1
|
||||
done
|
||||
break
|
||||
@ -162,11 +148,15 @@ if [[ "$USE_POLL_SWITCHES" == "0" || "$USE_POLL_SWITCHES" == "false" ]]; then
|
||||
cat $TMP_SWITCH_MACHINE_FILE
|
||||
echo "======================================================="
|
||||
/opt/compass/bin/manage_db.py set_switch_machines --switch_machines_file ${TMP_SWITCH_MACHINE_FILE}
|
||||
if [[ "$?" != "0" ]]; then
|
||||
echo "failed to set switch machines"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
POLL_SWITCHES_FLAG="poll_switches"
|
||||
fi
|
||||
|
||||
${CLIENT_SCRIPT} --logfile= --loglevel=info --logdir= --networking="${NETWORKING}" --partitions="${PARTITION}" --credentials="${SECURITY}" --host_roles="${host_roles_list}" --dashboard_role="${DASHBOARD_ROLE}" --switch_ips="${SWITCH_IPS}" --machines="${machines}" --switch_credential="${SWITCH_CREDENTIAL}" --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG}
|
||||
${CLIENT_SCRIPT} --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" --hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" --adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" --server_credential="${SERVER_CREDENTIAL}" --service_credentials="${SERVICE_CREDENTIALS}" --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" --network_mapping="${NETWORK_MAPPING}" --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" --machines="${machines}" --switch_credential="${SWITCH_CREDENTIAL}" --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
|
||||
rc=$?
|
||||
deactivate
|
||||
# Tear down machines after the test
|
||||
@ -175,12 +165,12 @@ if [[ $rc != 0 ]]; then
|
||||
echo "deployment failed"
|
||||
exit 1
|
||||
fi
|
||||
if [[ $tempest == true ]]; then
|
||||
./tempest_run.sh
|
||||
if [[ $? != 0 ]]; then
|
||||
tear_down_machines
|
||||
echo "tempest failed"
|
||||
exit 1
|
||||
fi
|
||||
tear_down_machines
|
||||
fi
|
||||
#if [[ $tempest == true ]]; then
|
||||
# ./tempest_run.sh
|
||||
# if [[ $? != 0 ]]; then
|
||||
# tear_down_machines
|
||||
# echo "tempest failed"
|
||||
# exit 1
|
||||
# fi
|
||||
# tear_down_machines
|
||||
#fi
|
||||
|
@ -1,9 +1,10 @@
|
||||
# Set test script variables
|
||||
export VIRT_NUM=${VIRT_NUM:-'2'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'5'}
|
||||
# conf to run 1 instance with single-contoller-multi-compute flavor
|
||||
export VIRT_NUM=${VIRT_NUM:-'1'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'10'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'8192'}
|
||||
export VIRT_DISK=${VIRT_DISK:-'30G'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'os-controller,os-ops-database,os-ops-messaging,os-image'}
|
||||
|
||||
export HOSTNAMES=${HOSTNAMES:-'single-contoller-multi-compute'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'single-contoller-multi-compute=os-controller,os-compute-worker,os-network,os-block-storage-volume'}
|
||||
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'}
|
||||
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
source ${REGTEST_DIR}/regtest.conf
|
||||
|
@ -1,10 +1,11 @@
|
||||
# Set test script variables
|
||||
export VIRT_NUM=${VIRT_NUM:-'7'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'4'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'6144'}
|
||||
export VIRT_NUM=${VIRT_NUM:-'1'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'10'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'8192'}
|
||||
export VIRT_DISK=${VIRT_DISK:-'30G'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'os-controller,os-image;os-ops-database,os-ops-messaging;os-network,os-block-storage-worker;os-ha;os-ha'}
|
||||
export HA_VIP=${HA_VIP:-`echo $ipaddr |cut -d. -f'1 2 3'`.253}
|
||||
|
||||
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
export HOSTNAMES=${HOSTNAMES:-'multinodes'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'multinodes=os-ops-database,os-ops-messaging,os-identity,os-compute-controller,os-compute-worker,os-network-server,os-network-worker,os-block-storage-volume,os-block-storage-controller,os-image,os-dashboard'}
|
||||
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
|
||||
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'}
|
||||
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
source ${REGTEST_DIR}/regtest.conf
|
||||
|
@ -1,10 +1,11 @@
|
||||
# conf to run 10 instances
|
||||
export VIRT_NUM=${VIRT_NUM:-'10'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'3'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'6144'}
|
||||
# conf to run 2 instances with single-contoller-multi-compute flavor
|
||||
export VIRT_NUM=${VIRT_NUM:-'3'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'4'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'8192'}
|
||||
export VIRT_DISK=${VIRT_DISK:-'30G'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'os-controller,os-image;os-controller,os-image;os-ops-database,os-ops-messaging;os-network,os-block-storage-worker;os-ha;os-ha'}
|
||||
export HA_VIP=${HA_VIP:-`echo $ipaddr |cut -d. -f'1 2 3'`.253}
|
||||
export HOSTNAMES=${HOSTNAMES:-'single-controller-controller,sinle-controller-network,single-controller-compute'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'single-controller-controller=os-controller;sinle-controller-network=os-network,os-block-storage-volume'}
|
||||
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
|
||||
|
||||
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
source ${REGTEST_DIR}/regtest.conf
|
||||
source ${REGTEST_DIR}/regtest2.conf
|
||||
|
@ -1,9 +1,10 @@
|
||||
# conf to run os-dashboard
|
||||
export VIRT_NUM=${VIRT_NUM:-'1'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'10'}
|
||||
export VIRT_NUM=${VIRT_NUM:-'12'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'2'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'8192'}
|
||||
export VIRT_DISK=${VIRT_DISK:-'20G'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'os-dashboard'}
|
||||
export HOSTNAMES=${HOSTNAMES:-'multinodes-database,multinodes-messaging,multinodes-identity,multinodes-compute-controller,multinodes-compute-worker1,multinodes-compute-worker2,multinodes-network-server,multinodes-network-worker,multinodes-block-storage-volume,multinodes-block-storage-controller,multinodes-image,multinodes-dashboard'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'multinodes-database=os-ops-database;multinodes-messaging=os-ops-messaging;multinodes-identity=os-identity;multinodes-compute-controller=os-compute-controller;multinodes-network-server=os-network-server;multinodes-network-worker=os-network-worker;multinodes-block-storage-volume=os-block-storage-volume;multinodes-block-storage-controller=os-block-storage-controller;multinodes-image=os-image;multinodes-dashboard=os-dashboard'}
|
||||
|
||||
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
source ${REGTEST_DIR}/regtest.conf
|
||||
source ${REGTEST_DIR}/regtest3.conf
|
||||
|
@ -1,10 +0,0 @@
|
||||
# Set test script variables
|
||||
export VIRT_NUM=${VIRT_NUM:-'1'}
|
||||
export VIRT_CPUS=${VIRT_CPUS:-'10'}
|
||||
export VIRT_MEM=${VIRT_MEM:-'8192'}
|
||||
export VIRT_DISK=${VIRT_DISK:-'20G'}
|
||||
export HOST_ROLES=${HOST_ROLES:-'os-dashboard'}
|
||||
export DASHBOARD_ROLE=${DASHBOARD_ROLE:-"os-dashboard"}
|
||||
export DEPLOYMENT_TIMEOUT=${DEPLOYMENT_TIMEOUT:-'60'}
|
||||
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
source ${REGTEST_DIR}/regtest.conf
|
Loading…
x
Reference in New Issue
Block a user