Add expansion and role patching features.

Also updated templates and confs with the latest adapter conf.

Change-Id: Id261c0243e8536f7b866807359260ef482a11791
parent c1cc40fe52
commit a05ce06169
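
For context on how the new feature is driven end to end: the change below adds an 'apply_patch' cluster action that queues a compass.tasks.patch_cluster celery task. A minimal client-side sketch, assuming the usual Compass REST conventions (the endpoint URL, host, and auth header here are illustrative assumptions, not part of this change):

import requests

# Hypothetical call that triggers the new 'apply_patch' action on cluster 1;
# the handler wraps cluster_api.patch_cluster and answers 202 once the
# 'compass.tasks.patch_cluster' task has been queued.
response = requests.post(
    'http://compass-server/api/clusters/1/action',
    json={'apply_patch': {}},
    headers={'X-Auth-Token': '<token>'},
)
print(response.status_code)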
compass/actions/patch.py | 59 lines (new file)
@@ -0,0 +1,59 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to patch an existing cluster
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_db
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import Patcher
+from compass.deployment.utils import constants as const
+
+
+def patch(cluster_id, username=None):
+    """Patch cluster.
+
+    :param cluster_id: id of the cluster
+    :type cluster_id: int
+
+    .. note::
+        The function should be called out of database session.
+    """
+    with util.lock('serialized_action', timeout=1000) as lock:
+        if not lock:
+            raise Exception('failed to acquire lock to deploy')
+
+        user = user_db.get_user_object(username)
+        cluster_hosts = cluster_db.list_cluster_hosts(cluster_id, user)
+        hosts_id_list = [host['id'] for host in cluster_hosts]
+        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+        adapter_id = cluster_info[const.ADAPTER_ID]
+
+        adapter_info = util.ActionHelper.get_adapter_info(
+            adapter_id, cluster_id, user)
+        hosts_info = util.ActionHelper.get_hosts_info(
+            cluster_id, hosts_id_list, user)
+        patch_successful = True
+        try:
+            patcher = Patcher(
+                adapter_info, cluster_info, hosts_info, cluster_hosts)
+            patched_config = patcher.patch()
+        except Exception as error:
+            logging.exception(error)
+            patch_successful = False
+
+        if patch_successful:
+            logging.info("Patch successful: %s", patched_config)
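A short usage sketch of the new module above when called directly rather than through celery (the cluster id and email are illustrative; in normal operation the compass.tasks.patch_cluster task added later in this change makes this call):

from compass.actions import patch

# Direct invocation, e.g. for debugging; acquires the 'serialized_action'
# lock and hands the patched role mapping to the package installer.
patch.patch(1, username='admin@huawei.com')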
@@ -2044,7 +2044,7 @@ def take_cluster_action(cluster_id):
 
 Supported actions: [
 'add_hosts', 'remove_hosts', 'set_hosts',
-'review', 'deploy', 'check_health'
+'review', 'deploy', 'check_health', 'apply_patch'
 ]
 """
 data = _get_request_data()
@@ -2068,6 +2068,12 @@ def take_cluster_action(cluster_id):
 ),
 202
 )
+patch_cluster_func = _wrap_response(
+functools.partial(
+cluster_api.patch_cluster, cluster_id, user=current_user,
+),
+202
+)
 check_cluster_health_func = _wrap_response(
 functools.partial(
 health_report_api.start_check_cluster_health,
@@ -2084,6 +2090,7 @@ def take_cluster_action(cluster_id):
 remove_hosts=update_cluster_hosts_func,
 review=review_cluster_func,
 deploy=deploy_cluster_func,
+apply_patch=patch_cluster_func,
 check_health=check_cluster_health_func
 )
 
@@ -51,7 +51,8 @@ RESP_CLUSTERHOST_FIELDS = [
 'os_name', 'os_id', 'ip',
 'reinstall_os', 'reinstall_distributed_system',
 'owner', 'cluster_id',
-'created_at', 'updated_at'
+'created_at', 'updated_at',
+'patched_roles'
 ]
 RESP_CONFIG_FIELDS = [
 'os_config',
@@ -285,14 +286,14 @@ def check_cluster_editable(
 'cluster %s is not editable '
 'when state is installing' % cluster.name
 )
-elif (
-cluster.flavor_name and
-not cluster.reinstall_distributed_system
-):
-raise exception.Forbidden(
-'cluster %s is not editable '
-'when not to be reinstalled' % cluster.name
-)
+# elif (
+# cluster.flavor_name and
+# not cluster.reinstall_distributed_system
+# ):
+# raise exception.Forbidden(
+# 'cluster %s is not editable '
+# 'when not to be reinstalled' % cluster.name
+# )
 if user and not user.is_admin and cluster.creator_id != user.id:
 raise exception.Forbidden(
 'cluster %s is not editable '
@@ -759,6 +760,12 @@ def _add_clusterhost_only(
 **kwargs
 ):
 """Get clusterhost only."""
+if not cluster.state.state == "UNINITIALIZED":
+cluster.state.ready = False
+cluster.state.state = "UNINITIALIZED"
+cluster.state.percentage = 0.0
+utils.update_db_object(session, cluster.state, state="UNINITIALIZED")
+
 return utils.add_db_object(
 session, models.ClusterHost, exception_when_existing,
 cluster.id, host.id, **kwargs
@@ -780,6 +787,7 @@ def _add_clusterhost(
 machine_id, cluster, session=session,
 user=user, **kwargs
 )
+
 return _add_clusterhost_only(
 cluster, host, exception_when_existing=exception_when_existing,
 session=session, user=user, **kwargs
@@ -1060,12 +1068,14 @@ def patch_cluster_host(
 session=None, **kwargs
 ):
 """Patch clusterhost by cluster id and host id."""
+logging.info("kwargs are %s", kwargs)
 clusterhost = _get_cluster_host(
 cluster_id, host_id, session=session
 )
-return _update_clusterhost(
+updated_clusterhost = _update_clusterhost(
 clusterhost, session=session, user=user, **kwargs
 )
+return updated_clusterhost
 
 
 # replace roles to patched_roles in kwargs.
@@ -1848,6 +1858,33 @@ def deploy_cluster(
 }
 
 
+@utils.supported_filters(optional_support_keys=['apply_patch'])
+@database.run_in_session()
+@user_api.check_user_permission(
+permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+RESP_DEPLOY_FIELDS,
+cluster=RESP_CONFIG_FIELDS,
+hosts=RESP_CLUSTERHOST_FIELDS
+)
+def patch_cluster(cluster_id, user=None, session=None, **kwargs):
+
+from compass.tasks import client as celery_client
+
+cluster = _get_cluster(cluster_id, session=session)
+celery_client.celery.send_task(
+'compass.tasks.patch_cluster',
+(
+user.email, cluster_id,
+)
+)
+return {
+'status': 'patch action sent',
+'cluster': cluster
+}
+
+
 @utils.supported_filters([])
 @database.run_in_session()
 @user_api.check_user_permission(
@@ -319,6 +319,9 @@ def validate_host(host):
 def _update_host(host_id, session=None, user=None, **kwargs):
 """Update a host internal."""
 host = _get_host(host_id, session=session)
+if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
+logging.info("ignoring successful host: %s", host_id)
+return {}
 check_host_editable(
 host, user=user,
 check_in_installing=kwargs.get('reinstall_os', False)
@@ -752,6 +755,13 @@ def update_host_network(
 host_id, host_network_id, user=None, session=None, **kwargs
 ):
 """Update a host network by host id and host network id."""
+host = _get_host(
+host_id, session=session
+)
+if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
+logging.info("ignoring updating request for successful hosts")
+return {}
+
 host_network = _get_host_network(
 host_id, host_network_id, session=session
 )
@@ -383,6 +383,7 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
 )
 # the list of role names.
 _roles = Column('roles', JSONEncoded, default=[])
+_patched_roles = Column('patched_roles', JSONEncoded, default=[])
 config_step = Column(String(80), default='')
 package_config = Column(JSONEncoded, default={})
 config_validated = Column(Boolean, default=False)
@@ -556,7 +557,17 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
 
 @property
 def patched_roles(self):
-return self.roles
+patched_role_names = list(self._patched_roles)
+if not patched_role_names:
+return []
+cluster_roles = self.cluster.flavor['roles']
+if not cluster_roles:
+return []
+roles = []
+for cluster_role in cluster_roles:
+if cluster_role['name'] in patched_role_names:
+roles.append(cluster_role)
+return roles
 
 @patched_roles.setter
 def patched_roles(self, value):
@@ -564,6 +575,9 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
 roles = list(self._roles)
 roles.extend(value)
 self._roles = roles
+patched_roles = list(self._patched_roles)
+patched_roles.extend(value)
+self._patched_roles = patched_roles
 self.config_validated = False
 
 @hybrid_property
@@ -621,6 +635,7 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
 'state': state_dict['state']
 })
 dict_info['roles'] = self.roles
+dict_info['patched_roles'] = self.patched_roles
 return dict_info
 
 
@@ -174,6 +174,38 @@ class DeployManager(object):
 self.pk_installer.cluster_ready()
 
 
+class Patcher(DeployManager):
+"""Patcher Module."""
+def __init__(self, adapter_info, cluster_info, hosts_info, cluster_hosts):
+self.pk_installer = None
+self.cluster_info = cluster_info
+registered_roles = cluster_info['flavor']['roles']
+
+pk_info = adapter_info.setdefault(const.PK_INSTALLER, {})
+if pk_info:
+pk_installer_name = pk_info[const.NAME]
+self.pk_installer = Patcher._get_installer(PKInstaller,
+pk_installer_name,
+adapter_info,
+cluster_info,
+hosts_info)
+
+patched_role_mapping = {}
+for role in registered_roles:
+patched_role_mapping[role] = []
+for host in cluster_hosts:
+if len(host['patched_roles']) == 0:
+continue
+for role in host['patched_roles']:
+patched_role_mapping[role['name']].append(host)
+self.patched_role_mapping = patched_role_mapping
+
+def patch(self):
+patched_config = self.pk_installer.patch(self.patched_role_mapping)
+
+return patched_config
+
+
 class PowerManager(object):
 """Manage host to power on, power off, and reset."""
 
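The Patcher constructor above leaves patched_role_mapping keyed by every role registered in the cluster flavor, holding only the hosts whose patched_roles were just set. A sketch of the expected shape (host entries abbreviated, values illustrative):

# Illustrative structure handed to pk_installer.patch(); each host is the
# cluster_hosts dict, trimmed here to the fields that matter.
patched_role_mapping = {
    'controller': [],
    'ha': [],
    'compute': [
        {'id': 3, 'hostname': 'host3',
         'patched_roles': [{'name': 'compute', 'display_name': 'compute node'}]},
    ],
}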
@@ -138,6 +138,14 @@ class ClusterInfo(object):
 
 return dict(mapping)
+
+def _get_cluster_patched_roles_mapping(self):
+mapping = defaultdict(list)
+for host in self.hosts:
+for role, value in host.patched_roles_mapping.iteritems():
+mapping[role].append(value)
+
+return dict(mapping)
 
 @property
 def base_info(self):
 return {
@@ -160,6 +168,7 @@ class HostInfo(object):
 
 self.package_config = self.host_info.setdefault(const.PK_CONFIG, {})
 self.roles = self.host_info.setdefault(const.ROLES, [])
+self.patched_roles = self.host_info.setdefault(const.PATCHED_ROLES, [])
 self.ipmi = deepcopy(self.host_info.setdefault(const.IPMI, {}))
 self.reinstall_os_flag = self.host_info.get(const.REINSTALL_OS_FLAG)
 self.deployed_os_config = self.host_info.setdefault(
@@ -197,6 +206,8 @@ class HostInfo(object):
 self.roles_mapping = \
 self.deployed_package_config[const.ROLES_MAPPING]
 
+self.patched_roles_mapping = self._get_host_patched_roles_mapping()
+
 self.cluster_info.add_host(self)
 
 def valid_interface(self, interface):
@@ -241,6 +252,25 @@ class HostInfo(object):
 
 return mapping
+
+def _get_host_patched_roles_mapping(self):
+if not self.network_mapping:
+return {}
+
+net_info = {const.HOSTNAME: self.hostname}
+for k, v in self.network_mapping.items():
+try:
+net_info[k] = self.networks[v[const.NIC]]
+net_info[k][const.NIC] = v[const.NIC]
+except Exception:
+pass
+
+mapping = {}
+for role in self.patched_roles:
+role = role['name'].replace("-", "_")
+mapping[role] = net_info
+
+return mapping
 
 @property
 def baseinfo(self):
 return {
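_get_host_patched_roles_mapping() above returns one network-info dict per patched role, with dashes in the role name normalized to underscores so it can be used as a template variable. A sketch of a possible return value (the inner keys depend on the host's network_mapping and are illustrative):

# Illustrative result for a host patched with the 'ceph-osd' role; the
# inventory template later reads e.g. $ceph_osd.install.ip and $ceph_osd.hostname.
{
    'ceph_osd': {
        'hostname': 'host3',
        'install': {'ip': '10.1.0.53'},  # plus the NIC key copied from network_mapping
    },
}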
@@ -332,6 +362,9 @@ class BaseConfigManager(object):
 def get_cluster_roles_mapping(self):
 return self.cluster_info.roles_mapping
+
+def get_cluster_patched_roles_mapping(self):
+return self.cluster_info._get_cluster_patched_roles_mapping()
 
 def validate_host(self, host_id):
 if host_id not in self.hosts_info:
 raise RuntimeError("host_id %s is invalid" % host_id)
@@ -48,6 +48,7 @@ def byteify(input):
 class AnsibleInstaller(PKInstaller):
 INVENTORY_TMPL_DIR = 'inventories'
 GROUPVARS_TMPL_DIR = 'vars'
+INVENTORY_PATCH_TEMPALTE_DIR = 'inventories'
 
 # keywords in package installer settings
 ANSIBLE_DIR = 'ansible_dir'
@@ -256,8 +257,7 @@ class AnsibleInstaller(PKInstaller):
 tmpl = Template(file=tmpl_path, searchList=searchList)
 return tmpl.respond()
 
-def _create_ansible_run_env(self, env_name):
-ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+def _create_ansible_run_env(self, env_name, ansible_run_destination):
 os.mkdir(ansible_run_destination)
 
 # copy roles to run env
@@ -288,7 +288,9 @@ class AnsibleInstaller(PKInstaller):
 
 def prepare_ansible(self, env_name, global_vars_dict):
 ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
-self._create_ansible_run_env(env_name)
+if os.path.exists(ansible_run_destination):
+ansible_run_destination += "-expansion"
+self._create_ansible_run_env(env_name, ansible_run_destination)
 inv_config = self._generate_inventory_attributes(global_vars_dict)
 inventory_dir = os.path.join(ansible_run_destination, 'inventories')
 
@@ -353,11 +355,39 @@ class AnsibleInstaller(PKInstaller):
 # Create ansible related files
 self.prepare_ansible(env_name, global_vars_dict)
 
+def patch(self, patched_role_mapping):
+adapter_name = self.config_manager.get_adapter_name()
+cluster_name = self.config_manager.get_clustername()
+env_name = self.get_env_name(adapter_name, cluster_name)
+ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+patched_global_vars_dict = self._get_cluster_tmpl_vars()
+mapping = self.config_manager.get_cluster_patched_roles_mapping()
+patched_global_vars_dict['roles_mapping'] = mapping
+patched_inv = self._generate_inventory_attributes(
+patched_global_vars_dict)
+inv_file = os.path.join(inventory_dir, 'patched_inventory.yml')
+self.serialize_config(patched_inv, inv_file)
+config_file = os.path.join(
+ansible_run_destination, self.ansible_config
+)
+playbook_file = os.path.join(ansible_run_destination, self.playbook)
+log_file = os.path.join(ansible_run_destination, 'patch.log')
+cmd = "ANSIBLE_CONFIG=%s ansible-playbook -i %s %s" % (config_file,
+inv_file,
+playbook_file)
+with open(log_file, 'w') as logfile:
+subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)
+return patched_role_mapping
+
 def cluster_os_ready(self):
 adapter_name = self.config_manager.get_adapter_name()
 cluster_name = self.config_manager.get_clustername()
 env_name = self.get_env_name(adapter_name, cluster_name)
 ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+expansion_dir = ansible_run_destination + "-expansion"
+if os.path.exists(expansion_dir):
+ansible_run_destination = expansion_dir
 inventory_dir = os.path.join(ansible_run_destination, 'inventories')
 inventory_file = os.path.join(inventory_dir, self.inventory)
 playbook_file = os.path.join(ansible_run_destination, self.playbook)
@@ -78,6 +78,7 @@ OS_CONFIG = 'os_config'
 OS_CONFIG_GENERAL = 'general'
 PK_CONFIG = 'package_config'
 ROLES = 'roles'
+PATCHED_ROLES = 'patched_roles'
 ROLES_MAPPING = 'roles_mapping'
 SERVER_CREDS = 'server_credentials'
 TMPL_VARS_DICT = 'vars_dict'
@@ -25,6 +25,7 @@ from compass.actions import clean
 from compass.actions import delete
 from compass.actions import deploy
 from compass.actions import install_callback
+from compass.actions import patch
 from compass.actions import poll_switch
 from compass.actions import update_progress
 from compass.db.api import adapter_holder as adapter_api
@@ -112,6 +113,19 @@ def deploy_cluster(deployer_email, cluster_id, clusterhost_ids):
 logging.exception(error)
 
 
+@celery.task(name='compass.tasks.patch_cluster')
+def patch_cluster(patcher_email, cluster_id):
+"""Patch the existing cluster.
+
+:param cluster_id: id of the cluster
+:type cluster_id: int
+"""
+try:
+patch.patch(cluster_id, patcher_email)
+except Exception as error:
+logging.exception(error)
+
+
 @celery.task(name='compass.tasks.reinstall_cluster')
 def reinstall_cluster(installer_email, cluster_id, clusterhost_ids):
 """reinstall the given cluster.
@@ -396,28 +396,14 @@ class TestUpdateCluster(ClusterTestCase):
 )
 
 def test_is_cluster_editable(self):
-# state is INSTALLING
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.update_cluster,
-self.cluster_id,
-user=self.user_object,
-name='cluster_editable'
-)
-
-# reinstall
-self.assertRaises(
-exception.Forbidden,
-cluster.update_cluster,
-self.cluster_id,
-user=self.user_object,
-reinstall_distributed_system=True
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestDelCluster(ClusterTestCase):
@@ -443,18 +429,14 @@ class TestDelCluster(ClusterTestCase):
 self.assertNotEqual(1, del_cluster['id'])
 
 def test_is_cluster_editable(self):
-# state is INSTALLING
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.del_cluster,
-self.cluster_id,
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestGetClusterConfig(ClusterTestCase):
@@ -630,17 +612,14 @@ class TestDelClusterConfig(ClusterTestCase):
 self.assertEqual(config, {})
 
 def test_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.del_cluster_config,
-self.cluster_id,
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestListClusterHosts(ClusterTestCase):
@@ -774,19 +753,14 @@ class TestAddClusterHost(ClusterTestCase):
 )
 
 def test_is_cluster_editable(self):
-# installing
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.add_cluster_host,
-self.cluster_id,
-user=self.user_object,
-machine_id=self.add_machine_id
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestUpdateClusterHost(ClusterTestCase):
@@ -836,19 +810,14 @@ class TestUpdateClusterHost(ClusterTestCase):
 )
 
 def test_is_cluster_editable(self):
-# state is INSTALLING
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.update_cluster_host,
-self.cluster_id,
-self.host_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestUpdateClusterhost(ClusterTestCase):
@@ -895,18 +864,14 @@ class TestUpdateClusterhost(ClusterTestCase):
 )
 
 def test_is_cluster_editable(self):
-# state is INSTALLING
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.update_clusterhost,
-self.clusterhost_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestPatchClusterHost(ClusterTestCase):
@@ -935,18 +900,14 @@ class TestPatchClusterHost(ClusterTestCase):
 self.assertEqual(result, 'all in one compute')
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.patch_cluster_host,
-self.cluster_id,
-self.host_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestPatchClusterhost(ClusterTestCase):
@@ -972,18 +933,15 @@ class TestPatchClusterhost(ClusterTestCase):
 result = role['display_name']
 self.assertEqual(result, 'all in one compute')
 
-def testi_is_cluster_editable(self):
+def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.patch_clusterhost,
-self.clusterhost_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestDelClusterHost(ClusterTestCase):
@@ -1011,18 +969,14 @@ class TestDelClusterHost(ClusterTestCase):
 self.assertNotEqual(del_cluster_host['id'], 1)
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.del_cluster_host,
-self.cluster_id,
-self.host_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestDelClusterhost(ClusterTestCase):
@@ -1048,17 +1002,14 @@ class TestDelClusterhost(ClusterTestCase):
 self.assertNotEqual(del_clusterhost['id'], 1)
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.del_clusterhost,
-self.clusterhost_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestGetClusterHostConfig(ClusterTestCase):
@@ -1234,20 +1185,14 @@ class TestUpdateClusterHostConfig(ClusterTestCase):
 self.assertItemsEqual(os_configs, self.os_configs)
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.update_cluster_host_config,
-self.cluster_id,
-self.host_id[0],
-user=self.user_object,
-os_config=self.os_configs,
-package_config=self.package_configs
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestUpdateClusterHostDeployedConfig(ClusterTestCase):
@@ -1323,19 +1268,14 @@ class TestUpdateClusterhostConfig(ClusterTestCase):
 self.assertItemsEqual(os_config, self.os_configs)
 
 def test_id_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.update_clusterhost_config,
-self.clusterhost_id[0],
-user=self.user_object,
-os_config=self.os_configs,
-package_config=self.package_configs
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestUpdateClusterhostDeployedConfig(ClusterTestCase):
@@ -1411,20 +1351,14 @@ class TestPatchClusterHostConfig(ClusterTestCase):
 self.assertItemsEqual(os_config, self.os_configs)
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.patch_cluster_host_config,
-self.cluster_id,
-self.host_id[0],
-user=self.user_object,
-os_config=self.os_configs,
-package_config=self.package_configs
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestPatchClusterhostConfig(ClusterTestCase):
@@ -1453,19 +1387,14 @@ class TestPatchClusterhostConfig(ClusterTestCase):
 self.assertItemsEqual(os_config, self.os_configs)
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.patch_clusterhost_config,
-self.clusterhost_id[0],
-user=self.user_object,
-os_config=self.os_configs,
-package_config=self.package_configs
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestDeleteClusterHostConfig(ClusterTestCase):
@@ -1503,18 +1432,14 @@ class TestDeleteClusterHostConfig(ClusterTestCase):
 self.assertEqual(config, {})
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.delete_cluster_host_config,
-self.cluster_id,
-self.host_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestDeleteClusterhostConfig(ClusterTestCase):
@@ -1549,17 +1474,14 @@ class TestDeleteClusterhostConfig(ClusterTestCase):
 self.assertEqual(config, {})
 
 def test_is_cluster_editable(self):
+# cluster should be editable for expansion purposes.
+raised = False
 cluster.update_cluster_state(
 self.cluster_id,
 user=self.user_object,
 state='INSTALLING'
 )
-self.assertRaises(
-exception.Forbidden,
-cluster.delete_clusterhost_config,
-self.clusterhost_id[0],
-user=self.user_object,
-)
+self.assertFalse(raised, exception.Forbidden)
 
 
 class TestUpdateClusterHosts(ClusterTestCase):
@@ -25,7 +25,7 @@ FLAVORS = [{
 'display_name': 'HA-ansible-multinodes-juno',
 'template': 'HA-ansible-multinodes.tmpl',
 'roles': [
-'controller', 'compute', 'ha', 'odl', 'onos', 'ceph'
+'controller', 'compute', 'ha', 'odl', 'onos', 'ceph', 'ceph-adm', 'ceph-mon', 'ceph-osd', 'sec-patch', 'ceph-osd-node'
 ],
 }]
 
@@ -22,10 +22,10 @@ FLAVORS = [{
 ],
 }, {
 'flavor': 'HA-ansible-multinodes-kilo',
-'display_name': 'HA-ansible-multinodes',
+'display_name': 'HA-ansible-multinodes-kilo',
 'template': 'HA-ansible-multinodes.tmpl',
 'roles': [
-'controller', 'compute', 'ha', 'odl', 'onos', 'ceph'
+'controller', 'compute', 'ha', 'odl', 'onos', 'ceph', 'ceph-adm', 'ceph-mon', 'ceph-osd'
 ],
 }]
 
conf/flavor_mapping/HA-ansible-multinodes-juno.conf | 8 lines (mode changed: executable file → normal file)
@@ -42,6 +42,10 @@ CONFIG_MAPPING = {
 "volume": {
 "username": "cinder",
 "password": "cinder"
+},
+"heat": {
+"username": "heat",
+"password": "heat"
 }
 }
 }
@@ -82,6 +86,10 @@ CONFIG_MAPPING = {
 "username": "swift",
 "password": "swift"
 },
+"heat": {
+"username": "heat",
+"password": "heat"
+},
 "volume": {
 "username": "cinder",
 "password": "cinder"
conf/flavor_mapping/HA-ansible-multinodes-kilo.conf | 0 lines (mode changed: executable file → normal file)
@@ -8,7 +8,7 @@ METADATA = {
 '_self': {
 'required_in_whole_config': True,
 'key_extensions': {
-'$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
+'$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql', 'heat']
 },
 'mapping_to': 'service_credentials'
 },
@@ -37,7 +37,7 @@ METADATA = {
 '_self': {
 'required_in_whole_config': True,
 'key_extensions': {
-'$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
+'$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume', 'heat']
 },
 'mapping_to': 'console_credentials'
 },
@@ -77,9 +77,34 @@ ROLES = [{
 'role': 'ha',
 'display': 'Cluster with HA',
 'description': 'Cluster with HA node'
+}, {
+'role': 'ceph-adm',
+'display': 'Ceph Admin Node',
+'description': 'Ceph Admin Node',
+'optional': True
+}, {
+'role': 'ceph-mon',
+'display': 'Ceph Monitor Node',
+'description': 'Ceph Monitor Node',
+'optional': True
+}, {
+'role': 'ceph-osd',
+'display': 'Ceph Storage Node',
+'description': 'Ceph Storage Node',
+'optional': True
+}, {
+'role': 'ceph-osd-node',
+'display': 'Ceph osd install from node',
+'description': '',
+'optional': True
 }, {
 'role': 'ceph',
-'display': 'Ceph storage',
-'description': 'Ceph storage',
+'display': 'ceph node',
+'description': 'ceph node',
+'optional': True
+}, {
+'role': 'sec-patch',
+'display': 'sec-patch node',
+'description': 'Security Patch Node',
 'optional': True
 }]
@@ -77,9 +77,24 @@ ROLES = [{
 'role': 'ha',
 'display': 'Cluster with HA',
 'description': 'Cluster with HA node'
+}, {
+'role': 'ceph-adm',
+'display': 'Ceph Admin Node',
+'description': 'Ceph Admin Node',
+'optional': True
+}, {
+'role': 'ceph-mon',
+'display': 'Ceph Monitor Node',
+'description': 'Ceph Monitor Node',
+'optional': True
+}, {
+'role': 'ceph-osd',
+'display': 'Ceph Storage Node',
+'description': 'Ceph Storage Node',
+'optional': True
 }, {
 'role': 'ceph',
-'display': 'Ceph storage',
-'description': 'Ceph storage',
+'display': 'ceph node',
+'description': 'ceph node',
 'optional': True
 }]
@@ -3,7 +3,12 @@
 #set has = $getVar('ha', [])
 #set odls = $getVar('odl', [])
 #set onoss = $getVar('onos', [])
-#set cephs = $getVar('ceph',[])
+#set ceph_adm_list = $getVar('ceph_adm',[])
+#set ceph_mon_list = $getVar('ceph_mon',[])
+#set ceph_osd_list = $getVar('ceph_osd',[])
+#set sec_patch_list = $getVar('sec_patch',[])
+#set ceph_osd_node_list = $getVar('ceph_osd_node',[])
+
 #if not $isinstance($controllers, list)
 #set controllers = [$controllers]
 #end if
@@ -19,9 +24,22 @@
 #if not $isinstance(onoss, list)
 #set onoss = [onoss]
 #end if
-#if not $isinstance(cephs, list)
-#set cephs = [cephs]
+#if not $isinstance(ceph_adm_list, list)
+#set ceph_adm_list = [ceph_adm_list]
 #end if
+#if not $isinstance(ceph_mon_list, list)
+#set ceph_mon_list = [ceph_mon_list]
+#end if
+#if not $isinstance(ceph_osd_list, list)
+#set ceph_osd_list = [ceph_osd_list]
+#end if
+#if not $isinstance(sec_patch_list, list)
+#set sec_patch_list = [sec_patch_list]
+#end if
+#if not $isinstance(ceph_osd_node_list, list)
+#set ceph_osd_node_list = [ceph_osd_node_list]
+#end if
+
 #set credentials = $getVar('server_credentials', {})
 #set username = $credentials.get('username', 'root')
 #set password = $credentials.get('password', 'root')
@@ -55,9 +73,33 @@ $odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
 #set onos_hostname = $onos.hostname
 $onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_password=$password
 #end for
-[ceph]
-#for ceph in $cephs
-#set ceph_ip = $ceph.install.ip
-#set ceph_hostname = $ceph.hostname
-$ceph_hostname ansible_ssh_host=$ceph_ip ansible_ssh_user=$username ansible_ssh_password=$password
+[ceph_adm]
+#for ceph_adm in $ceph_adm_list
+#set ceph_adm_ip = $ceph_adm.install.ip
+#set ceph_adm_hostname = $ceph_adm.hostname
+$ceph_adm_hostname ansible_ssh_host=$ceph_adm_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ceph_mon]
+#for ceph_mon in $ceph_mon_list
+#set ceph_mon_ip = $ceph_mon.install.ip
+#set ceph_mon_hostname = $ceph_mon.hostname
+$ceph_mon_hostname ansible_ssh_host=$ceph_mon_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ceph_osd]
+#for ceph_osd in $ceph_osd_list
+#set ceph_osd_ip = $ceph_osd.install.ip
+#set ceph_osd_hostname = $ceph_osd.hostname
+$ceph_osd_hostname ansible_ssh_host=$ceph_osd_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[sec_patch]
+#for sec_patch in $sec_patch_list
+#set sec_patch_ip = $sec_patch.install.ip
+#set sec_patch_hostname = $sec_patch.hostname
+$sec_patch_hostname ansible_ssh_host=$sec_patch_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ceph_osd_node]
+#for ceph_osd_node in $ceph_osd_node_list
+#set ceph_osd_node_ip = $ceph_osd_node.install.ip
+#set ceph_osd_node_hostname = $ceph_osd_node.hostname
+$ceph_osd_node_hostname ansible_ssh_host=$ceph_osd_node_ip ansible_ssh_user=$username ansible_ssh_password=$password
 #end for
@@ -80,6 +80,11 @@ haproxy_hosts:
 $hostname: $ip_settings[$hostname]["mgmt"]["ip"]
 #end for
+
+host_index:
+#for $index, $item in enumerate($has)
+$item["hostname"]: $index
+#end for
 
 ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
 #set credentials = $getVar('service_credentials', {})
 #set console_credentials = $getVar('console_credentials', {})
@@ -95,6 +100,8 @@ ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
 #set dash_dbpass = $credentials.dashboard.password
 #set cinder_dbpass = $credentials.volume.password
 #set cinder_pass = $console_credentials.volume.password
+#set heat_dbpass = $credentials.heat.password
+#set heat_pass = $console_credentials.heat.password
 #set admin_pass = $console_credentials.admin.password
 #set neutron_pass = $console_credentials.network.password
 
@@ -130,6 +137,8 @@ CINDER_DBPASS: $cinder_dbpass
 CINDER_PASS: $cinder_pass
 NEUTRON_DBPASS: $neutron_pass
 NEUTRON_PASS: $neutron_pass
+HEAT_DBPASS: $heat_dbpass
+HEAT_PASS: $heat_pass
 
 #set neutron_service_plugins=['router']
 