From e8055850a1b99bad8eee148d87adf86c4538a120 Mon Sep 17 00:00:00 2001 From: xiaodongwang Date: Fri, 25 Jul 2014 09:40:54 -0700 Subject: [PATCH] change adapter code Change-Id: Id5709d8ac723b31596ec97cf251ef09f343372c6 --- compass/api/api.py | 367 +++++++--- compass/db/api/adapter.py | 268 +++----- compass/db/api/adapter_holder.py | 71 +- compass/db/api/cluster.py | 177 +++-- compass/db/api/database.py | 39 +- compass/db/api/host.py | 196 +++++- compass/db/api/machine.py | 2 +- compass/db/api/metadata.py | 141 ++-- compass/db/api/metadata_holder.py | 83 ++- compass/db/api/network.py | 27 +- compass/db/api/permission.py | 4 + compass/db/api/switch.py | 123 +++- compass/db/api/user.py | 17 +- compass/db/api/utils.py | 59 +- compass/db/models.py | 624 +++++++++--------- compass/tasks/tasks.py | 64 +- compass/tests/db/api/test_utils.py | 15 +- compass/utils/setting_wrapper.py | 5 +- conf/{package_adapter => adapter}/ceph.conf | 0 conf/adapter/chef_ceph.conf | 7 + conf/adapter/chef_openstack.conf | 7 + conf/{os_adapter => adapter}/general.conf | 1 - .../openstack.conf | 1 + conf/adapter/os_only.conf | 5 + conf/os_adapter/centos.conf | 3 - conf/os_adapter/cobbler_centos.conf | 3 - conf/os_adapter/cobbler_centos6.5.conf | 3 - conf/os_adapter/cobbler_ubuntu.conf | 3 - conf/os_adapter/cobbler_ubuntu12.04.conf | 3 - conf/os_adapter/ubuntu.conf | 3 - conf/os_metadata/general.conf | 6 +- conf/package_adapter/chef_ceph.conf | 4 - conf/package_adapter/chef_openstack.conf | 4 - conf/package_adapter/general.conf | 2 - conf/role/openstack_chef.conf | 11 +- 35 files changed, 1422 insertions(+), 926 deletions(-) rename conf/{package_adapter => adapter}/ceph.conf (100%) create mode 100644 conf/adapter/chef_ceph.conf create mode 100644 conf/adapter/chef_openstack.conf rename conf/{os_adapter => adapter}/general.conf (53%) rename conf/{package_adapter => adapter}/openstack.conf (60%) create mode 100644 conf/adapter/os_only.conf delete mode 100644 conf/os_adapter/centos.conf delete mode 100644 conf/os_adapter/cobbler_centos.conf delete mode 100644 conf/os_adapter/cobbler_centos6.5.conf delete mode 100644 conf/os_adapter/cobbler_ubuntu.conf delete mode 100644 conf/os_adapter/cobbler_ubuntu12.04.conf delete mode 100644 conf/os_adapter/ubuntu.conf delete mode 100644 conf/package_adapter/chef_ceph.conf delete mode 100644 conf/package_adapter/chef_openstack.conf delete mode 100644 conf/package_adapter/general.conf diff --git a/compass/api/api.py b/compass/api/api.py index 9204509f..0230fd3f 100644 --- a/compass/api/api.py +++ b/compass/api/api.py @@ -118,6 +118,40 @@ def _get_request_args(): return dict(request.args) +def _group_data_action(data, **data_callbacks): + if not data: + raise exception_handler.BadRequest( + 'no action to take' + ) + unsupported_keys = list(set(data) - set(data_callbacks)) + if unsupported_keys: + raise exception_handler.BadMethod( + 'unsupported actions: %s' % unsupported_keys + ) + callback_datas = {} + for data_key, data_value in data.items(): + callback = data_callbacks[data_key] + callback_datas.setdefault(id(callback), {})[data_key] = data_value + if len(callback_datas) > 1: + raise exception_handler.BadRequest( + 'multi actions are not supported' + ) + callback_ids = {} + for data_key, data_callback in data_callbacks.items(): + callback_ids[id(data_callback)] = data_callback + for callback_id, callback_data in callback_datas.items(): + return callback_ids[callback_id](**callback_data) + + +def _wrap_response(func, response_code): + def wrapped_func(*args, **kwargs): + return 
utils.make_json_response( + response_code, + func(*args, **kwargs) + ) + return wrapped_func + + def _login(use_cookie): """User login helper function.""" data = _get_request_data() @@ -245,18 +279,23 @@ def list_user_permissions(user_id): ) -@app.route("/users//permissions/actions", methods=['POST']) +@app.route("/users//action", methods=['POST']) @log_user_action @login_required -def update_user_permissions(user_id): +def take_user_action(user_id): """Update user permissions.""" data = _get_request_data() - return utils.make_json_response( - 200, - user_api.update_permissions( - current_user, user_id, - **data - ) + update_permissions_func = _wrap_response( + functools.partial( + user_api.update_permissions, current_user, user_id + ), + 200 + ) + return _group_data_action( + data, + add_permission=update_permissions_func, + remove_permissions=update_permissions_func, + set_permissions=update_permissions_func ) @@ -626,12 +665,12 @@ def _filter_port(data): ) -def _filter_vlans(data): - vlan_filter = {} - vlans = _get_data_list(data, 'vlans') - if vlans: - vlan_filter['resp_in'] = vlans - data['vlans'] = vlan_filter +def _filter_general(data, key): + general_filter = {} + general = _get_data_list(data, key) + if general: + general_filter['resp_in'] = general + data[key] = general_filter def _filter_tag(data): @@ -665,7 +704,7 @@ def list_switch_machines(switch_id): """Get switch machines.""" data = _get_request_args() _filter_port(data) - _filter_vlans(data) + _filter_general(data, 'vlans') _filter_tag(data) _filter_location(data) return utils.make_json_response( @@ -676,6 +715,26 @@ def list_switch_machines(switch_id): ) +@app.route("/switches//machines-hosts", methods=['GET']) +@log_user_action +@login_required +def list_switch_machines_hosts(switch_id): + """Get switch machines or hosts.""" + data = _get_request_args() + _filter_port(data) + _filter_general(data, 'vlans') + _filter_tag(data) + _filter_location(data) + _filter_general(data, 'os_name') + _filter_general(data, 'os_id') + return utils.make_json_response( + 200, + switch_api.list_switch_machines_hosts( + current_user, switch_id, **data + ) + ) + + @app.route("/switches//machines", methods=['POST']) @log_user_action @login_required @@ -705,23 +764,6 @@ def show_switch_machine(switch_id, machine_id): ) -@app.route( - '/switches//machines/actions', - methods=['POST'] -) -@log_user_action -@login_required -def update_switch_machines(switch_id): - """update switch machine.""" - data = _get_request_data() - return utils.make_json_response( - 200, - switch_api.update_switch_machines( - current_user, switch_id, **data - ) - ) - - @app.route( '/switches//machines/', methods=['PUT'] @@ -782,27 +824,31 @@ def delete_switch_machine(switch_id, machine_id): ) -@app.route("/switches//actions", methods=['POST']) +@app.route("/switches//action", methods=['POST']) @log_user_action @login_required def take_switch_action(switch_id): """update switch.""" data = _get_request_data() - if 'find_machines' in data: - return utils.make_json_response( - 202, - switch_api.poll_switch_machines( - current_user, switch_id, **data['find_machines'] - ) - ) - else: - return utils.make_json_response( - 200, - { - 'status': 'unknown action', - 'details': 'supported actions: %s' % str(['find_machines']) - } - ) + poll_switch_machines_func = _wrap_response( + functools.partial( + switch_api.poll_switch_machines, current_user, switch_id + ), + 202 + ) + update_switch_machines_func = _wrap_response( + functools.partial( + switch_api.update_switch_machines, 
current_user, switch_id + ), + 200 + ) + return _group_data_action( + data, + find_machines=poll_switch_machines_func, + add_machines=update_switch_machines_func, + remove_machines=update_switch_machines_func, + set_machines=update_switch_machines_func + ) @app.route("/switch-machines", methods=['GET']) @@ -812,8 +858,9 @@ def list_switchmachines(): """List switch machines.""" data = _get_request_args() _filter_ip(data) + _replace_data(data, {'ip_int': 'switch_ip_int'}) _filter_port(data) - _filter_vlans(data) + _filter_general(data, 'vlans') _filter_tag(data) _filter_location(data) return utils.make_json_response( @@ -824,6 +871,28 @@ def list_switchmachines(): ) +@app.route("/switches-machines-hosts", methods=['GET']) +@log_user_action +@login_required +def list_switchmachines_hosts(): + """List switch machines or hosts.""" + data = _get_request_args() + _filter_ip(data) + _replace_data(data, {'ip_int': 'switch_ip_int'}) + _filter_port(data) + _filter_general(data, 'vlans') + _filter_tag(data) + _filter_location(data) + _filter_general(data, 'os_name') + _filter_general(data, 'os_id') + return utils.make_json_response( + 200, + switch_api.list_switchmachines_hosts( + current_user, **data + ) + ) + + @app.route( '/switch-machines/', methods=['GET'] @@ -975,7 +1044,7 @@ def delete_machine(machine_id): ) -@app.route("/networks", methods=['GET']) +@app.route("/subnets", methods=['GET']) @log_user_action @login_required def list_subnets(): @@ -989,7 +1058,7 @@ def list_subnets(): ) -@app.route("/networks/", methods=['GET']) +@app.route("/subnets/", methods=['GET']) @log_user_action @login_required def show_subnet(subnet_id): @@ -1003,7 +1072,7 @@ def show_subnet(subnet_id): ) -@app.route("/networks", methods=['POST']) +@app.route("/subnets", methods=['POST']) @log_user_action @login_required def add_subnet(): @@ -1015,7 +1084,7 @@ def add_subnet(): ) -@app.route("/networks/", methods=['PUT']) +@app.route("/subnets/", methods=['PUT']) @log_user_action @login_required def update_subnet(subnet_id): @@ -1029,7 +1098,7 @@ def update_subnet(subnet_id): ) -@app.route("/networks/", methods=['DELETE']) +@app.route("/subnets/", methods=['DELETE']) @log_user_action @login_required def delete_subnet(subnet_id): @@ -1049,6 +1118,10 @@ def delete_subnet(subnet_id): def list_adapters(): """List adapters.""" data = _get_request_args() + _filter_general(data, 'name') + _filter_general(data, 'distributed_system_name') + _filter_general(data, 'os_installer_name') + _filter_general(data, 'package_installer_name') return utils.make_json_response( 200, adapter_api.list_adapters( @@ -1088,17 +1161,31 @@ def show_adapter_roles(adapter_id): @app.route("/adapters//metadata", methods=['GET']) @log_user_action @login_required -def show_metadata(adapter_id): +def show_adapter_metadata(adapter_id): """Get adapter metadata.""" data = _get_request_args() return utils.make_json_response( 200, - metadata_api.get_metadata( + metadata_api.get_package_metadata( current_user, adapter_id, **data ) ) +@app.route("/oses//metadata", methods=['GET']) +@log_user_action +@login_required +def show_os_metadata(os_id): + """Get os metadata.""" + data = _get_request_args() + return utils.make_json_response( + 200, + metadata_api.get_os_metadata( + current_user, os_id, **data + ) + ) + + @app.route("/clusters", methods=['GET']) @log_user_action @login_required @@ -1181,6 +1268,20 @@ def show_cluster_config(cluster_id): ) +@app.route("/clusters//metadata", methods=['GET']) +@log_user_action +@login_required +def 
show_cluster_metadata(cluster_id): + """Get cluster config.""" + data = _get_request_args() + return utils.make_json_response( + 200, + cluster_api.get_cluster_metadata( + current_user, cluster_id, **data + ) + ) + + @app.route("/clusters//config", methods=['PUT']) @log_user_action @login_required @@ -1233,37 +1334,37 @@ def delete_cluster_config(cluster_id): ) -@app.route("/clusters//review", methods=['POST']) -@log_user_action -@login_required -def review_cluster(cluster_id): - """review cluster""" - data = _get_request_data() - return utils.make_json_response( - 200, - cluster_api.review_cluster(current_user, cluster_id, **data) - ) - - -@app.route("/clusters//actions", methods=['POST']) +@app.route("/clusters//action", methods=['POST']) @log_user_action @login_required def take_cluster_action(cluster_id): """take cluster action.""" data = _get_request_data() - if 'deploy' in data: - return utils.make_json_response( - 202, - cluster_api.deploy_cluster( - current_user, cluster_id, **data['deploy'] - ) - ) - return utils.make_json_response( - 200, - { - 'status': 'unknown action', - 'details': 'supported actions: %s' % str(['deploy']) - } + update_cluster_hosts_func = _wrap_response( + functools.partial( + cluster_api.update_cluster_hosts, current_user, cluster_id + ), + 200 + ) + review_cluster_func = _wrap_response( + functools.partial( + cluster_api.review_cluster, current_user, cluster_id + ), + 200 + ) + deploy_cluster_func = _wrap_response( + functools.partial( + cluster_api.deploy_cluster, current_user, cluster_id + ), + 202 + ) + return _group_data_action( + data, + add_hosts=update_cluster_hosts_func, + set_hosts=update_cluster_hosts_func, + remove_hosts=update_cluster_hosts_func, + review=review_cluster_func, + deploy=deploy_cluster_func ) @@ -1383,18 +1484,6 @@ def delete_clusterhost(clusterhost_id): ) -@app.route("/clusters//hosts/actions", methods=['POST']) -@log_user_action -@login_required -def update_cluster_hosts(cluster_id): - """update cluster hosts.""" - data = _get_request_data() - return utils.make_json_response( - 200, - cluster_api.update_cluster_hosts(current_user, cluster_id, **data) - ) - - @app.route( "/clusters//hosts//config", methods=['GET'] @@ -1633,6 +1722,38 @@ def show_host(host_id): ) +@app.route("/machines-hosts", methods=['GET']) +@log_user_action +@login_required +def list_machines_or_hosts(): + """Get host.""" + data = _get_request_args() + _filter_tag(data) + _filter_location(data) + _filter_general(data, 'os_name') + _filter_general(data, 'os_name') + return utils.make_json_response( + 200, + host_api.list_machines_or_hosts( + current_user, **data + ) + ) + + +@app.route("/machines-hosts/", methods=['GET']) +@log_user_action +@login_required +def show_machine_or_host(host_id): + """Get host.""" + data = _get_request_args() + return utils.make_json_response( + 200, + host_api.get_machine_or_host( + current_user, host_id, **data + ) + ) + + @app.route("/hosts/", methods=['PUT']) @log_user_action @login_required @@ -1887,6 +2008,64 @@ def update_host_state(host_id): ) +def _poweron_host(*args, **kwargs): + return utils.make_json_response( + 202, + host_api.poweron_host( + *args, **kwargs + ) + ) + + +def _poweroff_host(*args, **kwargs): + return utils.make_json_response( + 202, + host_api.poweroff_host( + *args, **kwargs + ) + ) + + +def _reset_host(*args, **kwargs): + return utils.make_json_response( + 202, + host_api.reset_host( + *args, **kwargs + ) + ) + + +@app.route("/hosts//action", methods=['POST']) +@log_user_action +@login_required +def 
take_host_action(host_id): + """take host action.""" + data = _get_request_data() + poweron_func = _wrap_response( + functools.partial( + host_api.poweron_host, current_user, host_id + ), + 202 + ) + poweroff_func = _wrap_response( + functools.partial( + host_api.poweroff_host, current_user, host_id + ), + 202 + ) + reset_func = _wrap_response( + functools.partial( + host_api.reset_host, current_user, host_id + ) + ) + return _group_data_action( + data, + poweron=poweron_func, + poweroff=poweroff_func, + reset=reset_func, + ) + + def init(): logging.info('init flask') database.init() diff --git a/compass/db/api/adapter.py b/compass/db/api/adapter.py index a1101f67..a4db88ba 100644 --- a/compass/db/api/adapter.py +++ b/compass/db/api/adapter.py @@ -25,48 +25,13 @@ from compass.utils import setting_wrapper as setting from compass.utils import util -def _copy_adapters_from_parent(session, model, parent, system_name): - for child in parent.children: - if not child.adapters: - for adapter in parent.adapters: - if adapter.children: - continue - utils.add_db_object( - session, model, - True, - '%s(%s)' % (child.name, adapter.installer_name), - system_name=child, parent=adapter - ) - _copy_adapters_from_parent(session, model, child, system_name) - - -def _complement_os_adapters(session): - with session.begin(subtransactions=True): - root_oses = utils.list_db_objects( - session, models.OperatingSystem, - parent_id=None - ) - for root_os in root_oses: - _copy_adapters_from_parent( - session, models.OSAdapter, root_os, 'os' - ) - - -def _complement_distributed_system_adapters(session): - with session.begin(subtransactions=True): - root_dses = utils.list_db_objects( - session, models.DistributedSystem, - parent_id=None - ) - for root_ds in root_dses: - _copy_adapters_from_parent( - session, models.PackageAdapter, root_ds, 'distributed_system' - ) - - def _add_system(session, model, configs): parents = {} for config in configs: + logging.info( + 'add config %s to %s', + config, model + ) object = utils.add_db_object( session, model, True, config['NAME'], @@ -85,154 +50,111 @@ def _add_system(session, model, configs): def add_oses_internal(session): configs = util.load_configs(setting.OS_DIR) - with session.begin(subtransactions=True): - _add_system(session, models.OperatingSystem, configs) + _add_system(session, models.OperatingSystem, configs) def add_distributed_systems_internal(session): configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR) - with session.begin(subtransactions=True): - _add_system(session, models.DistributedSystem, configs) - - -def add_os_adapters_internal(session): - parents = {} - configs = util.load_configs(setting.OS_ADAPTER_DIR) - with session.begin(subtransactions=True): - for config in configs: - if 'OS' in config: - os = utils.get_db_object( - session, models.OperatingSystem, - name=config['OS'] - ) - else: - os = None - if 'INSTALLER' in config: - installer = utils.get_db_object( - session, models.OSInstaller, - name=config['INSTALLER'] - ) - else: - installer = None - object = utils.add_db_object( - session, models.OSAdapter, - True, config['NAME'], os=os, installer=installer - ) - parents[config['NAME']] = (object, config.get('PARENT', None)) - for name, (object, parent_name) in parents.items(): - if parent_name: - parent, _ = parents[parent_name] - else: - parent = None - utils.update_db_object( - session, object, parent=parent - ) - - _complement_os_adapters(session) - - -def add_package_adapters_internal(session): - parents = {} - configs = 
util.load_configs(setting.PACKAGE_ADAPTER_DIR) - with session.begin(subtransactions=True): - for config in configs: - if 'DISTRIBUTED_SYSTEM' in config: - distributed_system = utils.get_db_object( - session, models.DistributedSystem, - name=config['DISTRIBUTED_SYSTEM'] - ) - else: - distributed_system = None - if 'INSTALLER' in config: - installer = utils.get_db_object( - session, models.PackageInstaller, - name=config['INSTALLER'] - ) - else: - installer = None - object = utils.add_db_object( - session, models.PackageAdapter, - True, - config['NAME'], - distributed_system=distributed_system, - installer=installer, - supported_os_patterns=config.get('SUPPORTED_OS_PATTERNS', []) - ) - parents[config['NAME']] = (object, config.get('PARENT', None)) - for name, (object, parent_name) in parents.items(): - if parent_name: - parent, _ = parents[parent_name] - else: - parent = None - utils.update_db_object(session, object, parent=parent) - - _complement_distributed_system_adapters(session) - - -def add_roles_internal(session): - configs = util.load_configs(setting.PACKAGE_ROLE_DIR) - with session.begin(subtransactions=True): - for config in configs: - package_adapter = utils.get_db_object( - session, models.PackageAdapter, - name=config['ADAPTER_NAME'] - ) - for role_dict in config['ROLES']: - utils.add_db_object( - session, models.PackageAdapterRole, - True, role_dict['role'], package_adapter.id, - description=role_dict['description'], - optional=role_dict.get('optional', False) - ) + _add_system(session, models.DistributedSystem, configs) def add_adapters_internal(session): - with session.begin(subtransactions=True): - package_adapters = [ - package_adapter - for package_adapter in utils.list_db_objects( - session, models.PackageAdapter + parents = {} + configs = util.load_configs(setting.ADAPTER_DIR) + for config in configs: + logging.info('add config %s to adapter', config) + if 'DISTRIBUTED_SYSTEM' in config: + distributed_system = utils.get_db_object( + session, models.DistributedSystem, + name=config['DISTRIBUTED_SYSTEM'] ) - if package_adapter.deployable - ] - os_adapters = [ - os_adapter - for os_adapter in utils.list_db_objects( - session, models.OSAdapter + else: + distributed_system = None + if 'OS_INSTALLER' in config: + os_installer = utils.get_db_object( + session, models.OSInstaller, + name=config['OS_INSTALLER'] ) - if os_adapter.deployable + else: + os_installer = None + if 'PACKAGE_INSTALLER' in config: + package_installer = utils.get_db_object( + session, models.PackageInstaller, + name=config['PACKAGE_INSTALLER'] + ) + else: + package_installer = None + adapter = utils.add_db_object( + session, models.Adapter, + True, + config['NAME'], + display_name=config.get('DISPLAY_NAME', None), + distributed_system=distributed_system, + os_installer=os_installer, + package_installer=package_installer, + deployable=config.get('DEPLOYABLE', False) + ) + supported_os_patterns = [ + re.compile(supported_os_pattern) + for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', []) ] - adapters = [] - for os_adapter in os_adapters: - adapters.append(utils.add_db_object( - session, models.Adapter, True, - os_adapter.id, None - )) - for package_adapter in package_adapters: - adapters.append(utils.add_db_object( - session, models.Adapter, True, - None, package_adapter.id - )) - for os_adapter in os_adapters: - for os_pattern in ( - package_adapter.adapter_supported_os_patterns - ): - if re.match(os_pattern, os_adapter.name): - adapters.append(utils.add_db_object( - session, models.Adapter, 
True, - os_adapter.id, package_adapter.id - )) - break - return adapters + oses = utils.list_db_objects( + session, models.OperatingSystem + ) + for os in oses: + if not os.deployable: + continue + os_name = os.name + for supported_os_pattern in supported_os_patterns: + if supported_os_pattern.match(os_name): + utils.add_db_object( + session, models.AdapterOS, + True, + os.id, adapter.id + ) + break + parents[config['NAME']] = (adapter, config.get('PARENT', None)) + + for name, (adapter, parent_name) in parents.items(): + if parent_name: + parent, _ = parents[parent_name] + else: + parent = None + utils.update_db_object(session, adapter, parent=parent) + + +def add_roles_internal(session): + configs = util.load_configs(setting.ADAPTER_ROLE_DIR) + for config in configs: + logging.info( + 'add config to role', config + ) + adapter = utils.get_db_object( + session, models.Adapter, + name=config['ADAPTER_NAME'] + ) + for role_dict in config['ROLES']: + utils.add_db_object( + session, models.AdapterRole, + True, role_dict['role'], adapter.id, + display_name=role_dict.get('display_name', None), + description=role_dict.get('description', None), + optional=role_dict.get('optional', False) + ) def get_adapters_internal(session): adapter_mapping = {} - with session.begin(subtransactions=True): - adapters = utils.list_db_objects( - session, models.Adapter - ) - for adapter in adapters: + adapters = utils.list_db_objects( + session, models.Adapter + ) + for adapter in adapters: + if adapter.deployable: adapter_dict = adapter.to_dict() adapter_mapping[adapter.id] = adapter_dict + else: + logging.info( + 'ignore adapter %s since it is not deployable', + adapter_dict + ) return adapter_mapping diff --git a/compass/db/api/adapter_holder.py b/compass/db/api/adapter_holder.py index 425320ca..95024c54 100644 --- a/compass/db/api/adapter_holder.py +++ b/compass/db/api/adapter_holder.py @@ -24,16 +24,22 @@ from compass.db import exception SUPPORTED_FIELDS = [ - 'name', 'os', 'distributed_system', 'os_installer', 'package_installer' + 'name', + 'distributed_system_name', + 'os_installer_name', + 'package_installer_name', +] +RESP_FIELDS = [ + 'id', 'name', 'roles', 'os_installer_name', + 'package_installer_name', 'distributed_system_name', + 'supported_oses', 'display_name' +] +RESP_OS_FIELDS = [ + 'id', 'os_id', 'name' +] +RESP_ROLES_FIELDS = [ + 'id', 'name', 'description', 'optional' ] -OS_FIELD_MAPPING = { - 'os': 'os_name', - 'os_installer': 'installer_type' -} -PACKAGE_FIELD_MAPPING = { - 'distributed_system': 'distributed_system_name', - 'package_installer': 'installer_type' -} @database.run_in_session() @@ -70,30 +76,19 @@ def _filter_adapters(adapter_config, filter_name, filter_value): @user_api.check_user_permission_in_session( permission.PERMISSION_LIST_ADAPTERS ) +@utils.output_filters( + name=utils.general_filter_callback, + distributed_system_name=utils.general_filter_callback, + os_installer_name=utils.general_filter_callback, + package_installer_name=utils.general_filter_callback +) +@utils.wrap_to_dict( + RESP_FIELDS, + supported_oses=RESP_OS_FIELDS +) def list_adapters(session, lister, **filters): """list adapters.""" - translated_filters = {} - for filter_name, filter_value in filters: - if filter_name in OS_FIELD_MAPPING: - translated_filters.setdefault('os_adapter', {})[ - OS_FIELD_MAPPING[filter_name] - ] = filter_value - elif filter_name in PACKAGE_FIELD_MAPPING: - translated_filters.setdefault('package_adapter', {})[ - PACKAGE_FIELD_MAPPING[filter_name] - ] = filter_value - else: - 
translated_filters[filter_name] = filter_value - - filtered_adapter_dicts = [] - adapter_dicts = ADAPTER_MAPPING.values() - for adapter_dict in adapter_dicts: - if all([ - _filter_adapters(adapter_dict, filter_name, filter_value) - for filter_name, filter_value in translated_filters.items() - ]): - filtered_adapter_dicts.append(adapter_dict) - return filtered_adapter_dicts + return ADAPTER_MAPPING.values() @utils.supported_filters([]) @@ -101,6 +96,10 @@ def list_adapters(session, lister, **filters): @user_api.check_user_permission_in_session( permission.PERMISSION_LIST_ADAPTERS ) +@utils.wrap_to_dict( + RESP_FIELDS, + supported_oses=RESP_OS_FIELDS +) def get_adapter(session, getter, adapter_id, **kwargs): """get adapter.""" if adapter_id not in ADAPTER_MAPPING: @@ -115,15 +114,11 @@ def get_adapter(session, getter, adapter_id, **kwargs): @user_api.check_user_permission_in_session( permission.PERMISSION_LIST_ADAPTERS ) -def get_adapter_roles(getter, adapter_id, **kwargs): +@utils.wrap_to_dict(RESP_ROLES_FIELDS) +def get_adapter_roles(session, getter, adapter_id, **kwargs): """get adapter roles.""" if adapter_id not in ADAPTER_MAPPING: raise exception.RecordNotExists( 'adpater %s does not exist' % adapter_id ) - adapter_dict = ADAPTER_MAPPING[adapter_id] - if 'package_adapter' not in adapter_dict: - raise exception.RecordNotExists( - 'adapter %s does not contain package_adapter' % adapter_id - ) - return ADAPTER_MAPPING[adapter_id]['package_adapter']['roles'] + return ADAPTER_MAPPING[adapter_id].get('roles', []) diff --git a/compass/db/api/cluster.py b/compass/db/api/cluster.py index 89f4dcce..3f7161fb 100644 --- a/compass/db/api/cluster.py +++ b/compass/db/api/cluster.py @@ -30,7 +30,8 @@ SUPPORTED_FIELDS = [ ] SUPPORTED_CLUSTERHOST_FIELDS = [] RESP_FIELDS = [ - 'id', 'name', 'os_name', 'reinstall_distributed_system', + 'id', 'name', 'os_name', 'os_id', 'distributed_system_id', + 'reinstall_distributed_system', 'distributed_system_name', 'distributed_system_installed', 'owner', 'adapter_id', 'created_at', 'updated_at' @@ -51,10 +52,16 @@ RESP_CONFIG_FIELDS = [ 'created_at', 'updated_at' ] +RESP_METADATA_FIELDS = [ + 'os_config', + 'package_config' +] RESP_CLUSTERHOST_CONFIG_FIELDS = [ 'package_config', + 'os_config', 'config_step', 'config_validated', + 'networks', 'created_at', 'updated_at' ] @@ -67,15 +74,15 @@ RESP_CLUSTERHOST_STATE_FIELDS = [ 'created_at', 'updated_at' ] RESP_REVIEW_FIELDS = [ - 'cluster', 'hosts' + 'cluster', 'clusterhosts' ] -RESP_ACTION_FIELDS = [ - 'status', 'details' +RESP_DEPLOY_FIELDS = [ + 'status', 'cluster', 'clusterhosts' ] ADDED_FIELDS = ['name', 'adapter_id'] +OPTIONAL_ADDED_FIELDS = ['os_id'] UPDATED_FIELDS = ['name', 'reinstall_distributed_system'] -ADDED_CLUSTERHOST_FIELDS = ['machine_id'] -UPDATED_CLUSTERHOST_FIELDS = ['name', 'reinstall_os'] +ADDED_HOST_FIELDS = ['machine_id'] UPDATED_HOST_FIELDS = ['name', 'reinstall_os'] UPDATED_CONFIG_FIELDS = [ 'put_os_config', 'put_package_config', 'config_step' @@ -129,6 +136,25 @@ def _conditional_exception(cluster, exception_when_not_editable): return False +def is_cluster_validated( + session, cluster +): + if not cluster.config_validated: + raise exception.Forbidden( + 'cluster %s is not validated' % cluster.name + ) + for clusterhost in cluster.clusterhosts: + if not clusterhost.config_validated: + raise exception.Forbidden( + 'clusterhost %s is not validated' % clusterhost.name + ) + host = clusterhost.host + if not host.config_validated: + raise exception.Forbidden( + 'host %s is not validated' % 
host.name + ) + + def is_cluster_editable( session, cluster, user, reinstall_distributed_system_set=False, @@ -150,7 +176,9 @@ def is_cluster_editable( return True -@utils.supported_filters(ADDED_FIELDS) +@utils.supported_filters( + ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS +) @database.run_in_session() @user_api.check_user_permission_in_session( permission.PERMISSION_ADD_CLUSTER @@ -160,7 +188,8 @@ def add_cluster(session, creator, name, adapter_id, **kwargs): """Create a cluster.""" return utils.add_db_object( session, models.Cluster, True, - name, adapter_id=adapter_id, creator_id=creator.id, **kwargs + name, creator_id=creator.id, adapter_id=adapter_id, + **kwargs ) @@ -212,6 +241,31 @@ def get_cluster_config(session, getter, cluster_id, **kwargs): ) +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_LIST_METADATAS +) +@utils.wrap_to_dict(RESP_METADATA_FIELDS) +def get_cluster_metadata(session, getter, cluster_id, **kwargs): + """Get cluster metadata.""" + cluster = utils.get_db_object( + session, models.Cluster, id=cluster_id + ) + metadatas = {} + os = cluster.os + if os: + metadatas['os_config'] = metadata_api.get_os_metadata_internal( + os.id + ) + adapter = cluster.adapter + if adapter: + metadatas['package_ocnfig'] = ( + metadata_api.get_package_metadata_internal(adapter.id) + ) + return metadatas + + @user_api.check_user_permission_in_session( permission.PERMISSION_ADD_CLUSTER_CONFIG ) @@ -225,7 +279,7 @@ def update_cluster_config_internal(session, updater, cluster, **kwargs): os_config = cluster.os_config if os_config: metadata_api.validate_os_config( - os_config, cluster.adapter_id + os_config, cluster.os_id ) package_config = cluster.package_config if package_config: @@ -278,8 +332,8 @@ def del_cluster_config(session, deleter, cluster_id): @utils.supported_filters( - ADDED_CLUSTERHOST_FIELDS, - optional_support_keys=UPDATED_CLUSTERHOST_FIELDS + ADDED_HOST_FIELDS, + optional_support_keys=UPDATED_HOST_FIELDS ) def add_clusterhost_internal( session, cluster, @@ -287,41 +341,32 @@ def add_clusterhost_internal( machine_id=None, **kwargs ): from compass.db.api import host as host_api - host_dict = {} - clusterhost_dict = {} - for key, value in kwargs.items(): - if key in UPDATED_HOST_FIELDS: - host_dict[key] = value - else: - clusterhost_dict[key] = value - with session.begin(subtransactions=True): - host = utils.get_db_object( - session, models.Host, False, id=machine_id - ) - if host: - if host_api.is_host_editable( - session, host, cluster.creator, - reinstall_os_set=host_dict.get('reinstall_os', False), - exception_when_not_editable=False - ): - utils.update_db_object( - session, host, adapter=cluster.adapter.os_adapter, - **host_dict - ) - else: - logging.info('host %s is not editable', host.name) - else: - utils.add_db_object( - session, models.Host, False, machine_id, - os=cluster.os, - adapter=cluster.adapter.os_adapter, - creator=cluster.creator, - **host_dict + host = utils.get_db_object( + session, models.Host, False, id=machine_id + ) + if host: + if host_api.is_host_editable( + session, host, cluster.creator, + reinstall_os_set=kwargs.get('reinstall_os', False), + exception_when_not_editable=False + ): + utils.update_db_object( + session, host, + **kwargs ) - return utils.add_db_object( - session, models.ClusterHost, exception_when_existing, - cluster.id, machine_id, **clusterhost_dict + else: + logging.info('host %s is not editable', host.name) + else: + utils.add_db_object( + 
session, models.Host, False, machine_id, + os=cluster.os, + creator=cluster.creator, + **kwargs ) + return utils.add_db_object( + session, models.ClusterHost, exception_when_existing, + cluster.id, machine_id + ) def _add_clusterhosts(session, cluster, machine_dicts): @@ -404,8 +449,8 @@ def get_clusterhost(session, getter, clusterhost_id, **kwargs): @utils.supported_filters( - ADDED_CLUSTERHOST_FIELDS, - optional_support_keys=UPDATED_CLUSTERHOST_FIELDS + ADDED_HOST_FIELDS, + optional_support_keys=UPDATED_HOST_FIELDS ) @database.run_in_session() @user_api.check_user_permission_in_session( @@ -629,13 +674,17 @@ def update_cluster_hosts( return cluster.clusterhosts -@utils.supported_filters([]) +@utils.supported_filters(optional_support_keys=['review']) @database.run_in_session() @user_api.check_user_permission_in_session( permission.PERMISSION_REVIEW_CLUSTER ) -@utils.wrap_to_dict(RESP_REVIEW_FIELDS) -def review_cluster(session, reviewer, cluster_id): +@utils.wrap_to_dict( + RESP_REVIEW_FIELDS, + cluster=RESP_CONFIG_FIELDS, + clusterhosts=RESP_CLUSTERHOST_CONFIG_FIELDS +) +def review_cluster(session, reviewer, cluster_id, review={}, **kwargs): """review cluster.""" from compass.db.api import host as host_api cluster = utils.get_db_object( @@ -645,7 +694,7 @@ def review_cluster(session, reviewer, cluster_id): os_config = cluster.os_config if os_config: metadata_api.validate_os_config( - os_config, cluster.adapter_id, True + os_config, cluster.os_id, True ) for clusterhost in cluster.clusterhosts: host = clusterhost.host @@ -662,8 +711,9 @@ def review_cluster(session, reviewer, cluster_id): os_config, host_os_config ) metadata_api.validate_os_config( - deployed_os_config, host.adapter_id, True + deployed_os_config, host.os_id, True ) + host_api.validate_host(session, host) host.deployed_os_config = deployed_os_config host.config_validated = True package_config = cluster.package_config @@ -684,37 +734,38 @@ def review_cluster(session, reviewer, cluster_id): clusterhost.config_validated = True cluster.config_validated = True return { - 'cluster': cluster.to_dict(), - 'clusterhosts': [ - clusterhost.to_dict() - for clusterhost in cluster.clusterhosts - ] + 'cluster': cluster, + 'clusterhosts': cluster.clusterhosts } -@utils.supported_filters(optional_support_keys=['clusterhosts']) +@utils.supported_filters(optional_support_keys=['deploy']) @database.run_in_session() @user_api.check_user_permission_in_session( permission.PERMISSION_DEPLOY_CLUSTER ) -@utils.wrap_to_dict(RESP_ACTION_FIELDS) +@utils.wrap_to_dict( + RESP_DEPLOY_FIELDS, + cluster=RESP_CONFIG_FIELDS, + clusterhosts=RESP_CLUSTERHOST_FIELDS +) def deploy_cluster( - session, deployer, cluster_id, clusterhosts=[], **kwargs + session, deployer, cluster_id, deploy={}, **kwargs ): """deploy cluster.""" from compass.tasks import client as celery_client cluster = utils.get_db_object( session, models.Cluster, id=cluster_id ) - is_cluster_editable(session, cluster, deployer) + is_cluster_validated(session, cluster) celery_client.celery.send_task( 'compass.tasks.deploy', - (cluster_id, clusterhosts) + (cluster_id, deploy.get('clusterhosts', [])) ) return { 'status': 'deploy action sent', - 'details': { - } + 'cluster': cluster, + 'clusterhosts': cluster.clusterhosts } diff --git a/compass/db/api/database.py b/compass/db/api/database.py index d478272c..389608d0 100644 --- a/compass/db/api/database.py +++ b/compass/db/api/database.py @@ -182,27 +182,12 @@ def _setup_distributed_systems(distributed_system_session): ) -def 
_setup_os_adapters(adapter_session): - """Initialize os adapter table.""" - logging.info('setup os adapter table') - from compass.db.api import adapter - adapter.add_os_adapters_internal( - adapter_session) - - -def _setup_package_adapters(adapter_session): - """Initialize package adapter table.""" - logging.info('setup package adapter table') - from compass.db.api import adapter - adapter.add_package_adapters_internal( - adapter_session) - - def _setup_adapters(adapter_session): - """Initialize adapter table.""" + """Initialize package adapter table.""" logging.info('setup adapter table') from compass.db.api import adapter - adapter.add_adapters_internal(adapter_session) + adapter.add_adapters_internal( + adapter_session) def _setup_os_fields(field_session): @@ -233,9 +218,9 @@ def _setup_package_metadatas(metadata_session): metadata.add_package_metadata_internal(metadata_session) -def _setup_package_adapter_roles(role_session): +def _setup_adapter_roles(role_session): """Initialize package adapter role table.""" - logging.info('setup package adapter role table') + logging.info('setup adapter role table') from compass.db.api import adapter adapter.add_roles_internal(role_session) @@ -251,10 +236,8 @@ def create_db(my_session): _setup_package_installers(my_session) _setup_oses(my_session) _setup_distributed_systems(my_session) - _setup_os_adapters(my_session) - _setup_package_adapters(my_session) - _setup_package_adapter_roles(my_session) _setup_adapters(my_session) + _setup_adapter_roles(my_session) _setup_os_fields(my_session) _setup_package_fields(my_session) _setup_os_metadatas(my_session) @@ -284,22 +267,18 @@ def create_table(my_session, table): models.PackageInstaller, models.OperatingSystem, models.DistributedSystems, - models.OSAdapter, - models.PackageAdapter, models.Adapter ]: _setup_os_installers(my_session) _setup_package_installers(my_session) - _setup_os_adapters(my_session) - _setup_package_adapters(my_session) - _setup_package_adapter_roles(my_session) + _setup_adapter_roles(my_session) _setup_adapters(my_session) _setup_os_fields(my_session) _setup_os_metadatas(my_session) _setup_package_fields(my_session) _setup_package_metadatas(my_session) - elif table == models.PackageAdapterRole: - _setup_package_adapter_roles(my_session) + elif table == models.AdapterRole: + _setup_adapter_roles(my_session) elif table in [ models.OSConfigField, models.PackageConfigField, diff --git a/compass/db/api/host.py b/compass/db/api/host.py index a09ea77e..d2ea77c4 100644 --- a/compass/db/api/host.py +++ b/compass/db/api/host.py @@ -25,12 +25,13 @@ from compass.db import models SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac'] +SUPPORTED_MACHINE_HOST_FIELDS = ['mac', 'tag', 'location', 'os_name', 'os_id'] SUPPORTED_NETOWORK_FIELDS = [ 'interface', 'ip', 'subnet', 'is_mgmt', 'is_promiscuous' ] RESP_FIELDS = [ - 'id', 'name', 'os_name', 'owner', 'mac', - 'reinstall_os', 'os_installed', 'tag', 'location', + 'id', 'name', 'os_name', 'os_id', 'owner', 'mac', + 'reinstall_os', 'os_installed', 'tag', 'location', 'networks', 'created_at', 'updated_at' ] RESP_CLUSTER_FIELDS = [ @@ -40,10 +41,19 @@ RESP_CLUSTER_FIELDS = [ 'adapter_id', 'created_at', 'updated_at' ] RESP_NETWORK_FIELDS = [ - 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous' + 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous', + 'created_at', 'updated_at' ] RESP_CONFIG_FIELDS = [ 'os_config', + 'config_setp', + 'config_validated', + 'networks', + 'created_at', + 'updated_at' +] +RESP_DEPLOY_FIELDS = [ + 
'status', 'host' ] UPDATED_FIELDS = ['name', 'reinstall_os'] UPDATED_CONFIG_FIELDS = [ @@ -81,6 +91,35 @@ def list_hosts(session, lister, **filters): ) +@utils.supported_filters( + optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_LIST_HOSTS +) +@utils.output_filters( + missing_ok=True, + tag=utils.general_filter_callback, + location=utils.general_filter_callback, + os_name=utils.general_filter_callback, + os_id=utils.general_filter_callback +) +@utils.wrap_to_dict(RESP_FIELDS) +def list_machines_or_hosts(session, lister, **filters): + """List hosts.""" + machines = utils.list_db_objects( + session, models.Machine, **filters + ) + machines_or_hosts = [] + for machine in machines: + host = machine.host + if host: + machines_or_hosts.append(host) + else: + machines_or_hosts.append(machine) + return machines_or_hosts + + @utils.supported_filters([]) @database.run_in_session() @user_api.check_user_permission_in_session( @@ -94,6 +133,24 @@ def get_host(session, getter, host_id, **kwargs): ) +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_LIST_HOSTS +) +@utils.wrap_to_dict(RESP_FIELDS) +def get_machine_or_host(session, getter, host_id, **kwargs): + """get host info.""" + machine = utils.get_db_object( + session, models.Machine, id=host_id + ) + host = machine.host + if host: + return host + else: + return machine + + @utils.supported_filters([]) @database.run_in_session() @user_api.check_user_permission_in_session( @@ -117,27 +174,54 @@ def _conditional_exception(host, exception_when_not_editable): return False +def is_host_validated(session, host): + if not host.config_validated: + raise exception.Forbidden( + 'host %s is not validated' % host.name + ) + + def is_host_editable( session, host, user, reinstall_os_set=False, exception_when_not_editable=True ): - with session.begin(subtransactions=True): - if reinstall_os_set: - if host.state.state == 'INSTALLING': - return _conditional_exception( - host, exception_when_not_editable - ) - elif not host.reinstall_os: - return _conditional_exception( - host, exception_when_not_editable - ) - if not user.is_admin and host.creator_id != user.id: + if reinstall_os_set: + if host.state.state == 'INSTALLING': return _conditional_exception( host, exception_when_not_editable ) + elif not host.reinstall_os: + return _conditional_exception( + host, exception_when_not_editable + ) + if not user.is_admin and host.creator_id != user.id: + return _conditional_exception( + host, exception_when_not_editable + ) return True +def validate_host(session, host): + mgmt_interface_set = False + for host_network in host.host_networks: + if host_network.is_mgmt: + if mgmt_interface_set: + raise exception.InvalidParameter( + '%s multi interfaces set mgmt ' % host.name + ) + if host_network.is_promiscuous: + raise exception.InvalidParameter( + '%s interface %s is mgmt but promiscuous' % ( + host.name, host_network.interface + ) + ) + mgmt_interface_set = True + if not mgmt_interface_set: + raise exception.InvalidParameter( + 'host has no mgmt interface' % host.name + ) + + @utils.supported_filters(UPDATED_FIELDS) @database.run_in_session() @user_api.check_user_permission_in_session( @@ -401,3 +485,87 @@ def update_host_state(session, updater, host_id, **kwargs): ) utils.update_db_object(session, host.state, **kwargs) return host.state_dict() + + 
+@utils.supported_filters(optional_support_keys=['poweron']) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_DEPLOY_HOST +) +@utils.wrap_to_dict( + RESP_DEPLOY_FIELDS, + host=RESP_CONFIG_FIELDS +) +def poweron_host( + session, deployer, host_id, poweron={}, **kwargs +): + """power on host.""" + from compass.tasks import client as celery_client + host = utils.get_db_object( + session, models.Host, id=host_id + ) + is_host_validated(session, host) + celery_client.celery.send_task( + 'compass.tasks.poweron_host', + (host_id,) + ) + return { + 'status': 'poweron %s action sent' % host.name, + 'host': host + } + + +@utils.supported_filters(optional_support_keys=['poweroff']) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_DEPLOY_HOST +) +@utils.wrap_to_dict( + RESP_DEPLOY_FIELDS, + host=RESP_CONFIG_FIELDS +) +def poweroff_host( + session, deployer, host_id, poweroff={}, **kwargs +): + """power off host.""" + from compass.tasks import client as celery_client + host = utils.get_db_object( + session, models.Host, id=host_id + ) + is_host_validated(session, host) + celery_client.celery.send_task( + 'compass.tasks.poweroff_host', + (host_id,) + ) + return { + 'status': 'poweroff %s action sent' % host.name, + 'host': host + } + + +@utils.supported_filters(optional_support_keys=['reset']) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_DEPLOY_HOST +) +@utils.wrap_to_dict( + RESP_DEPLOY_FIELDS, + host=RESP_CONFIG_FIELDS +) +def reset_host( + session, deployer, host_id, reset={}, **kwargs +): + """reset host.""" + from compass.tasks import client as celery_client + host = utils.get_db_object( + session, models.Host, id=host_id + ) + is_host_validated(session, host) + celery_client.celery.send_task( + 'compass.tasks.reset_host', + (host_id,) + ) + return { + 'status': 'reset %s action sent' % host.name, + 'host': host + } diff --git a/compass/db/api/machine.py b/compass/db/api/machine.py index 9220e532..87686183 100644 --- a/compass/db/api/machine.py +++ b/compass/db/api/machine.py @@ -26,7 +26,7 @@ from compass.utils import setting_wrapper as setting from compass.utils import util -SUPPORTED_FIELDS = ['mac', 'tag'] +SUPPORTED_FIELDS = ['mac', 'tag', 'location'] UPDATED_FIELDS = ['ipmi_credentials', 'tag', 'location'] PATCHED_FIELDS = [ 'patched_ipmi_credentials', 'patched_tag', diff --git a/compass/db/api/metadata.py b/compass/db/api/metadata.py index 2ba31fb7..5fb66f85 100644 --- a/compass/db/api/metadata.py +++ b/compass/db/api/metadata.py @@ -45,10 +45,9 @@ def add_os_field_internal(session): setting.OS_FIELD_DIR, env_locals=validator.VALIDATOR_LOCALS ) - with session.begin(subtransactions=True): - return _add_field_internal( - session, models.OSConfigField, configs - ) + return _add_field_internal( + session, models.OSConfigField, configs + ) def add_package_field_internal(session): configs = util.load_configs( setting.PACKAGE_FIELD_DIR, env_locals=validator.VALIDATOR_LOCALS ) - with session.begin(subtransactions=True): - return _add_field_internal( - session, models.PackageConfigField, configs - ) + return _add_field_internal( + session, models.PackageConfigField, configs + ) def _add_metadata( session, field_model, metadata_model, name, config, - parent=None, adapter=None + parent=None, **kwargs ): - metadata = config.get('_self', {}) - if 'field' in metadata: + metadata_self = config.get('_self', {}) + if 'field' in 
metadata_self: field = utils.get_db_object( - session, field_model, field=metadata['field'] + session, field_model, field=metadata_self['field'] ) else: field = None - object = utils.add_db_object( + metadata = utils.add_db_object( session, metadata_model, True, - name, adapter=adapter, parent=parent, field=field, - display_name=metadata.get('display_name', name), - description=metadata.get('description', None), - is_required=metadata.get('is_required', False), - required_in_whole_config=metadata.get( + name, parent=parent, field=field, + display_name=metadata_self.get('display_name', name), + description=metadata_self.get('description', None), + is_required=metadata_self.get('is_required', False), + required_in_whole_config=metadata_self.get( 'required_in_whole_config', False ), - mapping_to=metadata.get('mapping_to', None), - validator=metadata.get('validator', None), - js_validator=metadata.get('js_validator', None), - default_value=metadata.get('default_value', None), - options=metadata.get('options', []), - required_in_options=metadata.get('required_in_options', False) + mapping_to=metadata_self.get('mapping_to', None), + validator=metadata_self.get('validator', None), + js_validator=metadata_self.get('js_validator', None), + default_value=metadata_self.get('default_value', None), + options=metadata_self.get('options', []), + required_in_options=metadata_self.get('required_in_options', False), + **kwargs ) for key, value in config.items(): if key not in '_self': _add_metadata( session, field_model, metadata_model, key, value, - parent=object, adapter=adapter, + parent=metadata, **kwargs ) - return object + return metadata def add_os_metadata_internal(session): @@ -104,19 +103,18 @@ def add_os_metadata_internal(session): setting.OS_METADATA_DIR, env_locals=validator.VALIDATOR_LOCALS ) - with session.begin(subtransactions=True): - for config in configs: - adapter = utils.get_db_object( - session, models.OSAdapter, name=config['ADAPTER'] - ) - for key, value in config['METADATA'].items(): - os_metadatas.append(_add_metadata( - session, models.OSConfigField, - models.OSConfigMetadata, - key, value, parent=None, - adapter=adapter - )) - return os_metadatas + for config in configs: + os = utils.get_db_object( + session, models.OperatingSystem, name=config['OS'] + ) + for key, value in config['METADATA'].items(): + os_metadatas.append(_add_metadata( + session, models.OSConfigField, + models.OSConfigMetadata, + key, value, parent=None, + os=os + )) + return os_metadatas def add_package_metadata_internal(session): @@ -125,31 +123,52 @@ def add_package_metadata_internal(session): setting.PACKAGE_METADATA_DIR, env_locals=validator.VALIDATOR_LOCALS ) - with session.begin(subtransactions=True): - for config in configs: - adapter = utils.get_db_object( - session, models.PackageAdapter, name=config['ADAPTER'] - ) - for key, value in config['METADATA'].items(): - package_metadatas.append(_add_metadata( - session, models.PackageConfigField, - models.PackageConfigMetadata, - key, value, parent=None, - adapter=adapter - )) - return package_metadatas - - -def get_metadatas_internal(session): - metadata_mapping = {} - with session.begin(subtransactions=True): - adapters = utils.list_db_objects( - session, models.Adapter + for config in configs: + adapter = utils.get_db_object( + session, models.Adapter, name=config['ADAPTER'] ) - for adapter in adapters: + for key, value in config['METADATA'].items(): + package_metadatas.append(_add_metadata( + session, models.PackageConfigField, + 
models.PackageConfigMetadata, + key, value, parent=None, + adapter=adapter + )) + return package_metadatas + + +def get_package_metadatas_internal(session): + metadata_mapping = {} + adapters = utils.list_db_objects( + session, models.Adapter + ) + for adapter in adapters: + if adapter.deployable: metadata_dict = adapter.metadata_dict() metadata_mapping[adapter.id] = metadata_dict - return metadata_mapping + else: + logging.info( + 'ignore metadata since its adapter %s is not deployable', + adapter.id + ) + return metadata_mapping + + +def get_os_metadatas_internal(session): + metadata_mapping = {} + oses = utils.list_db_objects( + session, models.OperatingSystem + ) + for os in oses: + if os.deployable: + metadata_dict = os.metadata_dict() + metadata_mapping[os.id] = metadata_dict + else: + logging.info( + 'ignore metadata since its os %s is not deployable', + os.id + ) + return metadata_mapping def _validate_self( diff --git a/compass/db/api/metadata_holder.py b/compass/db/api/metadata_holder.py index a3f14398..a91bacaa 100644 --- a/compass/db/api/metadata_holder.py +++ b/compass/db/api/metadata_holder.py @@ -25,41 +25,43 @@ from compass.db import exception @database.run_in_session() def load_metadatas(session): - global METADATA_MAPPING + global OS_METADATA_MAPPING + global PACKAGE_METADATA_MAPPING logging.info('load metadatas into memory') - METADATA_MAPPING = metadata_api.get_metadatas_internal(session) - - -METADATA_MAPPING = {} - - -def _validate_config( - config, adapter_id, - metadata_mapping, metadata_field, whole_check -): - if adapter_id not in metadata_mapping: - raise exception.InvalidParameter( - 'adapter id %s is not found in metadata mapping' % adapter_id - ) - metadatas = metadata_mapping[adapter_id] - if metadata_field not in metadatas: - return - metadata_api.validate_config_internal( - config, metadatas[metadata_field], whole_check + OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session) + PACKAGE_METADATA_MAPPING = ( + metadata_api.get_package_metadatas_internal(session) ) -def validate_os_config(config, adapter_id, whole_check=False): +OS_METADATA_MAPPING = {} +PACKAGE_METADATA_MAPPING = {} + + +def _validate_config( + config, id, metadata_mapping, whole_check +): + if id not in metadata_mapping: + raise exception.InvalidParameter( + 'adapter id %s is not found in metadata mapping' % id + ) + metadatas = metadata_mapping[id] + metadata_api.validate_config_internal( + config, metadatas, whole_check + ) + + +def validate_os_config(config, os_id, whole_check=False): _validate_config( - config, adapter_id, METADATA_MAPPING, 'os_config', + config, os_id, OS_METADATA_MAPPING, whole_check ) def validate_package_config(config, adapter_id, whole_check=False): _validate_config( - config, adapter_id, METADATA_MAPPING, - 'package_config', whole_check + config, adapter_id, PACKAGE_METADATA_MAPPING, + whole_check ) @@ -85,15 +87,38 @@ def _filter_metadata(metadata): return filtered_metadata +def get_package_metadata_internal(adapter_id): + """get package metadata internal.""" + if adapter_id not in PACKAGE_METADATA_MAPPING: + raise exception.RecordNotExists( + 'adpater %s does not exist' % adapter_id + ) + return _filter_metadata(PACKAGE_METADATA_MAPPING[adapter_id]) + + @utils.supported_filters([]) @database.run_in_session() @user_api.check_user_permission_in_session( permission.PERMISSION_LIST_METADATAS ) -def get_metadata(session, getter, adapter_id, **kwargs): - """get adapter.""" - if adapter_id not in METADATA_MAPPING: +def get_package_metadata(session, 
getter, adapter_id, **kwargs): + return get_package_metadata_internal(adapter_id) + + +def get_os_metadata_internal(os_id): + """get os metadata internal.""" + if os_id not in OS_METADATA_MAPPING: raise exception.RecordNotExists( - 'adpater %s does not exist' % adapter_id + 'os %s does not exist' % os_id ) - return _filter_metadata(METADATA_MAPPING[adapter_id]) + return _filter_metadata(OS_METADATA_MAPPING[os_id]) + + +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission_in_session( + permission.PERMISSION_LIST_METADATAS +) +def get_os_metadata(session, getter, os_id, **kwargs): + """get os metadatas.""" + return get_os_metadata_internal(os_id) diff --git a/compass/db/api/network.py b/compass/db/api/network.py index d7033156..7eac06a8 100644 --- a/compass/db/api/network.py +++ b/compass/db/api/network.py @@ -24,10 +24,19 @@ from compass.db import exception from compass.db import models -SUPPORTED_FIELDS = ['subnet'] -RESP_FIELDS = ['id', 'subnet', 'created_at', 'updated_at'] +SUPPORTED_FIELDS = ['subnet', 'name'] +RESP_FIELDS = [ + 'id', 'name', 'subnet', 'created_at', 'updated_at' +] ADDED_FIELDS = ['subnet'] -UPDATED_FIELDS = ['subnet'] +OPTIONAL_ADDED_FIELDS = ['name'] +IGNORE_ADDED_FIELDS = [ + 'id', 'created_at', 'updated_at' +] +UPDATED_FIELDS = ['subnet', 'name'] +IGNORE_UPDATED_FIELDS = [ + 'id', 'created_at', 'updated_at' +] def _check_subnet(subnet): @@ -65,7 +74,10 @@ def get_subnet(session, getter, subnet_id, **kwargs): ) -@utils.supported_filters(ADDED_FIELDS) +@utils.supported_filters( + ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS, + ignore_support_keys=IGNORE_ADDED_FIELDS +) @utils.input_validates(subnet=_check_subnet) @database.run_in_session() @user_api.check_user_permission_in_session( @@ -75,11 +87,14 @@ def get_subnet(session, getter, subnet_id, **kwargs): def add_subnet(session, creator, subnet, **kwargs): """Create a subnet.""" return utils.add_db_object( - session, models.Network, True, subnet + session, models.Network, True, subnet, **kwargs ) -@utils.supported_filters(UPDATED_FIELDS) +@utils.supported_filters( + optional_support_keys=UPDATED_FIELDS, + ignore_support_keys=IGNORE_UPDATED_FIELDS +) @utils.input_validates(subnet=_check_subnet) @database.run_in_session() @user_api.check_user_permission_in_session( diff --git a/compass/db/api/permission.py b/compass/db/api/permission.py index baaac6ef..3e20f7ff 100644 --- a/compass/db/api/permission.py +++ b/compass/db/api/permission.py @@ -130,6 +130,9 @@ PERMISSION_REVIEW_CLUSTER = PermissionWrapper( PERMISSION_DEPLOY_CLUSTER = PermissionWrapper( 'deploy_cluster', 'deploy cluster', 'deploy cluster' ) +PERMISSION_DEPLOY_HOST = PermissionWrapper( + 'deploy_host', 'deploy host', 'deploy host' +) PERMISSION_GET_CLUSTER_STATE = PermissionWrapper( 'get_cluster_state', 'get cluster state', 'get cluster state' ) @@ -243,6 +246,7 @@ PERMISSIONS = [ PERMISSION_DEL_HOST_NETWORK, PERMISSION_GET_HOST_STATE, PERMISSION_UPDATE_HOST_STATE, + PERMISSION_DEPLOY_HOST, PERMISSION_LIST_CLUSTERHOSTS, PERMISSION_LIST_CLUSTERHOST_CONFIG, PERMISSION_ADD_CLUSTERHOST_CONFIG, diff --git a/compass/db/api/switch.py b/compass/db/api/switch.py index 95f4ee7b..b8f3dd52 100644 --- a/compass/db/api/switch.py +++ b/compass/db/api/switch.py @@ -28,8 +28,20 @@ from compass.utils import setting_wrapper as setting SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state'] SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state'] -SUPPORTED_SWITCH_MACHINES_FIELDS = ['ip_int', 'port', 'vlans', 'mac', 'tag'] 
-SUPPORTED_MACHINES_FIELDS = ['port', 'vlans', 'mac', 'tag'] +SUPPORTED_SWITCH_MACHINES_FIELDS = [ + 'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location' +] +SUPPORTED_MACHINES_FIELDS = [ + 'port', 'vlans', 'mac', 'tag', 'location' +] +SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [ + 'switch_ip_int', 'port', 'vlans', 'mac', + 'tag', 'location', 'os_name', 'os_id' +] +SUPPORTED_MACHINES_HOSTS_FIELDS = [ + 'port', 'vlans', 'mac', 'tag', 'location', + 'os_name', 'os_id' +] ADDED_FIELDS = ['ip'] OPTIONAL_ADDED_FIELDS = ['credentials', 'vendor', 'state', 'err_msg'] UPDATED_FIELDS = ['credentials', 'vendor', 'state', 'err_msg'] @@ -67,10 +79,15 @@ RESP_ACTION_FIELDS = [ 'status', 'details' ] RESP_MACHINES_FIELDS = [ - 'id', 'switch_id', 'machine_id', 'port', 'vlans', 'mac', + 'id', 'switch_id', 'switch_ip', 'machine_id', 'port', 'vlans', 'mac', 'ipmi_credentials', 'tag', 'location', 'created_at', 'updated_at' ] +RESP_MACHINES_HOSTS_FIELDS = [ + 'id', 'switch_id', 'switch_ip', 'machine_id', 'port', 'vlans', 'mac', + 'ipmi_credentials', 'tag', 'location', + 'name', 'os_name', 'clusters' +] def _check_credentials_version(version): @@ -354,10 +371,9 @@ def filter_machine_internal(filters, port): def get_switch_machines_internal(session, **filters): - with session.begin(subtransactions=True): - return utils.list_db_objects( - session, models.SwitchMachine, **filters - ) + return utils.list_db_objects( + session, models.SwitchMachine, **filters + ) def _filter_port(port_filter, obj): @@ -411,9 +427,13 @@ def _filter_vlans(vlan_filter, obj): @user_api.check_user_permission_in_session( permission.PERMISSION_LIST_SWITCH_MACHINES ) -@utils.output_filters(port=_filter_port, vlans=_filter_vlans) +@utils.output_filters( + port=_filter_port, vlans=_filter_vlans, + tag=utils.general_filter_callback, + location=utils.general_filter_callback +) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) -def _list_switch_machines(session, user, switch_machines): +def _list_switch_machines(session, user, switch_machines, **filters): return [ switch_machine for switch_machine in switch_machines if filter_machine_internal( @@ -423,14 +443,59 @@ def _list_switch_machines(session, user, switch_machines): ] -@utils.supported_filters(optional_support_keys=SUPPORTED_MACHINES_FIELDS) +@user_api.check_user_permission_in_session( + permission.PERMISSION_LIST_SWITCH_MACHINES +) +@utils.output_filters( + missing_ok=True, + port=_filter_port, vlans=_filter_vlans, + tag=utils.general_filter_callback, + location=utils.general_filter_callback, + os_name=utils.general_filter_callback, + os_id=utils.general_filter_callback +) +@utils.wrap_to_dict(RESP_MACHINES_HOSTS_FIELDS) +def _list_switch_machines_hosts(session, user, switch_machines, **filters): + filtered_switch_machines = [ + switch_machine for switch_machine in switch_machines + if filter_machine_internal( + switch_machine.switch.filters, + switch_machine.port + ) + ] + switch_machines_hosts = [] + for switch_machine in filtered_switch_machines: + switch_machine_host_dict = {} + machine = switch_machine.machine + host = machine.host + if host: + clusters = [ + clusterhost.cluster + for clusterhost in host.clusterhosts + ] + switch_machine_host_dict.update( + host.to_dict() + ) + switch_machine_host_dict['clusters'] = [ + cluster.to_dict() for cluster in clusters + ] + switch_machine_host_dict.update( + switch_machine.to_dict() + ) + switch_machines_hosts.append(switch_machine_host_dict) + return switch_machines_hosts + + +@utils.supported_filters( + 
optional_support_keys=SUPPORTED_MACHINES_FIELDS
+)
 @database.run_in_session()
 def list_switch_machines(session, getter, switch_id, **filters):
     """Get switch machines."""
-    switch_machines = get_switch_machines_internal(
+    switch_machines = get_switch_machines_internal(
         session, switch_id=switch_id, **filters
     )
-    return _list_switch_machines(session, getter, switch_machines)
+    return _list_switch_machines(session, getter, switch_machines, **filters)
 
 
 @utils.supported_filters(
@@ -442,7 +507,35 @@ def list_switchmachines(session, lister, **filters):
     switch_machines = get_switch_machines_internal(
         session, **filters
     )
-    return _list_switch_machines(session, lister, switch_machines)
+    return _list_switch_machines(session, lister, switch_machines, **filters)
+
+
+@utils.supported_filters(
+    optional_support_keys=SUPPORTED_MACHINES_HOSTS_FIELDS
+)
+@database.run_in_session()
+def list_switch_machines_hosts(session, getter, switch_id, **filters):
+    """Get switch machines hosts."""
+    switch_machines = get_switch_machines_internal(
+        session, switch_id=switch_id, **filters
+    )
+    return _list_switch_machines_hosts(
+        session, getter, switch_machines, **filters
+    )
+
+
+@utils.supported_filters(
+    optional_support_keys=SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS
+)
+@database.run_in_session()
+def list_switchmachines_hosts(session, lister, **filters):
+    """List switch machines hosts."""
+    switch_machines = get_switch_machines_internal(
+        session, **filters
+    )
+    return _list_switch_machines_hosts(
+        session, lister, switch_machines, **filters
+    )
 
 
 def add_switch_machines_internal(
@@ -505,7 +598,7 @@ def add_switch_machine(session, creator, switch_id, mac, **kwargs):
     return switch_machines[0]
 
 
-@utils.supported_filters()
+@utils.supported_filters(optional_support_keys=['find_machines'])
 @database.run_in_session()
 @user_api.check_user_permission_in_session(
     permission.PERMISSION_UPDATE_SWITCH_MACHINES
@@ -520,7 +613,7 @@ def poll_switch_machines(session, poller, switch_id, **kwargs):
         (switch.ip, switch.credentials)
     )
     return {
-        'status': 'find_machines action sent',
+        'status': 'action %s sent' % kwargs,
         'details': {
         }
     }
diff --git a/compass/db/api/user.py b/compass/db/api/user.py
index 62e42b2c..905bca71 100644
--- a/compass/db/api/user.py
+++ b/compass/db/api/user.py
@@ -218,9 +218,14 @@ class UserWrapper(UserMixin):
 
 
 @database.run_in_session()
 def get_user_object(session, email, **kwargs):
-    user_dict = utils.get_db_object(
-        session, models.User, email=email
-    ).to_dict()
+    user = utils.get_db_object(
+        session, models.User, False, email=email
+    )
+    if not user:
+        raise exception.Unauthorized(
+            '%s unauthorized' % email
+        )
+    user_dict = user.to_dict()
     user_dict.update(kwargs)
     return UserWrapper(**user_dict)
 
@@ -231,9 +236,13 @@ def get_user_object_from_token(session, token):
         'ge': datetime.datetime.now()
     }
     user_token = utils.get_db_object(
-        session, models.UserToken,
+        session, models.UserToken, False,
         token=token, expire_timestamp=expire_timestamp
     )
+    if not user_token:
+        raise exception.Forbidden(
+            'invalid user token: %s' % token
+        )
     user_dict = utils.get_db_object(
         session, models.User, id=user_token.user_id
     ).to_dict()
diff --git a/compass/db/api/utils.py b/compass/db/api/utils.py
index 68dd6759..c3857313 100644
--- a/compass/db/api/utils.py
+++ b/compass/db/api/utils.py
@@ -81,7 +81,10 @@ def _between_condition(col_attr, value):
 
 
 def model_filter(query, model, **filters):
     for key, value in filters.items():
-        col_attr = getattr(model, key)
+        if hasattr(model, key):
+            col_attr =
getattr(model, key) + else: + continue if isinstance(value, list): query = query.filter(col_attr.in_(value)) elif isinstance(value, dict): @@ -148,19 +151,24 @@ def model_filter(query, model, **filters): return query -def wrap_to_dict(support_keys=[]): +def wrap_to_dict(support_keys=[], **filters): def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - return _wrapper_dict(func(*args, **kwargs), support_keys) + return _wrapper_dict( + func(*args, **kwargs), support_keys, **filters + ) return wrapper return decorator -def _wrapper_dict(data, support_keys): +def _wrapper_dict(data, support_keys, **filters): """Helper for warpping db object into dictionary.""" if isinstance(data, list): - return [_wrapper_dict(item, support_keys) for item in data] + return [ + _wrapper_dict(item, support_keys, **filters) + for item in data + ] if isinstance(data, models.HelperMixin): data = data.to_dict() if not isinstance(data, dict): @@ -170,18 +178,27 @@ def _wrapper_dict(data, support_keys): info = {} for key in support_keys: if key in data: - info[key] = data[key] + if key in filters: + info[key] = _wrapper_dict(data[key], filters[key]) + else: + info[key] = data[key] return info -def supported_filters(support_keys=[], optional_support_keys=[]): +def supported_filters( + support_keys=[], + optional_support_keys=[], + ignore_support_keys=[] +): def decorator(func): @functools.wraps(func) def wrapper(*args, **filters): must_support_keys = set(support_keys) all_support_keys = must_support_keys | set(optional_support_keys) filter_keys = set(filters) - unsupported_keys = filter_keys - all_support_keys + unsupported_keys = ( + filter_keys - all_support_keys - set(ignore_support_keys) + ) if unsupported_keys: raise exception.InvalidParameter( 'filter keys %s are not supported' % str( @@ -195,7 +212,12 @@ def supported_filters(support_keys=[], optional_support_keys=[]): list(missing_keys) ) ) - return func(*args, **filters) + filtered_filters = dict([ + (key, value) + for key, value in filters.items() + if key not in ignore_support_keys + ]) + return func(*args, **filtered_filters) return wrapper return decorator @@ -252,14 +274,17 @@ def general_filter_callback(general_filter, obj): return True -def filter_output(filter_callbacks, filters, obj): +def filter_output(filter_callbacks, filters, obj, missing_ok=False): for callback_key, callback_value in filter_callbacks.items(): if callback_key not in filters: continue if callback_key not in obj: - raise exception.InvalidResponse( - '%s is not in %s' % (callback_key, obj) - ) + if missing_ok: + continue + else: + raise exception.InvalidResponse( + '%s is not in %s' % (callback_key, obj) + ) if not callback_value( filters[callback_key], obj[callback_key] ): @@ -267,14 +292,16 @@ def filter_output(filter_callbacks, filters, obj): return True -def output_filters(**filter_callbacks): +def output_filters(missing_ok=False, **filter_callbacks): def decorator(func): @functools.wraps(func) def wrapper(*args, **filters): filtered_obj_list = [] obj_list = func(*args, **filters) for obj in obj_list: - if filter_output(filter_callbacks, filters, obj): + if filter_output( + filter_callbacks, filters, obj, missing_ok + ): filtered_obj_list.append(obj) return filtered_obj_list return wrapper @@ -430,7 +457,7 @@ def del_db_objects(session, table, **filters): model_query(session, table), table, **filters ) db_objects = query.all() - query.delete() + query.delete(synchronize_session=False) return db_objects diff --git a/compass/db/models.py b/compass/db/models.py 
index 5fcd70be..702502e9 100644 --- a/compass/db/models.py +++ b/compass/db/models.py @@ -108,10 +108,6 @@ class MetadataMixin(HelperMixin): super(MetadataMixin, self).initialize() def validate(self): - if not self.adapter: - raise exception.InvalidParameter( - 'adapter is not set in os metadata %s' % self.id - ) super(MetadataMixin, self).validate() @property @@ -252,67 +248,6 @@ class FieldMixin(HelperMixin): return dict_info -class AdapterMixin(HelperMixin): - name = Column(String(80), unique=True) - - @property - def root_metadatas(self): - return [ - metadata for metadata in self.metadatas - if metadata.parent_id is None - ] - - @property - def adapter_installer(self): - if self.installer: - return self.installer - elif self.parent: - return self.parent.adapter_installer - else: - return None - - @property - def installer_name(self): - installer = self.adapter_installer - if installer: - return installer.name - else: - return '' - - @property - def installer_type(self): - installer = self.adapter_installer - if installer: - return installer.installer_type - else: - return None - - @property - def installer_config(self): - installer = self.adapter_installer - if installer: - return installer.config - else: - return None - - def metadata_dict(self): - dict_info = {} - if self.parent: - dict_info.update(self.parent.metadata_dict()) - for metadata in self.root_metadatas: - dict_info.update(metadata.to_dict()) - return dict_info - - def to_dict(self): - dict_info = super(AdapterMixin, self).to_dict() - dict_info.update({ - 'installer_name': self.installer_name, - 'installer_type': self.installer_type, - 'installer_config': self.installer_config - }) - return dict_info - - class InstallerMixin(HelperMixin): name = Column(String(80), unique=True) installer_type = Column(String(80)) @@ -611,15 +546,12 @@ class Host(BASE, TimestampMixin, HelperMixin): __tablename__ = 'host' name = Column(String(80), unique=True) - adapter_id = Column(Integer, ForeignKey('os_adapter.id')) + os_id = Column(Integer, ForeignKey('os.id')) config_step = Column(String(80), default='') os_config = Column(JSONEncoded, default={}) config_validated = Column(Boolean, default=False) deployed_os_config = Column(JSONEncoded, default={}) - os_id = Column( - Integer, - ForeignKey('os.id') - ) + os_name = Column(String(80)) creator_id = Column(Integer, ForeignKey('user.id')) id = Column( Integer, @@ -686,32 +618,23 @@ class Host(BASE, TimestampMixin, HelperMixin): super(Host, self).initialize() def validate(self): - adapter = self.adapter - if not adapter: + os = self.os + if not os: raise exception.InvalidParameter( - 'adapter is not set in host %s' % self.id + 'os is not set in host %s' % self.id ) - if not self.os: - if adapter: - self.os = adapter.adapter_os - else: - raise exception.InvalidParameter( - 'os is not set in host %s' % self.id - ) - if not self.creator: + if not os.deployable: + raise exception.InvalidParameter( + 'os %s is not deployable' % os.name + ) + self.os_name = os.name + creator = self.creator + if not creator: raise exception.InvalidParameter( 'creator is not set in host %s' % self.id ) super(Host, self).validate() - @hybrid_property - def os_name(self): - os = self.os - if os: - return os.name - else: - return None - @hybrid_property def owner(self): creator = self.creator @@ -739,9 +662,12 @@ class Host(BASE, TimestampMixin, HelperMixin): dict_info = self.machine.to_dict() dict_info.update(super(Host, self).to_dict()) dict_info.update({ - 'os_name': self.os_name, 'owner': self.owner, 'os_installed': 
self.os_installed, + 'networks': [ + host_network.to_dict() + for host_network in self.host_networks + ] }) return dict_info @@ -810,14 +736,19 @@ class Cluster(BASE, TimestampMixin, HelperMixin): reinstall_distributed_system = Column(Boolean, default=True) config_step = Column(String(80), default='') os_id = Column(Integer, ForeignKey('os.id'), nullable=True) + os_name = Column(String(80), nullable=True) distributed_system_id = Column( Integer, ForeignKey('distributed_system.id'), nullable=True ) + distributed_system_name = Column( + String(80), nullable=True + ) os_config = Column(JSONEncoded, default={}) package_config = Column(JSONEncoded, default={}) config_validated = Column(Boolean, default=False) adapter_id = Column(Integer, ForeignKey('adapter.id')) + adapter_name = Column(String(80), nullable=True) creator_id = Column(Integer, ForeignKey('user.id')) clusterhosts = relationship( ClusterHost, @@ -843,31 +774,52 @@ class Cluster(BASE, TimestampMixin, HelperMixin): super(Cluster, self).initialize() def validate(self): - adapter = self.adapter - if not adapter: - raise exception.InvalidParameter( - 'adapter is not set in cluster %s' % self.id - ) creator = self.creator if not creator: raise exception.InvalidParameter( 'creator is not set in cluster %s' % self.id ) os = self.os - if not os: - os_adapter = adapter.os_adapter - if os_adapter: - self.os = os_adapter.adapter_os - else: - self.os = None - if not self.distributed_system: - package_adapter = adapter.package_adapter - if package_adapter: - self.distributed_system = ( - package_adapter.adapter_distributed_system + if os: + if not os.deployable: + raise exception.InvalidParameter( + 'os %s is not deployable' % os.name ) - else: - self.distributed_system = None + self.os_name = os.name + else: + self.os_name = None + adapter = self.adapter + if adapter: + if not adapter.deployable: + raise exception.InvalidParameter( + 'adapter %s is not deployable' % adapter.name + ) + supported_os_ids = [ + adapter_os.os.id for adapter_os in adapter.supported_oses + ] + if os and os.id not in supported_os_ids: + raise exception.InvalidParameter( + 'os %s is not supported' % os.name + ) + self.adapter_name = adapter.name + distributed_system = ( + adapter.adapter_distributed_system + ) + self.distributed_system = distributed_system + if distributed_system: + if not distributed_system.deployable: + raise exception.InvalidParamerter( + 'distributed system %s is not deployable' % ( + distributed_system.name + ) + ) + self.distributed_system_name = ( + distributed_system.name + ) + else: + self.adapter_name = None + self.distributed_system = None + self.distributed_system_name = None super(Cluster, self).validate() @property @@ -914,22 +866,6 @@ class Cluster(BASE, TimestampMixin, HelperMixin): else: return None - @hybrid_property - def os_name(self): - os = self.os - if os: - return os.name - else: - return None - - @hybrid_property - def distributed_system_name(self): - distributed_system = self.distributed_system - if distributed_system: - return distributed_system.name - else: - return None - @property def distributed_system_installed(self): state = self.state @@ -948,8 +884,6 @@ class Cluster(BASE, TimestampMixin, HelperMixin): def to_dict(self): dict_info = super(Cluster, self).to_dict() dict_info.update({ - 'os_name': self.os_name, - 'distributed_system_name': self.distributed_system_name, 'distributed_system_installed': self.distributed_system_installed, 'owner': self.owner, }) @@ -1192,10 +1126,18 @@ class SwitchMachine(BASE, HelperMixin, 
TimestampMixin): def switch_ip_int(self): return self.switch.ip_int + @switch_ip_int.expression + def switch_ip_int(cls): + return Switch.ip_int + @hybrid_property def switch_vendor(self): return self.switch.vendor + @switch_vendor.expression + def switch_vendor(cls): + return Switch.vendor + @property def patched_vlans(self): return self.vlans @@ -1213,6 +1155,7 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin): def to_dict(self): dict_info = self.machine.to_dict() dict_info.update(super(SwitchMachine, self).to_dict()) + dict_info['switch_ip'] = self.switch.ip return dict_info @@ -1348,70 +1291,15 @@ class Switch(BASE, HelperMixin, TimestampMixin): return dict_info -class Adapter(BASE, HelperMixin): - """Adpater table.""" - __tablename__ = 'adapter' - - id = Column(Integer, primary_key=True) - package_adapter_id = Column( - Integer, - ForeignKey( - 'package_adapter.id', onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - os_adapter_id = Column( - Integer, - ForeignKey( - 'os_adapter.id', onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - - __table_args__ = ( - UniqueConstraint( - 'package_adapter_id', 'os_adapter_id', name='constraint' - ), - ) - - clusters = relationship( - Cluster, - backref=backref('adapter') - ) - - def __init__(self, os_adapter_id, package_adapter_id, **kwargs): - self.os_adapter_id = os_adapter_id - self.package_adapter_id = package_adapter_id - super(Adapter, self).__init__(**kwargs) - - def metadata_dict(self): - dict_info = {} - if self.os_adapter: - dict_info['os_config'] = self.os_adapter.metadata_dict() - if self.package_adapter: - dict_info['package_config'] = self.package_adapter.metadata_dict() - return dict_info - - def to_dict(self): - dict_info = super(Adapter, self).to_dict() - os_adapter = self.os_adapter - if os_adapter: - dict_info['os_adapter'] = os_adapter.to_dict() - package_adapter = self.package_adapter - if package_adapter: - dict_info['package_adapter'] = package_adapter.to_dict() - return dict_info - - class OSConfigMetadata(BASE, MetadataMixin): """OS config metadata.""" __tablename__ = "os_config_metadata" id = Column(Integer, primary_key=True) - adapter_id = Column( + os_id = Column( Integer, ForeignKey( - 'os_adapter.id', onupdate='CASCADE', ondelete='CASCADE' + 'os.id', onupdate='CASCADE', ondelete='CASCADE' ) ) parent_id = Column( @@ -1432,13 +1320,20 @@ class OSConfigMetadata(BASE, MetadataMixin): backref=backref('parent', remote_side=id) ) __table_args__ = ( - UniqueConstraint('path', 'adapter_id', name='constraint'), + UniqueConstraint('path', 'os_id', name='constraint'), ) def __init__(self, name, **kwargs): self.name = name super(OSConfigMetadata, self).__init__(**kwargs) + def validate(self): + if not self.os: + raise exception.InvalidParameter( + 'os is not set in os metadata %s' % self.id + ) + super(OSConfigMetadata, self).validate() + class OSConfigField(BASE, FieldMixin): """OS config fields.""" @@ -1455,108 +1350,37 @@ class OSConfigField(BASE, FieldMixin): super(OSConfigField, self).__init__(**kwargs) -class OSAdapter(BASE, AdapterMixin): - """OS adpater table.""" - __tablename__ = 'os_adapter' +class AdapterOS(BASE, HelperMixin): + """Adapter OS table.""" + __tablename__ = 'adapter_os' id = Column(Integer, primary_key=True) - parent_id = Column( - Integer, - ForeignKey('os_adapter.id', onupdate='CASCADE', ondelete='CASCADE'), - nullable=True - ) os_id = Column( Integer, - ForeignKey('os.id', onupdate='CASCADE', ondelete='CASCADE'), - nullable=True + ForeignKey( + 'os.id', + 
onupdate='CASCADE', ondelete='CASCADE' + ) ) - installer_id = Column( + adapter_id = Column( Integer, - ForeignKey('os_installer.id', onupdate='CASCADE', ondelete='CASCADE'), - nullable=True - ) - children = relationship( - 'OSAdapter', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - adapters = relationship( - Adapter, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('os_adapter') - ) - metadatas = relationship( - OSConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('adapter') - ) - hosts = relationship( - Host, - backref=backref('adapter') + ForeignKey( + 'adapter.id', + onupdate='CASCADE', ondelete='CASCADE' + ) ) - __table_args__ = ( - UniqueConstraint('os_id', 'installer_id', name='constraint'), - ) - - def __init__(self, name, **kwargs): - self.name = name - super(OSAdapter, self).__init__(**kwargs) - - @property - def deployable(self): - os = self.adapter_os - installer = self.adapter_installer - if ( - os and os.deployable and installer - ): - return True - else: - return False - - @property - def adapter_os(self): - os = self.os - if os: - return os - parent = self.parent - if parent: - return parent.adapter_os - else: - return None - - @property - def os_name(self): - os = self.adapter_os - if os: - return os.name - else: - return '' + def __init__(self, os_id, adapter_id, **kwargs): + self.os_id = os_id + self.adapter_id = adapter_id + super(AdapterOS, self).__init__(**kwargs) def to_dict(self): - dict_info = super(OSAdapter, self).to_dict() - dict_info['os_name'] = self.os_name + dict_info = self.os.to_dict() + dict_info.update(super(AdapterOS, self).to_dict()) return dict_info -class OSInstaller(BASE, InstallerMixin): - """OS installer table.""" - __tablename__ = 'os_installer' - id = Column(Integer, primary_key=True) - adpaters = relationship( - OSAdapter, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('installer') - ) - - def __init__(self, name, **kwargs): - self.name = name - super(OSInstaller, self).__init__(**kwargs) - - class OperatingSystem(BASE, HelperMixin): """OS table.""" __tablename__ = 'os' @@ -1569,8 +1393,9 @@ class OperatingSystem(BASE, HelperMixin): ) name = Column(String(80), unique=True) deployable = Column(Boolean, default=False) - adapters = relationship( - OSAdapter, + + metadatas = relationship( + OSConfigMetadata, passive_deletes=True, passive_updates=True, cascade='all, delete-orphan', backref=backref('os') @@ -1588,24 +1413,45 @@ class OperatingSystem(BASE, HelperMixin): passive_deletes=True, passive_updates=True, backref=backref('parent', remote_side=id) ) + supported_adapters = relationship( + AdapterOS, + passive_deletes=True, passive_updates=True, + backref=backref('os') + ) def __init__(self, name): self.name = name super(OperatingSystem, self).__init__() + @property + def root_metadatas(self): + return [ + metadata for metadata in self.metadatas + if metadata.parent_id is None + ] -class PackageAdapterRole(BASE, HelperMixin): + def metadata_dict(self): + dict_info = {} + if self.parent: + dict_info.update(self.parent.metadata_dict()) + for metadata in self.root_metadatas: + dict_info.update(metadata.to_dict()) + return dict_info + + +class AdapterRole(BASE, HelperMixin): """Adapter's roles.""" - __tablename__ = "package_adapter_role" + __tablename__ = "adapter_role" id = Column(Integer, primary_key=True) name = Column(String(80)) + display_name 
= Column(String(80)) description = Column(Text) optional = Column(Boolean) adapter_id = Column( Integer, ForeignKey( - 'package_adapter.id', + 'adapter.id', onupdate='CASCADE', ondelete='CASCADE' ) @@ -1618,7 +1464,13 @@ class PackageAdapterRole(BASE, HelperMixin): def __init__(self, name, adapter_id, **kwargs): self.name = name self.adapter_id = adapter_id - super(PackageAdapterRole, self).__init__(**kwargs) + super(AdapterRole, self).__init__(**kwargs) + + def initialize(self): + if not self.description: + self.description = self.name + if not self.display_name: + self.display_name = self.name class PackageConfigMetadata(BASE, MetadataMixin): @@ -1629,7 +1481,7 @@ class PackageConfigMetadata(BASE, MetadataMixin): adapter_id = Column( Integer, ForeignKey( - 'package_adapter.id', + 'adapter.id', onupdate='CASCADE', ondelete='CASCADE' ) ) @@ -1663,6 +1515,13 @@ class PackageConfigMetadata(BASE, MetadataMixin): self.name = name super(PackageConfigMetadata, self).__init__(**kwargs) + def validate(self): + if not self.adapter: + raise exception.InvalidParameter( + 'adapter is not set in package metadata %s' % self.id + ) + super(PackageConfigMetadata, self).validate() + class PackageConfigField(BASE, FieldMixin): """Adapter cofig metadata fields.""" @@ -1679,16 +1538,17 @@ class PackageConfigField(BASE, FieldMixin): super(PackageConfigField, self).__init__(**kwargs) -class PackageAdapter(BASE, AdapterMixin): +class Adapter(BASE, HelperMixin): """Adapter table.""" - __tablename__ = 'package_adapter' + __tablename__ = 'adapter' id = Column(Integer, primary_key=True) name = Column(String(80), unique=True) + display_name = Column(String(80)) parent_id = Column( Integer, ForeignKey( - 'package_adapter.id', + 'adapter.id', onupdate='CASCADE', ondelete='CASCADE' ), nullable=True @@ -1701,7 +1561,15 @@ class PackageAdapter(BASE, AdapterMixin): ), nullable=True ) - installer_id = Column( + os_installer_id = Column( + Integer, + ForeignKey( + 'os_installer.id', + onupdate='CASCADE', ondelete='CASCADE' + ), + nullable=True + ) + package_installer_id = Column( Integer, ForeignKey( 'package_installer.id', @@ -1709,35 +1577,43 @@ class PackageAdapter(BASE, AdapterMixin): ), nullable=True ) - supported_os_patterns = Column(JSONEncoded, nullable=True) + deployable = Column( + Boolean, default=False + ) + + supported_oses = relationship( + AdapterOS, + passive_deletes=True, passive_updates=True, + cascade='all, delete-orphan', + backref=backref('adapter') + ) roles = relationship( - PackageAdapterRole, + AdapterRole, passive_deletes=True, passive_updates=True, cascade='all, delete-orphan', backref=backref('adapter') ) children = relationship( - 'PackageAdapter', + 'Adapter', passive_deletes=True, passive_updates=True, backref=backref('parent', remote_side=id) ) - adapters = relationship( - Adapter, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('package_adapter') - ) metadatas = relationship( PackageConfigMetadata, passive_deletes=True, passive_updates=True, cascade='all, delete-orphan', backref=backref('adapter') ) + clusters = relationship( + Cluster, + backref=backref('adapter') + ) + __table_args__ = ( UniqueConstraint( 'distributed_system_id', - 'installer_id', name='constraint' + 'os_installer_id', 'package_installer_id', name='constraint' ), ) @@ -1745,19 +1621,92 @@ class PackageAdapter(BASE, AdapterMixin): self, name, **kwargs ): self.name = name - super(PackageAdapter, self).__init__(**kwargs) + super(Adapter, self).__init__(**kwargs) + + def 
initialize(self): + if not self.display_name: + self.display_name = self.name @property - def deployable(self): - distributed_system = self.adapter_distributed_system - installer = self.adapter_installer - if ( - distributed_system and distributed_system.deployable and - installer - ): - return True + def root_metadatas(self): + return [ + metadata for metadata in self.metadatas + if metadata.parent_id is None + ] + + def metadata_dict(self): + dict_info = {} + if self.parent: + dict_info.update(self.parent.metadata_dict()) + for metadata in self.root_metadatas: + dict_info.update(metadata.to_dict()) + return dict_info + + @property + def adapter_package_installer(self): + if self.package_installer: + return self.package_installer + elif self.parent: + return self.parent.adapter_package_installer else: - return False + return None + + @property + def adapter_os_installer(self): + if self.os_installer: + return self.os_installer + elif self.parent: + return self.parent.adapter_os_installer + else: + return None + + @property + def package_installer_name(self): + installer = self.adapter_package_installer + if installer: + return installer.name + else: + return None + + @property + def os_installer_name(self): + installer = self.adapter_os_installer + if installer: + return installer.name + else: + return None + + @property + def package_installer_type(self): + installer = self.adapter_package_installer + if installer: + return installer.installer_type + else: + return None + + @property + def os_installer_type(self): + installer = self.adapter_os_installer + if installer: + return installer.installer_type + else: + return None + + @property + def package_installer_config(self): + installer = self.adapter_package_installer + if installer: + return installer.config + else: + return None + + @property + def os_installer_config(self): + installer = self.adapter_os_installer + if installer: + return installer.config + else: + return None @property def adapter_distributed_system(self): @@ -1776,16 +1725,16 @@ class PackageAdapter(BASE, AdapterMixin): if distributed_system: return distributed_system.name else: - return '' + return None @property - def adapter_supported_os_patterns(self): - supported_os_patterns = self.supported_os_patterns - if supported_os_patterns: - return supported_os_patterns + def adapter_supported_oses(self): + supported_oses = self.supported_oses + if supported_oses: + return supported_oses parent = self.parent if parent: - return parent.adapter_supported_os_patterns + return parent.adapter_supported_oses else: return [] @@ -1801,13 +1750,22 @@ class PackageAdapter(BASE, AdapterMixin): return [] def to_dict(self): - dict_info = super(PackageAdapter, self).to_dict() - roles = [] - for role in self.adapter_roles: - roles.append(role.to_dict()) - dict_info['roles'] = roles - dict_info['supported_os_patterns'] = self.adapter_supported_os_patterns - dict_info['distributed_system'] = self.distributed_system_name + dict_info = super(Adapter, self).to_dict() + adapter_roles = self.adapter_roles + supported_oses = self.adapter_supported_oses + dict_info.update({ + 'roles': [role.to_dict() for role in adapter_roles], + 'supported_oses': [ + adapter_os.to_dict() for adapter_os in supported_oses + ], + 'distributed_system_name': self.distributed_system_name, + 'os_installer_name': self.os_installer_name, + 'os_installer_type': self.os_installer_type, + 'os_installer_config': self.os_installer_config, + 'package_installer_name': self.package_installer_name, + 'package_installer_type': 
self.package_installer_type,
+            'package_installer_config': self.package_installer_config
+        })
         return dict_info
 
 
@@ -1825,9 +1783,10 @@ class DistributedSystem(BASE, HelperMixin):
         nullable=True
     )
     name = Column(String(80), unique=True)
-    deployable = Column(Boolean, default=False)
+    deployable = Column(Boolean, default=False)
+
     adapters = relationship(
-        PackageAdapter,
+        Adapter,
         passive_deletes=True, passive_updates=True,
         cascade='all, delete-orphan',
         backref=backref('distributed_system')
@@ -1847,15 +1806,31 @@ class DistributedSystem(BASE, HelperMixin):
         super(DistributedSystem, self).__init__()
 
 
+class OSInstaller(BASE, InstallerMixin):
+    """OS installer table."""
+    __tablename__ = 'os_installer'
+    id = Column(Integer, primary_key=True)
+    adapters = relationship(
+        Adapter,
+        passive_deletes=True, passive_updates=True,
+        cascade='all, delete-orphan',
+        backref=backref('os_installer')
+    )
+
+    def __init__(self, name, **kwargs):
+        self.name = name
+        super(OSInstaller, self).__init__(**kwargs)
+
+
 class PackageInstaller(BASE, InstallerMixin):
     """package installer table."""
     __tablename__ = 'package_installer'
     id = Column(Integer, primary_key=True)
     adapters = relationship(
-        PackageAdapter,
+        Adapter,
         passive_deletes=True, passive_updates=True,
         cascade='all, delete-orphan',
-        backref=backref('installer')
+        backref=backref('package_installer')
     )
 
     def __init__(self, name, **kwargs):
@@ -1868,6 +1843,7 @@ class Network(BASE, TimestampMixin, HelperMixin):
     __tablename__ = 'network'
 
     id = Column(Integer, primary_key=True)
+    name = Column(String(80), unique=True)
     subnet = Column(String(80), unique=True)
     host_networks = relationship(
@@ -1882,10 +1858,14 @@ class Network(BASE, TimestampMixin, HelperMixin):
         super(Network, self).__init__(**kwargs)
 
     def intialize(self):
+        if not self.name:
+            self.name = self.subnet
+        super(Network, self).intialize()
+
+    def validate(self):
         try:
             netaddr.IPNetwork(self.subnet)
         except Exception:
             raise exception.InvalidParameter(
                 'subnet %s format is uncorrect' % self.subnet
             )
-        super(Network, self).intialize()
diff --git a/compass/tasks/tasks.py b/compass/tasks/tasks.py
index 079df5b1..19dcf05b 100644
--- a/compass/tasks/tasks.py
+++ b/compass/tasks/tasks.py
@@ -18,25 +18,31 @@
 """
 import logging
 
+from celery.signals import celeryd_init
 from celery.signals import setup_logging
 
 from compass.actions import deploy
 from compass.actions import poll_switch
 from compass.actions import reinstall
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+
 from compass.tasks.client import celery
 from compass.utils import flags
 from compass.utils import logsetting
 from compass.utils import setting_wrapper as setting
 
 
-def tasks_setup_logging(**_):
-    """Setup logging options from compass setting."""
+@celeryd_init.connect()
+def global_celery_init(**_):
+    """Initialization code."""
     flags.init()
     flags.OPTIONS.logfile = setting.CELERY_LOGFILE
     logsetting.init()
-
-
-setup_logging.connect(tasks_setup_logging)
+    database.init()
+    adapter_api.load_adapters()
+    metadata_api.load_metadatas()
 
 
 @celery.task(name='compass.tasks.pollswitch')
@@ -60,27 +66,51 @@ def pollswitch(ip_addr, credentials, req_obj='mac', oper='SCAN'):
         logging.exception(error)
 
 
-@celery.task(name='compass.tasks.deploy')
-def deploy_clusters(cluster_hosts):
+@celery.task(name='compass.tasks.deploy_cluster')
+def deploy_cluster(cluster_id, clusterhost_ids):
     """Deploy the given cluster.
 
     :param cluster_hosts: the cluster and hosts of each cluster to deploy.
     :type cluster_hosts: dict of int to list of int
     """
-    try:
-        deploy.deploy(cluster_hosts)
-    except Exception as error:
-        logging.exception(error)
+    pass
 
 
-@celery.task(name='compass.tasks.reinstall')
-def reinstall_clusters(cluster_hosts):
+@celery.task(name='compass.tasks.reinstall_cluster')
+def reinstall_cluster(cluster_id, clusterhost_ids):
     """reinstall the given cluster.
 
     :param cluster_hosts: the cluster and hosts of each cluster to reinstall.
     :type cluster_hosts: dict of int to list of int
     """
-    try:
-        reinstall.reinstall(cluster_hosts)
-    except Exception as error:
-        logging.exception(error)
+    pass
+
+
+@celery.task(name='compass.tasks.poweron_host')
+def poweron_host(host_id):
+    """Power on the given host.
+
+    :param host_id: id of the host to power on.
+    :type host_id: int
+    """
+    pass
+
+
+@celery.task(name='compass.tasks.poweroff_host')
+def poweroff_host(host_id):
+    """Power off the given host.
+
+    :param host_id: id of the host to power off.
+    :type host_id: int
+    """
+    pass
+
+
+@celery.task(name='compass.tasks.reset_host')
+def reset_host(host_id):
+    """Reset (power cycle) the given host.
+
+    :param host_id: id of the host to reset.
+    :type host_id: int
+    """
+    pass
diff --git a/compass/tests/db/api/test_utils.py b/compass/tests/db/api/test_utils.py
index 52e7a9b8..1b058745 100644
--- a/compass/tests/db/api/test_utils.py
+++ b/compass/tests/db/api/test_utils.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import os
 import unittest2
 
@@ -144,9 +145,11 @@ class TestModelFilter(unittest2.TestCase):
         expected, ret = self._filter_test_dict_util(
             'gt', 'update_clusterhost_state',
-            47,
-            id=46
+            48,
+            id=47
         )
+        print 'expected: %s' % expected
+        print 'ret: %s' % ret
         self.assertTrue(
             all(item in ret[0].items() for item in expected.items())
         )
 
@@ -166,9 +169,11 @@ class TestModelFilter(unittest2.TestCase):
         expected, ret = self._filter_test_dict_util(
             'ge', 'update_clusterhost_state',
-            47,
-            id=47
+            48,
+            id=48
         )
+        print 'expected: %s' % expected
+        print 'ret: %s' % ret
         self.assertTrue(
             all(item in ret[0].items() for item in expected.items())
         )
 
@@ -387,7 +392,7 @@ class TestAddDbObject(unittest2.TestCase):
         with database.session() as session:
             db_objs = utils.add_db_object(
                 session,
-                models.PackageAdapterRole,
+                models.AdapterRole,
                 True,
                 'test1',
                 1,
diff --git a/compass/utils/setting_wrapper.py b/compass/utils/setting_wrapper.py
index 493a191d..3e382656 100644
--- a/compass/utils/setting_wrapper.py
+++ b/compass/utils/setting_wrapper.py
@@ -63,13 +63,12 @@ OS_INSTALLER_DIR = '/etc/compass/os_installer'
 PACKAGE_INSTALLER_DIR = '/etc/compass/package_installer'
 OS_DIR = '/etc/compass/os'
 DISTRIBUTED_SYSTEM_DIR = '/etc/compass/distributed_system'
-OS_ADAPTER_DIR = '/etc/compass/os_adapter'
-PACKAGE_ADAPTER_DIR = '/etc/compass/package_adapter'
+ADAPTER_DIR = '/etc/compass/adapter'
 OS_METADATA_DIR = '/etc/compass/os_metadata'
 PACKAGE_METADATA_DIR = '/etc/compass/package_metadata'
 OS_FIELD_DIR = '/etc/compass/os_field'
 PACKAGE_FIELD_DIR = '/etc/compass/package_field'
-PACKAGE_ROLE_DIR = '/etc/compass/role'
+ADAPTER_ROLE_DIR = '/etc/compass/role'
 VALIDATOR_DIR = '/etc/compass/validator'
 if (
     'COMPASS_IGNORE_SETTING' in os.environ and
diff --git a/conf/package_adapter/ceph.conf b/conf/adapter/ceph.conf
similarity index 100%
rename from conf/package_adapter/ceph.conf
rename to conf/adapter/ceph.conf
diff --git a/conf/adapter/chef_ceph.conf b/conf/adapter/chef_ceph.conf
new file mode 100644
index 00000000..4855afc1
--- /dev/null
+++ b/conf/adapter/chef_ceph.conf
@@ -0,0 +1,7 @@
+NAME = 'ceph(chef)'
+DISPLAY_NAME = 'ceph(chef)'
+PARENT = 'ceph'
+PACKAGE_INSTALLER = 'chef(icehouse)'
+OS_INSTALLER = 'cobbler'
+SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
+DEPLOYABLE = True
diff --git a/conf/adapter/chef_openstack.conf b/conf/adapter/chef_openstack.conf
new file mode 100644
index 00000000..3a32af04
--- /dev/null
+++ b/conf/adapter/chef_openstack.conf
@@ -0,0 +1,7 @@
+NAME = 'openstack(chef)'
+DISPLAY_NAME = 'openstack(chef)'
+PARENT = 'openstack'
+PACKAGE_INSTALLER = 'chef(icehouse)'
+OS_INSTALLER = 'cobbler'
+SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
+DEPLOYABLE = True
diff --git a/conf/os_adapter/general.conf b/conf/adapter/general.conf
similarity index 53%
rename from conf/os_adapter/general.conf
rename to conf/adapter/general.conf
index 6cbeaef9..4d8cb371 100644
--- a/conf/os_adapter/general.conf
+++ b/conf/adapter/general.conf
@@ -1,2 +1 @@
 NAME = 'general'
-OS = 'general'
diff --git a/conf/package_adapter/openstack.conf b/conf/adapter/openstack.conf
similarity index 60%
rename from conf/package_adapter/openstack.conf
rename to conf/adapter/openstack.conf
index 038c6d4c..2f948fb0 100644
--- a/conf/package_adapter/openstack.conf
+++ b/conf/adapter/openstack.conf
@@ -1,3 +1,4 @@
 NAME = 'openstack'
 PARENT = 'general'
 DISTRIBUTED_SYSTEM = 'openstack'
+SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04']
diff --git a/conf/adapter/os_only.conf b/conf/adapter/os_only.conf
new file mode 100644
index 00000000..96295538
--- /dev/null
+++ b/conf/adapter/os_only.conf
@@ -0,0 +1,5 @@
+NAME = 'os_only'
+PARENT = 'general'
+OS_INSTALLER = 'cobbler'
+SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
+DEPLOYABLE = True
diff --git a/conf/os_adapter/centos.conf b/conf/os_adapter/centos.conf
deleted file mode 100644
index 11a1bdf4..00000000
--- a/conf/os_adapter/centos.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-NAME = 'CentOS'
-PARENT = 'general'
-OS = 'CentOS'
diff --git a/conf/os_adapter/cobbler_centos.conf b/conf/os_adapter/cobbler_centos.conf
deleted file mode 100644
index 1946ce01..00000000
--- a/conf/os_adapter/cobbler_centos.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-NAME = 'CentOS(cobbler)'
-PARENT = 'CentOS'
-INSTALLER = 'cobbler'
diff --git a/conf/os_adapter/cobbler_centos6.5.conf b/conf/os_adapter/cobbler_centos6.5.conf
deleted file mode 100644
index ac63f69a..00000000
--- a/conf/os_adapter/cobbler_centos6.5.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-NAME = 'CentOS6.5(cobbler)'
-PARENT = 'CentOS(cobbler)'
-OS = 'CentOS6.5'
diff --git a/conf/os_adapter/cobbler_ubuntu.conf b/conf/os_adapter/cobbler_ubuntu.conf
deleted file mode 100644
index 465fe763..00000000
--- a/conf/os_adapter/cobbler_ubuntu.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-NAME = 'Ubuntu(cobbler)'
-PARENT = 'Ubuntu'
-INSTALLER = 'cobbler'
diff --git a/conf/os_adapter/cobbler_ubuntu12.04.conf b/conf/os_adapter/cobbler_ubuntu12.04.conf
deleted file mode 100644
index ff1559db..00000000
--- a/conf/os_adapter/cobbler_ubuntu12.04.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-NAME = 'Ubuntu12.04(cobbler)'
-PARENT = 'Ubuntu(cobbler)'
-OS = 'Ubuntu12.04'
diff --git a/conf/os_adapter/ubuntu.conf b/conf/os_adapter/ubuntu.conf
deleted file mode 100644
index c8f31b51..00000000
--- a/conf/os_adapter/ubuntu.conf
+++ /dev/null
@@ -1,3
+0,0 @@ -NAME = 'Ubuntu' -PARENT = 'general' -OS = 'Ubuntu' diff --git a/conf/os_metadata/general.conf b/conf/os_metadata/general.conf index 28719e5b..920b63f0 100644 --- a/conf/os_metadata/general.conf +++ b/conf/os_metadata/general.conf @@ -1,4 +1,4 @@ -ADAPTER = 'general' +OS = 'general' METADATA = { 'general': { '_self': { @@ -9,7 +9,6 @@ METADATA = { 'field': 'general', 'default_value': 'EN', 'options': ['EN'], - 'required_in_options': True } }, 'timezone': { @@ -17,7 +16,6 @@ METADATA = { 'field': 'general', 'default_value': 'PDT', 'options': ['PDT'], - 'required_in_options': True } }, 'domain': { @@ -26,7 +24,6 @@ METADATA = { 'is_required' : True, 'default_value': 'ods.com', 'options': ['ods.com'], - 'required_in_options': True } }, 'default_gateway': { @@ -58,7 +55,6 @@ METADATA = { '_self': { 'required_in_whole_config': True, 'options': ['/boot', 'swap', '/var', '/home'], - 'required_in_options': True }, '$partition': { '_self': { diff --git a/conf/package_adapter/chef_ceph.conf b/conf/package_adapter/chef_ceph.conf deleted file mode 100644 index ebc8f831..00000000 --- a/conf/package_adapter/chef_ceph.conf +++ /dev/null @@ -1,4 +0,0 @@ -NAME = 'ceph(chef)' -PARENT = 'ceph' -INSTALLER = 'chef(icehouse)' -SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*'] diff --git a/conf/package_adapter/chef_openstack.conf b/conf/package_adapter/chef_openstack.conf deleted file mode 100644 index 766ba221..00000000 --- a/conf/package_adapter/chef_openstack.conf +++ /dev/null @@ -1,4 +0,0 @@ -NAME = 'chef_openstack' -PARENT = 'openstack' -INSTALLER = 'chef(icehouse)' -SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*'] diff --git a/conf/package_adapter/general.conf b/conf/package_adapter/general.conf deleted file mode 100644 index f51ea0a3..00000000 --- a/conf/package_adapter/general.conf +++ /dev/null @@ -1,2 +0,0 @@ -NAME = 'general' -DISTRIBUTED_SYSTEM = 'general' diff --git a/conf/role/openstack_chef.conf b/conf/role/openstack_chef.conf index b0b916c6..4979cd0d 100644 --- a/conf/role/openstack_chef.conf +++ b/conf/role/openstack_chef.conf @@ -1,30 +1,39 @@ -ADAPTER_NAME = 'chef_openstack' +ADAPTER_NAME = 'openstack(chef)' ROLES = [{ 'role': 'os-compute-worker', + 'display_name': 'compute node', 'description': 'compute node' }, { 'role': 'os-network', + 'display_name': 'network node', 'description': 'network node' }, { 'role': 'os-block-storage-worker', + 'display_name': 'storage node', 'description': 'storage node' }, { 'role': 'os-image', + 'display_name': 'image node', 'description': 'image node' }, { 'role': 'os-compute-vncproxy', + 'display_name': 'vnc proxy node', 'description': 'vnc proxy node' }, { 'role': 'os-controller', + 'display_name': 'controller node', 'description': 'controller node' }, { 'role': 'os-ops-messaging', + 'display_name': 'message queue node', 'description': 'message queue node' }, { 'role': 'os-ops-database', + 'display_name': 'database node', 'description': 'database node' }, { 'role': 'ha-proxy', + 'display_name': 'ha proxy node', 'description': 'ha proxy node', 'optional': True }]
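
Below is a minimal, self-contained sketch (not part of the patch itself) of how the reworked supported_filters decorator in compass/db/api/utils.py is expected to behave after this change: required keys must be present, keys outside the supported sets are rejected, and keys listed in ignore_support_keys are silently dropped before the wrapped function runs. The add_subnet usage is a simplified stand-in for the one in compass/db/api/network.py, and InvalidParameter is redefined locally here (in the real code it comes from compass.db.exception) so the example runs on its own.

import functools


class InvalidParameter(Exception):
    """Stand-in for compass.db.exception.InvalidParameter."""
    pass


def supported_filters(support_keys=[], optional_support_keys=[],
                      ignore_support_keys=[]):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **filters):
            must_support_keys = set(support_keys)
            all_support_keys = must_support_keys | set(optional_support_keys)
            # reject keys that are neither supported nor ignorable
            unsupported_keys = (
                set(filters) - all_support_keys - set(ignore_support_keys)
            )
            if unsupported_keys:
                raise InvalidParameter(
                    'filter keys %s are not supported' % list(unsupported_keys)
                )
            # required keys must always be present
            missing_keys = must_support_keys - set(filters)
            if missing_keys:
                raise InvalidParameter(
                    'filter keys %s not found' % list(missing_keys)
                )
            # drop ignored keys before calling the wrapped function
            filtered_filters = dict([
                (key, value) for key, value in filters.items()
                if key not in ignore_support_keys
            ])
            return func(*args, **filtered_filters)
        return wrapper
    return decorator


# Hypothetical usage mirroring add_subnet in compass/db/api/network.py:
# 'subnet' is required, 'name' is optional, and database-managed fields
# are ignored rather than rejected.
@supported_filters(
    ['subnet'], optional_support_keys=['name'],
    ignore_support_keys=['id', 'created_at', 'updated_at']
)
def add_subnet(subnet, **kwargs):
    return dict(subnet=subnet, **kwargs)


print(add_subnet(subnet='10.145.88.0/23', name='management', id=42))
# the ignored 'id' key is dropped:
# {'subnet': '10.145.88.0/23', 'name': 'management'}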