From ae593ca66301d1d9aaa06ae6bd4f7062c4723758 Mon Sep 17 00:00:00 2001 From: Tanvir Talukder Date: Mon, 12 Dec 2016 08:50:24 -0600 Subject: [PATCH] Initial commit --- .coveragerc | 8 + .gitignore | 109 + .testr.conf | 7 + doc/LICENSE | 176 ++ etc/valet/api/app.apache2 | 29 + etc/valet/api/app.wsgi | 4 + etc/valet/api/config.py | 102 + .../notification_listener.py | 24 + etc/valet/valet.conf | 136 ++ requirements.txt | 14 + run_all_tests.sh | 1 + run_examples.sh | 14 + run_test.sh | 8 + run_until_fail.sh | 4 + setup.cfg | 31 + setup.py | 33 + test-requirements.txt | 29 + tools/conf.d/HAValet.conf | 6 + tools/conf.d/music.conf | 15 + tools/utils/cleandb.sh | 16 + tools/utils/populate.cql | 23 + tox.ini | 66 + valet/__init__.py | 0 valet/api/PKG-INFO | 4 + valet/api/__init__.py | 0 valet/api/app.py | 44 + valet/api/common/__init__.py | 22 + valet/api/common/compute.py | 32 + valet/api/common/hooks.py | 111 + valet/api/common/i18n.py | 23 + valet/api/common/identity.py | 155 ++ valet/api/common/identity.py.save | 147 ++ valet/api/common/messaging.py | 43 + valet/api/common/ostro_helper.py | 315 +++ valet/api/conf.py | 69 + valet/api/db/__init__.py | 0 valet/api/db/models/__init__.py | 23 + valet/api/db/models/music/__init__.py | 303 +++ valet/api/db/models/music/groups.py | 94 + valet/api/db/models/music/music.py | 335 +++ valet/api/db/models/music/ostro.py | 180 ++ valet/api/db/models/music/placements.py | 101 + valet/api/db/models/music/plans.py | 98 + valet/api/v1/__init__.py | 0 valet/api/v1/commands/__init__.py | 0 valet/api/v1/commands/populate.py | 72 + valet/api/v1/controllers/__init__.py | 128 ++ valet/api/v1/controllers/errors.py | 140 ++ valet/api/v1/controllers/groups.py | 321 +++ valet/api/v1/controllers/placements.py | 196 ++ valet/api/v1/controllers/plans.py | 284 +++ valet/api/v1/controllers/root.py | 90 + valet/api/v1/controllers/status.py | 90 + valet/api/v1/controllers/v1.py | 130 ++ valet/api/wsgi.py | 57 + valet/cli/__init__.py | 0 valet/cli/groupcli.py | 187 ++ valet/cli/valetcli.py | 37 + valet/engine/__init__.py | 0 valet/engine/conf.py | 82 + valet/engine/groups/__init__.py | 0 valet/engine/listener/PKG-INFO | 4 + valet/engine/listener/__init__.py | 0 valet/engine/listener/listener_manager.py | 165 ++ valet/engine/listener/oslo_messages.py | 95 + valet/engine/optimizer/__init__.py | 0 .../engine/optimizer/app_manager/__init__.py | 0 .../optimizer/app_manager/app_handler.py | 285 +++ .../optimizer/app_manager/app_topology.py | 219 ++ .../app_manager/app_topology_base.py | 257 +++ .../app_manager/app_topology_parser.py | 641 ++++++ .../optimizer/app_manager/application.py | 62 + valet/engine/optimizer/db_connect/__init__.py | 0 valet/engine/optimizer/db_connect/client.cfg | 17 + .../optimizer/db_connect/configuration.py | 73 + valet/engine/optimizer/db_connect/event.py | 150 ++ .../optimizer/db_connect/music_handler.py | 702 ++++++ valet/engine/optimizer/ostro/__init__.py | 0 .../optimizer/ostro/constraint_solver.py | 554 +++++ .../optimizer/ostro/openstack_filters.py | 246 +++ .../engine/optimizer/ostro/openstack_utils.py | 90 + valet/engine/optimizer/ostro/optimizer.py | 196 ++ valet/engine/optimizer/ostro/ostro.py | 633 ++++++ valet/engine/optimizer/ostro/search.py | 1959 +++++++++++++++++ valet/engine/optimizer/ostro/search_base.py | 300 +++ .../engine/optimizer/ostro_server/__init__.py | 0 .../optimizer/ostro_server/configuration.py | 269 +++ valet/engine/optimizer/ostro_server/daemon.py | 163 ++ .../optimizer/ostro_server/db_cleaner.py | 151 ++ 
.../optimizer/ostro_server/ostro_daemon.py | 75 + .../optimizer/ostro_server/ostro_sim.cfg | 25 + valet/engine/optimizer/util/__init__.py | 0 valet/engine/optimizer/util/util.py | 89 + valet/engine/resource_manager/__init__.py | 0 valet/engine/resource_manager/compute.py | 335 +++ .../resource_manager/compute_manager.py | 406 ++++ .../resource_manager/compute_simulator.py | 101 + valet/engine/resource_manager/resource.py | 933 ++++++++ .../engine/resource_manager/resource_base.py | 684 ++++++ .../resource_manager/simulation/__init__.py | 0 .../simulation/compute_simulator.py | 135 ++ .../simulation/topology_simulator.py | 144 ++ valet/engine/resource_manager/topology.py | 197 ++ .../resource_manager/topology_manager.py | 489 ++++ .../resource_manager/topology_simulator.py | 144 ++ valet/ha/__init__.py | 0 valet/ha/ha_valet.cfg | 66 + valet/ha/ha_valet.py | 505 +++++ valet/ha/ha_valet2.cfg | 67 + valet/tests/__init__.py | 0 valet/tests/api/README.md | 15 + valet/tests/api/Valet.json.postman_collection | 770 +++++++ valet/tests/api/__init__.py | 0 valet/tests/api/config.py | 84 + valet/tests/api/conftest.py | 199 ++ valet/tests/api/controllers/__init__.py | 27 + valet/tests/api/controllers/test_plans.py | 100 + valet/tests/base.py | 40 + valet/tests/functional/__init__.py | 0 .../tests/functional/etc/valet_validator.cfg | 48 + .../functional/valet_validator/__init__.py | 0 .../valet_validator/common/__init__.py | 37 + .../functional/valet_validator/common/auth.py | 51 + .../functional/valet_validator/common/init.py | 95 + .../valet_validator/common/resources.py | 88 + .../valet_validator/compute/__init__.py | 0 .../valet_validator/compute/analyzer.py | 163 ++ .../valet_validator/group_api/__init__.py | 0 .../valet_validator/group_api/valet_group.py | 106 + .../valet_validator/orchestration/__init__.py | 0 .../valet_validator/orchestration/loader.py | 84 + .../valet_validator/tests/__init__.py | 0 .../valet_validator/tests/functional_base.py | 77 + .../valet_validator/tests/sanityCheck | 13 + .../tests/templates/affinity_ 3_Instances.yml | 56 + .../templates/affinity_basic_2_instances.yml | 41 + .../templates/diversity_basic_2_instances.yml | 51 + .../diversity_between_2_affinity.yml | 76 + .../exclusivity_basic_2_instances.yml | 48 + .../tests/templates/sanityTemplate | 42 + .../valet_validator/tests/test_affinity.py | 34 + .../tests/test_affinity_3_Instances.py | 33 + .../valet_validator/tests/test_diversity.py | 35 + .../valet_validator/tests/test_exclusivity.py | 34 + .../valet_validator/tests/test_groups.py | 74 + .../valet_validator/tests/test_nested.py | 34 + valet/tests/tempest/README.rst | 70 + valet/tests/tempest/__init__.py | 0 valet/tests/tempest/api/__init__.py | 0 valet/tests/tempest/api/base.py | 55 + valet/tests/tempest/api/disabled_test_plan.py | 115 + valet/tests/tempest/api/test_groups.py | 133 ++ valet/tests/tempest/api/test_members.py | 139 ++ valet/tests/tempest/config.py | 48 + valet/tests/tempest/plugin.py | 45 + valet/tests/tempest/scenario/__init__.py | 0 valet/tests/tempest/scenario/analyzer.py | 206 ++ .../tests/tempest/scenario/general_logger.py | 41 + valet/tests/tempest/scenario/resources.py | 88 + valet/tests/tempest/scenario/scenario_base.py | 154 ++ .../tempest/scenario/templates/__init__.py | 0 .../templates/affinity_basic_2_instances.env | 3 + .../templates/affinity_basic_2_instances.yml | 48 + .../templates/diversity_basic_2_instances.env | 3 + .../templates/diversity_basic_2_instances.yml | 58 + .../diversity_between_2_affinity.env | 3 + 
.../diversity_between_2_affinity.yml | 83 + .../exclusivity_basic_2_instances.env | 3 + .../exclusivity_basic_2_instances.yml | 55 + .../tests/tempest/scenario/tests/__init__.py | 0 .../tempest/scenario/tests/test_affinity.py | 15 + .../tempest/scenario/tests/test_diversity.py | 15 + .../scenario/tests/test_exclusivity.py | 15 + .../tempest/scenario/tests/test_nested.py | 15 + valet/tests/tempest/scenario/valet_group.py | 105 + valet/tests/tempest/services/__init__.py | 0 valet/tests/tempest/services/client.py | 126 ++ valet/tests/unit/__init__.py | 0 valet/tests/unit/api/__init__.py | 0 valet/tests/unit/api/common/__init__.py | 0 valet/tests/unit/api/common/test_hooks.py | 76 + valet/tests/unit/api/common/test_identity.py | 53 + valet/tests/unit/api/common/test_messaging.py | 27 + .../unit/api/common/test_ostro_helper.py | 140 ++ valet/tests/unit/api/db/__init__.py | 0 valet/tests/unit/api/db/test_groups.py | 48 + valet/tests/unit/api/db/test_ostro.py | 99 + valet/tests/unit/api/db/test_placements.py | 47 + valet/tests/unit/api/db/test_plans.py | 41 + valet/tests/unit/api/v1/__init__.py | 0 valet/tests/unit/api/v1/api_base.py | 23 + valet/tests/unit/api/v1/test_groups.py | 231 ++ valet/tests/unit/api/v1/test_placements.py | 105 + valet/tests/unit/api/v1/test_plans.py | 93 + valet/tests/unit/api/v1/test_root.py | 41 + valet/tests/unit/api/v1/test_status.py | 43 + valet/tests/unit/api/v1/test_v1.py | 67 + valet/tests/unit/cli/__init__.py | 0 valet/tests/unit/cli/test_groupcli.py | 22 + valet/tests/unit/cli/test_valetcli.py | 29 + valet/tests/unit/engine/__init__.py | 0 valet/tests/unit/engine/empty.cfg | 0 valet/tests/unit/engine/invalid.cfg | 2 + valet/tests/unit/engine/test_config.py | 42 + valet/tests/unit/engine/test_ostro.cfg | 150 ++ valet/tests/unit/engine/test_search.py | 25 + valet/tests/unit/engine/test_topology.py | 60 + valet/tests/unit/test_general.py | 11 + valet_plugins/.coveragerc | 8 + valet_plugins/.gitignore | 106 + valet_plugins/.testr.conf | 7 + valet_plugins/LICENSE | 176 ++ valet_plugins/README | 57 + valet_plugins/RELEASE | 26 + valet_plugins/requirements.txt | 6 + valet_plugins/setup.cfg | 34 + valet_plugins/setup.py | 32 + valet_plugins/test-requirements.txt | 25 + valet_plugins/tox.ini | 56 + valet_plugins/valet_plugins/PKG-INFO | 4 + valet_plugins/valet_plugins/__init__.py | 0 .../valet_plugins/common/__init__.py | 0 .../valet_plugins/common/valet_api.py | 172 ++ .../valet_plugins/heat/GroupAssignment.py | 105 + valet_plugins/valet_plugins/heat/README.md | 188 ++ valet_plugins/valet_plugins/heat/__init__.py | 0 .../valet_plugins/plugins/__init__.py | 0 .../valet_plugins/plugins/heat/__init__.py | 0 .../valet_plugins/plugins/heat/plugins.py | 158 ++ .../valet_plugins/plugins/nova/__init__.py | 0 .../plugins/nova/valet_filter.py | 222 ++ valet_plugins/valet_plugins/tests/__init__.py | 0 valet_plugins/valet_plugins/tests/base.py | 49 + .../valet_plugins/tests/unit/__init__.py | 4 + .../tests/unit/mocks/heat/__init__.py | 0 .../tests/unit/mocks/heat/common/__init__.py | 0 .../tests/unit/mocks/heat/common/i18n.py | 7 + .../tests/unit/mocks/heat/engine/__init__.py | 0 .../mocks/heat/engine/lifecycle_plugin.py | 25 + .../tests/unit/mocks/nova/__init__.py | 0 .../tests/unit/mocks/nova/i18n.py | 33 + .../unit/mocks/nova/scheduler/__init__.py | 0 .../unit/mocks/nova/scheduler/filters.py | 24 + .../valet_plugins/tests/unit/test_plugins.py | 57 + .../tests/unit/test_valet_api.py | 32 + .../tests/unit/test_valet_filter.py | 71 + 246 files changed, 23749 insertions(+) create 
mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 .testr.conf create mode 100644 doc/LICENSE create mode 100644 etc/valet/api/app.apache2 create mode 100644 etc/valet/api/app.wsgi create mode 100644 etc/valet/api/config.py create mode 100644 etc/valet/openstack/notification_listener/notification_listener.py create mode 100644 etc/valet/valet.conf create mode 100644 requirements.txt create mode 100644 run_all_tests.sh create mode 100644 run_examples.sh create mode 100644 run_test.sh create mode 100644 run_until_fail.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100644 tools/conf.d/HAValet.conf create mode 100644 tools/conf.d/music.conf create mode 100644 tools/utils/cleandb.sh create mode 100644 tools/utils/populate.cql create mode 100644 tox.ini create mode 100644 valet/__init__.py create mode 100644 valet/api/PKG-INFO create mode 100644 valet/api/__init__.py create mode 100644 valet/api/app.py create mode 100644 valet/api/common/__init__.py create mode 100644 valet/api/common/compute.py create mode 100644 valet/api/common/hooks.py create mode 100644 valet/api/common/i18n.py create mode 100644 valet/api/common/identity.py create mode 100644 valet/api/common/identity.py.save create mode 100644 valet/api/common/messaging.py create mode 100644 valet/api/common/ostro_helper.py create mode 100644 valet/api/conf.py create mode 100644 valet/api/db/__init__.py create mode 100644 valet/api/db/models/__init__.py create mode 100644 valet/api/db/models/music/__init__.py create mode 100644 valet/api/db/models/music/groups.py create mode 100644 valet/api/db/models/music/music.py create mode 100644 valet/api/db/models/music/ostro.py create mode 100644 valet/api/db/models/music/placements.py create mode 100644 valet/api/db/models/music/plans.py create mode 100644 valet/api/v1/__init__.py create mode 100644 valet/api/v1/commands/__init__.py create mode 100644 valet/api/v1/commands/populate.py create mode 100644 valet/api/v1/controllers/__init__.py create mode 100644 valet/api/v1/controllers/errors.py create mode 100644 valet/api/v1/controllers/groups.py create mode 100644 valet/api/v1/controllers/placements.py create mode 100644 valet/api/v1/controllers/plans.py create mode 100644 valet/api/v1/controllers/root.py create mode 100644 valet/api/v1/controllers/status.py create mode 100644 valet/api/v1/controllers/v1.py create mode 100644 valet/api/wsgi.py create mode 100644 valet/cli/__init__.py create mode 100644 valet/cli/groupcli.py create mode 100755 valet/cli/valetcli.py create mode 100644 valet/engine/__init__.py create mode 100644 valet/engine/conf.py create mode 100644 valet/engine/groups/__init__.py create mode 100644 valet/engine/listener/PKG-INFO create mode 100644 valet/engine/listener/__init__.py create mode 100644 valet/engine/listener/listener_manager.py create mode 100644 valet/engine/listener/oslo_messages.py create mode 100644 valet/engine/optimizer/__init__.py create mode 100644 valet/engine/optimizer/app_manager/__init__.py create mode 100755 valet/engine/optimizer/app_manager/app_handler.py create mode 100755 valet/engine/optimizer/app_manager/app_topology.py create mode 100755 valet/engine/optimizer/app_manager/app_topology_base.py create mode 100755 valet/engine/optimizer/app_manager/app_topology_parser.py create mode 100755 valet/engine/optimizer/app_manager/application.py create mode 100644 valet/engine/optimizer/db_connect/__init__.py create mode 100644 
valet/engine/optimizer/db_connect/client.cfg create mode 100644 valet/engine/optimizer/db_connect/configuration.py create mode 100644 valet/engine/optimizer/db_connect/event.py create mode 100644 valet/engine/optimizer/db_connect/music_handler.py create mode 100644 valet/engine/optimizer/ostro/__init__.py create mode 100755 valet/engine/optimizer/ostro/constraint_solver.py create mode 100755 valet/engine/optimizer/ostro/openstack_filters.py create mode 100755 valet/engine/optimizer/ostro/openstack_utils.py create mode 100755 valet/engine/optimizer/ostro/optimizer.py create mode 100755 valet/engine/optimizer/ostro/ostro.py create mode 100755 valet/engine/optimizer/ostro/search.py create mode 100755 valet/engine/optimizer/ostro/search_base.py create mode 100644 valet/engine/optimizer/ostro_server/__init__.py create mode 100755 valet/engine/optimizer/ostro_server/configuration.py create mode 100644 valet/engine/optimizer/ostro_server/daemon.py create mode 100644 valet/engine/optimizer/ostro_server/db_cleaner.py create mode 100755 valet/engine/optimizer/ostro_server/ostro_daemon.py create mode 100644 valet/engine/optimizer/ostro_server/ostro_sim.cfg create mode 100644 valet/engine/optimizer/util/__init__.py create mode 100755 valet/engine/optimizer/util/util.py create mode 100644 valet/engine/resource_manager/__init__.py create mode 100755 valet/engine/resource_manager/compute.py create mode 100755 valet/engine/resource_manager/compute_manager.py create mode 100644 valet/engine/resource_manager/compute_simulator.py create mode 100755 valet/engine/resource_manager/resource.py create mode 100755 valet/engine/resource_manager/resource_base.py create mode 100644 valet/engine/resource_manager/simulation/__init__.py create mode 100644 valet/engine/resource_manager/simulation/compute_simulator.py create mode 100644 valet/engine/resource_manager/simulation/topology_simulator.py create mode 100755 valet/engine/resource_manager/topology.py create mode 100755 valet/engine/resource_manager/topology_manager.py create mode 100644 valet/engine/resource_manager/topology_simulator.py create mode 100644 valet/ha/__init__.py create mode 100644 valet/ha/ha_valet.cfg create mode 100644 valet/ha/ha_valet.py create mode 100644 valet/ha/ha_valet2.cfg create mode 100644 valet/tests/__init__.py create mode 100644 valet/tests/api/README.md create mode 100644 valet/tests/api/Valet.json.postman_collection create mode 100644 valet/tests/api/__init__.py create mode 100644 valet/tests/api/config.py create mode 100644 valet/tests/api/conftest.py create mode 100644 valet/tests/api/controllers/__init__.py create mode 100644 valet/tests/api/controllers/test_plans.py create mode 100644 valet/tests/base.py create mode 100644 valet/tests/functional/__init__.py create mode 100644 valet/tests/functional/etc/valet_validator.cfg create mode 100644 valet/tests/functional/valet_validator/__init__.py create mode 100644 valet/tests/functional/valet_validator/common/__init__.py create mode 100644 valet/tests/functional/valet_validator/common/auth.py create mode 100644 valet/tests/functional/valet_validator/common/init.py create mode 100644 valet/tests/functional/valet_validator/common/resources.py create mode 100644 valet/tests/functional/valet_validator/compute/__init__.py create mode 100644 valet/tests/functional/valet_validator/compute/analyzer.py create mode 100644 valet/tests/functional/valet_validator/group_api/__init__.py create mode 100644 valet/tests/functional/valet_validator/group_api/valet_group.py create mode 100644 
valet/tests/functional/valet_validator/orchestration/__init__.py create mode 100644 valet/tests/functional/valet_validator/orchestration/loader.py create mode 100644 valet/tests/functional/valet_validator/tests/__init__.py create mode 100644 valet/tests/functional/valet_validator/tests/functional_base.py create mode 100644 valet/tests/functional/valet_validator/tests/sanityCheck create mode 100644 valet/tests/functional/valet_validator/tests/templates/affinity_ 3_Instances.yml create mode 100644 valet/tests/functional/valet_validator/tests/templates/affinity_basic_2_instances.yml create mode 100644 valet/tests/functional/valet_validator/tests/templates/diversity_basic_2_instances.yml create mode 100644 valet/tests/functional/valet_validator/tests/templates/diversity_between_2_affinity.yml create mode 100644 valet/tests/functional/valet_validator/tests/templates/exclusivity_basic_2_instances.yml create mode 100644 valet/tests/functional/valet_validator/tests/templates/sanityTemplate create mode 100644 valet/tests/functional/valet_validator/tests/test_affinity.py create mode 100644 valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py create mode 100644 valet/tests/functional/valet_validator/tests/test_diversity.py create mode 100644 valet/tests/functional/valet_validator/tests/test_exclusivity.py create mode 100644 valet/tests/functional/valet_validator/tests/test_groups.py create mode 100644 valet/tests/functional/valet_validator/tests/test_nested.py create mode 100644 valet/tests/tempest/README.rst create mode 100644 valet/tests/tempest/__init__.py create mode 100644 valet/tests/tempest/api/__init__.py create mode 100644 valet/tests/tempest/api/base.py create mode 100644 valet/tests/tempest/api/disabled_test_plan.py create mode 100644 valet/tests/tempest/api/test_groups.py create mode 100644 valet/tests/tempest/api/test_members.py create mode 100644 valet/tests/tempest/config.py create mode 100644 valet/tests/tempest/plugin.py create mode 100644 valet/tests/tempest/scenario/__init__.py create mode 100644 valet/tests/tempest/scenario/analyzer.py create mode 100644 valet/tests/tempest/scenario/general_logger.py create mode 100644 valet/tests/tempest/scenario/resources.py create mode 100644 valet/tests/tempest/scenario/scenario_base.py create mode 100644 valet/tests/tempest/scenario/templates/__init__.py create mode 100644 valet/tests/tempest/scenario/templates/affinity_basic_2_instances.env create mode 100644 valet/tests/tempest/scenario/templates/affinity_basic_2_instances.yml create mode 100644 valet/tests/tempest/scenario/templates/diversity_basic_2_instances.env create mode 100644 valet/tests/tempest/scenario/templates/diversity_basic_2_instances.yml create mode 100644 valet/tests/tempest/scenario/templates/diversity_between_2_affinity.env create mode 100644 valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml create mode 100644 valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.env create mode 100644 valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.yml create mode 100644 valet/tests/tempest/scenario/tests/__init__.py create mode 100644 valet/tests/tempest/scenario/tests/test_affinity.py create mode 100644 valet/tests/tempest/scenario/tests/test_diversity.py create mode 100644 valet/tests/tempest/scenario/tests/test_exclusivity.py create mode 100644 valet/tests/tempest/scenario/tests/test_nested.py create mode 100644 valet/tests/tempest/scenario/valet_group.py create mode 100644 
valet/tests/tempest/services/__init__.py create mode 100644 valet/tests/tempest/services/client.py create mode 100644 valet/tests/unit/__init__.py create mode 100644 valet/tests/unit/api/__init__.py create mode 100644 valet/tests/unit/api/common/__init__.py create mode 100644 valet/tests/unit/api/common/test_hooks.py create mode 100644 valet/tests/unit/api/common/test_identity.py create mode 100644 valet/tests/unit/api/common/test_messaging.py create mode 100644 valet/tests/unit/api/common/test_ostro_helper.py create mode 100644 valet/tests/unit/api/db/__init__.py create mode 100644 valet/tests/unit/api/db/test_groups.py create mode 100644 valet/tests/unit/api/db/test_ostro.py create mode 100644 valet/tests/unit/api/db/test_placements.py create mode 100644 valet/tests/unit/api/db/test_plans.py create mode 100644 valet/tests/unit/api/v1/__init__.py create mode 100644 valet/tests/unit/api/v1/api_base.py create mode 100644 valet/tests/unit/api/v1/test_groups.py create mode 100644 valet/tests/unit/api/v1/test_placements.py create mode 100644 valet/tests/unit/api/v1/test_plans.py create mode 100644 valet/tests/unit/api/v1/test_root.py create mode 100644 valet/tests/unit/api/v1/test_status.py create mode 100644 valet/tests/unit/api/v1/test_v1.py create mode 100644 valet/tests/unit/cli/__init__.py create mode 100644 valet/tests/unit/cli/test_groupcli.py create mode 100644 valet/tests/unit/cli/test_valetcli.py create mode 100644 valet/tests/unit/engine/__init__.py create mode 100644 valet/tests/unit/engine/empty.cfg create mode 100644 valet/tests/unit/engine/invalid.cfg create mode 100644 valet/tests/unit/engine/test_config.py create mode 100644 valet/tests/unit/engine/test_ostro.cfg create mode 100644 valet/tests/unit/engine/test_search.py create mode 100644 valet/tests/unit/engine/test_topology.py create mode 100644 valet/tests/unit/test_general.py create mode 100644 valet_plugins/.coveragerc create mode 100644 valet_plugins/.gitignore create mode 100644 valet_plugins/.testr.conf create mode 100644 valet_plugins/LICENSE create mode 100644 valet_plugins/README create mode 100644 valet_plugins/RELEASE create mode 100644 valet_plugins/requirements.txt create mode 100644 valet_plugins/setup.cfg create mode 100644 valet_plugins/setup.py create mode 100644 valet_plugins/test-requirements.txt create mode 100644 valet_plugins/tox.ini create mode 100644 valet_plugins/valet_plugins/PKG-INFO create mode 100644 valet_plugins/valet_plugins/__init__.py create mode 100644 valet_plugins/valet_plugins/common/__init__.py create mode 100644 valet_plugins/valet_plugins/common/valet_api.py create mode 100644 valet_plugins/valet_plugins/heat/GroupAssignment.py create mode 100644 valet_plugins/valet_plugins/heat/README.md create mode 100644 valet_plugins/valet_plugins/heat/__init__.py create mode 100644 valet_plugins/valet_plugins/plugins/__init__.py create mode 100644 valet_plugins/valet_plugins/plugins/heat/__init__.py create mode 100644 valet_plugins/valet_plugins/plugins/heat/plugins.py create mode 100644 valet_plugins/valet_plugins/plugins/nova/__init__.py create mode 100644 valet_plugins/valet_plugins/plugins/nova/valet_filter.py create mode 100644 valet_plugins/valet_plugins/tests/__init__.py create mode 100644 valet_plugins/valet_plugins/tests/base.py create mode 100644 valet_plugins/valet_plugins/tests/unit/__init__.py create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/heat/__init__.py create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/heat/common/__init__.py create mode 100644 
valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/__init__.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/nova/__init__.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/__init__.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/test_plugins.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/test_valet_api.py
 create mode 100644 valet_plugins/valet_plugins/tests/unit/test_valet_filter.py

diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..436cd4f
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,8 @@
+[run]
+branch = True
+source = valet
+omit = valet/tests/*
+cover_pylib = True
+
+[report]
+ignore_errors = True
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..81fc93f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,109 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# Ignore thumbnails created by Windows
+Thumbs.db
+# Ignore files built by Visual Studio
+*.obj
+*.exe
+*.pdb
+*.user
+*.aps
+*.pch
+*.vspscc
+*_i.c
+*_p.c
+*.ncb
+*.suo
+*.tlb
+*.tlh
+*.bak
+*.cache
+*.ilk
+*.log
+# C extensions
+*.so
+*.pid
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.eggs/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+ostro-daemon.pid
+.project
+.pydevproject
+.testrepository
+.settings
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Ignore thumbnails created by Windows
+Thumbs.db
+# Ignore files built by Visual Studio
+*.obj
+*.exe
+*.pdb
+*.user
+*.aps
+*.pch
+*.vspscc
+*_i.c
+*_p.c
+*.ncb
+*.suo
+*.tlb
+*.tlh
+*.bak
+*.cache
+*.ilk
+[Bb]in
+[Dd]ebug*/
+*.lib
+*.sbr
+obj/
+[Rr]elease*/
+_ReSharper*/
+[Tt]est[Rr]esult*
+.idea/*
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 0000000..ba78ba2
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-1000} \
+             ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./valet/tests/unit} -t . $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/doc/LICENSE b/doc/LICENSE
new file mode 100644
index 0000000..68c771a
--- /dev/null
+++ b/doc/LICENSE
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
+      Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks.
+      This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
diff --git a/etc/valet/api/app.apache2 b/etc/valet/api/app.apache2
new file mode 100644
index 0000000..8cc8d13
--- /dev/null
+++ b/etc/valet/api/app.apache2
@@ -0,0 +1,29 @@
+# valet user/group required (or substitute as needed).
+# Place in /opt/apache2/sites-available, symlink from
+# /opt/apache2/sites-enabled, and run 'apachectl restart' as root.
+# Optional: Append python-path=PATH_TO_VENV_PACKAGES to WSGIDaemonProcess
+
+Listen 8090
+ServerName valet
+
+<VirtualHost *:8090>
+    ServerName valet
+    WSGIDaemonProcess valet user=m04060 group=m04060 threads=5
+    WSGIScriptAlias / /var/www/valet/app.wsgi
+
+    SetEnv APACHE_RUN_USER m04060
+    SetEnv APACHE_RUN_GROUP m04060
+    WSGIProcessGroup valet
+
+    <Directory /var/www/valet>
+        WSGIProcessGroup valet
+        WSGIApplicationGroup %{GLOBAL}
+        Order deny,allow
+        Allow from all
+    </Directory>
+
+    ErrorLog /var/log/valet/api.log
+    LogLevel warn
+    CustomLog /var/log/valet/access.log combined
+</VirtualHost>
diff --git a/etc/valet/api/app.wsgi b/etc/valet/api/app.wsgi
new file mode 100644
index 0000000..f32d0f4
--- /dev/null
+++ b/etc/valet/api/app.wsgi
@@ -0,0 +1,4 @@
+# /var/www/valet/app.wsgi
+from valet.api.app import load_app
+
+application = load_app(config_file='/var/www/valet/config.py')
\ No newline at end of file
diff --git a/etc/valet/api/config.py b/etc/valet/api/config.py
new file mode 100644
index 0000000..faab4e5
--- /dev/null
+++ b/etc/valet/api/config.py
@@ -0,0 +1,102 @@
+from oslo_config import cfg
+from pecan.hooks import TransactionHook
+from valet.api.db import models
+from valet.api.common.hooks import NotFoundHook, MessageNotificationHook
+
+
+CONF = cfg.CONF
+
+# Server Specific Configurations
+server = {
+    'port': CONF.server.port,
+    'host': CONF.server.host
+}
+
+# Pecan Application Configurations
+app = {
+    'root': 'valet.api.v1.controllers.root.RootController',
+    'modules': ['valet.api'],
+    'default_renderer': 'json',
+    'force_canonical': False,
+    'debug': False,
+    'hooks': [
+        TransactionHook(
+            models.start,
+            models.start_read_only,
+            models.commit,
+            models.rollback,
+            models.clear
+        ),
+        NotFoundHook(),
+        MessageNotificationHook(),
+    ],
+}
+
+logging = {
+    'root': {'level': 'INFO', 'handlers': ['console']},
+    'loggers': {
+        'api': {
+            'level': 'DEBUG', 'handlers': ['console'], 'propagate': False
+        },
+        'api.models': {
+            'level': 'INFO', 'handlers': ['console'], 'propagate': False
+        },
+        'api.common': {
+            'level': 'INFO', 'handlers': ['console'], 'propagate': False
+        },
+        'pecan': {
+            'level': 'DEBUG', 'handlers': ['console'], 'propagate': False
+        },
+        'py.warnings': {'handlers': ['console']},
+        '__force_dict__': True
+    },
+    'handlers': {
+        'console': {
+            'level': 'DEBUG',
+            'class': 'logging.StreamHandler',
+            'formatter': 'color'
+        }
+    },
+    'formatters': {
+        'simple': {
+            'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
+                       '[%(threadName)s] %(message)s')
+        },
+        'color': {
+            '()': 'pecan.log.ColorFormatter',
+            'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
+                       '[%(threadName)s] %(message)s'),
+            '__force_dict__': True
+        }
+    }
+}
+
+ostro = {
+    'tries': CONF.music.tries,
+    'interval': CONF.music.interval,
+}
+
+
+messaging = {
+    'config': {
+        'transport_url': 'rabbit://' + CONF.messaging.username + ':' +
+                         CONF.messaging.password + '@' +
+                         CONF.messaging.host + ':' +
+                         str(CONF.messaging.port) + '/'
+    }
+}
+
+identity = {
+    'config': {
+        'username': CONF.identity.username,
+        'password': CONF.identity.password,
+        'project_name': CONF.identity.project_name,
+        'auth_url': CONF.identity.auth_url,
+        'interface': CONF.identity.interface,
+    }
+}
+
+music = {
+    'host': CONF.music.host,
+    'port': CONF.music.port,
+    'keyspace': CONF.music.keyspace,
+    'replication_factor': CONF.music.replication_factor,
+}
diff --git a/etc/valet/openstack/notification_listener/notification_listener.py b/etc/valet/openstack/notification_listener/notification_listener.py
new file mode 100644
index 0000000..a6f5402
--- /dev/null
+++
b/etc/valet/openstack/notification_listener/notification_listener.py
@@ -0,0 +1,24 @@
+import json
+from oslo_config import cfg
+import oslo_messaging
+
+
+class NotificationEndpoint(object):
+
+    def info(self, ctxt, publisher_id, event_type, payload, metadata):
+        print('recv notification:')
+        print(json.dumps(payload, indent=4))
+
+    def warn(self, ctxt, publisher_id, event_type, payload, metadata):
+        pass
+
+    def error(self, ctxt, publisher_id, event_type, payload, metadata):
+        pass
+
+transport = oslo_messaging.get_transport(cfg.CONF)
+targets = [oslo_messaging.Target(topic='notifications')]
+endpoints = [NotificationEndpoint()]
+
+server = oslo_messaging.get_notification_listener(transport, targets, endpoints)
+server.start()
+server.wait()
diff --git a/etc/valet/valet.conf b/etc/valet/valet.conf
new file mode 100644
index 0000000..57b9db2
--- /dev/null
+++ b/etc/valet/valet.conf
@@ -0,0 +1,136 @@
+# __
+# /_\ |__| |
+# / \ | |
+#
+
+[server]
+host = 0.0.0.0
+port = 8090
+
+[messaging]
+username = rabbitmq_username
+password = rabbitmq_psw
+host = rabbitmq_host
+port = rabbitmq_port
+
+[identity]
+project_name = project_name
+username = project_username
+password = project_username_password
+auth_url = http://keystone_host:5000/v2.0
+# interface = admin
+
+# _ _
+# | \ |_\
+# |_/ |_/
+#
+
+[music]
+host = music_host
+port = 8080
+keyspace = valet_keyspace
+replication_factor = 3
+# tries = 10
+# interval = 1
+# request_table = placement_requests
+# response_table = placement_results
+# event_table = oslo_messages
+# resource_table = resource_status
+# app_table = app
+# resource_index_table = resource_log_index
+# app_index_table = app_log_index
+# uuid_table = uuid_map
+
+
+# __ __ __
+# |__ |\ | | | |\ | |__
+# |__ | \| |__T | | \| |__
+#
+
+[engine]
+# Set the location of the daemon process id file
+pid = /var/run/valet/ostro-daemon.pid
+
+# Set the IP of this Ostro
+# ip = localhost
+
+# Used for Ostro active/passive selection
+priority = 1
+
+
+#------------------------------------------------------------------------------------------------------------
+# Logging configuration
+#------------------------------------------------------------------------------------------------------------
+# Set logging parameters
+# logger_name = test
+# logging level = [debug|info]
+# logging_level = debug
+
+# Set the directory to locate the log file
+# logging_dir = /var/log/valet/engine/
+
+# Set the maximum size of the main log file in bytes
+# max_main_log_size = 5000000
+
+# Set the maximum size in bytes of each time-series log file
+# max_log_size = 1000000
+
+# Set the maximum number of time-series log files
+# max_num_of_logs = 20
+
+#------------------------------------------------------------------------------------------------------------
+# Management configuration
+#------------------------------------------------------------------------------------------------------------
+# Set the name of the datacenter (region name) where Valet/Ostro is deployed.
+# datacenter_name = bigsite
+
+# Set the naming convention rules.
+# Currently, 3 chars of CLLI + region number + 'r' + rack id number + 1 char of node type + node id number.
+# For example, pdk15r05c001 indicates the first KVM compute server (i.e., 'c001') in the fifth rack
+# (i.e., 'r05') in the fifteenth DeKalb-Peachtree Airport Region (i.e., 'pdk15').
+
+# Set the number of chars that indicate the region code. In the above example, 'pdk' is the region code.
+# num_of_region_chars = 3
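[Editorial aside: to make the host-naming convention above concrete, here is a minimal, hypothetical Python sketch of how such a name could be decomposed. The parse_host_name helper and its regex are invented for illustration only; the engine's actual parsing (e.g., in valet/engine/resource_manager/topology.py) may differ.]

# Hypothetical sketch only: decompose a host name such as 'pdk15r05c001'
# into the fields described above (region code + region number + 'r' +
# rack id + node-type char + node id). Not the engine's actual parser.
import re


def parse_host_name(name, num_of_region_chars=3):
    pattern = (r'^(?P<region_code>[a-z]{%d})(?P<region_num>\d+)'
               r'r(?P<rack_id>\d+)'
               r'(?P<node_type>[acufops])(?P<node_id>\d+)$'
               % num_of_region_chars)
    match = re.match(pattern, name)
    return match.groupdict() if match else None


print(parse_host_name('pdk15r05c001'))
# -> region_code 'pdk', region_num '15', rack_id '05', node_type 'c', node_id '001'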
+
+# Set the single-char rack indicator. This should be 'r'.
+# rack_code_list = r
+
+# Set all the chars that indicate node types.
+# Currently, 'a' = network, 'c' = KVM compute, 'u' = ESXi compute, 'f' = ?, 'o' = operation, 'p' = power,
+# 's' = storage.
+# node_code_list = a,c,u,f,o,p,s
+
+# Set the trigger time or frequency for checking compute hosting server status (i.e., call Nova).
+# Note that currently, the compute (Nova) check should be triggered first, then the topology check.
+# compute_trigger_time = 01:00
+# compute_trigger_frequency = 3600
+
+# Set the trigger time or frequency for checking datacenter topology
+# topology_trigger_time = 02:00
+# topology_trigger_frequency = 3600
+
+# Set default overbooking ratios. Note that each compute node can have its own ratios.
+# default_cpu_allocation_ratio = 16
+# default_ram_allocation_ratio = 1.5
+# default_disk_allocation_ratio = 1
+
+# Set static unused percentages of resources (i.e., standby) that are set aside for applications' workload spikes.
+# static_cpu_standby_ratio = 20
+# static_mem_standby_ratio = 20
+# static_local_disk_standby_ratio = 20
+
+# Set the Ostro execution mode.
+# mode = [live|sim]; sim lets Ostro simulate a datacenter, while live lets it handle a real datacenter.
+# mode = live
+# Set the location of the simulation configuration file (i.e., ostro_sim.cfg).
+# This is used only in simulation mode.
+# sim_cfg_loc = /etc/valet/engine/ostro_sim.cfg
+
+# Indicate whether a network controller (i.e., Tegu) has been deployed.
+# If it has, set its API; otherwise, ignore these parameters.
+# network_control = no
+# network_control_api = 29444/tegu/api
+
+# Set the RPC server IP and port if used. Otherwise, ignore these parameters.
+# rpc_server_ip = localhost
+# rpc_server_port = 8002
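[Editorial aside: the overbooking and standby knobs above combine in the obvious way, shown below as assumed back-of-the-envelope arithmetic. This is not necessarily how Ostro computes capacity internally (see valet/engine/resource_manager/resource.py for the real accounting).]

# Assumed arithmetic only: overbooking multiplies physical capacity,
# and the standby ratio holds back a percentage for workload spikes.
def effective_vcpus(physical_cores, cpu_allocation_ratio=16.0,
                    static_cpu_standby_ratio=20.0):
    overbooked = physical_cores * cpu_allocation_ratio
    return overbooked * (1.0 - static_cpu_standby_ratio / 100.0)


print(effective_vcpus(32))  # 32 * 16 = 512 vCPUs, minus 20% standby -> 409.6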
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..f4293c6
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,14 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+pip
+pecan>=1.1.1
+pecan-notario<=0.0.3
+simplejson<=3.3.1
+#pymysql
+#sqlalchemy
+pika<=0.10.0
+python-daemon<=2.1.1
+#oslo.messaging!=1.17.0,!=1.17.1,!=2.6.0,!=2.6.1,!=2.7.0,!=2.8.0,!=2.8.1,!=2.9.0,!=3.1.0,>=1.16.0 # Apache-2.0
+#oslo.messaging==1.8.3
diff --git a/run_all_tests.sh b/run_all_tests.sh
new file mode 100644
index 0000000..c5db7a5
--- /dev/null
+++ b/run_all_tests.sh
@@ -0,0 +1 @@
+sudo tox
diff --git a/run_examples.sh b/run_examples.sh
new file mode 100644
index 0000000..88a39fa
--- /dev/null
+++ b/run_examples.sh
@@ -0,0 +1,14 @@
+# run specific tests:
+# sudo tox -epy27 -- '(TestAffinity|TestDiversity)'
+
+# isolate
+# sudo tox -- --isolated
+
+# run all tests until failure
+# sudo tox -- --until-failure
+
+# non-parallel (serial) run
+# sudo tox -epy27 -- '--concurrency=1'
+
+# use commands = ostestr --slowest '{posargs}' in file tox.ini
+# http://docs.openstack.org/developer/os-testr/ostestr.html#running-tests
diff --git a/run_test.sh b/run_test.sh
new file mode 100644
index 0000000..459967e
--- /dev/null
+++ b/run_test.sh
@@ -0,0 +1,8 @@
+
+sudo tox -epy27 -- '--concurrency=1' $*
+
+# EXAMPLE:
+# ./run_test.sh '(TestAffinity)'
+
+# run specific tests:
+# sudo tox -epy27 -- '(TestAffinity|TestDiversity)'
\ No newline at end of file
diff --git a/run_until_fail.sh b/run_until_fail.sh
new file mode 100644
index 0000000..4fb5333
--- /dev/null
+++ b/run_until_fail.sh
@@ -0,0 +1,4 @@
+
+# run all tests in a loop until failure
+
+sudo tox -- --until-failure
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..1485afe
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,31 @@
+[metadata]
+name = valet
+summary = Valet Placement Service API
+version = 0.1
+# description-file = README.md
+author = AT&T
+author-email = jdandrea@research.att.com
+homepage = https://github.com/att-comdev/valet
+classifier =
+    Environment :: OpenStack
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    License :: OSI Approved :: Apache Software License
+    Operating System :: POSIX :: Linux
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
+
+[files]
+packages = valet
+data_files = etc/valet/ = etc/*
+
+[entry_points]
+pecan.command =
+    populate = valet.api.v1.commands.populate:PopulateCommand
+tempest.test_plugins =
+    valet_tests = valet.tests.tempest.plugin:ValetTempestPlugin
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..fd309f1
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,33 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Setup'''
+
+import setuptools
+
+# In Python < 2.7.4, lazy loading of the `pbr` package will break
+# setuptools if other modules have registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa # pylint: disable=W0611,C0411
+except ImportError:
+    pass
+
+setuptools.setup(
+    setup_requires=['pbr>=1.8'],
+    pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..a22accb
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,29 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+
+hacking<0.11,>=0.10.0
+os-testr<=0.7.0
+markupsafe<=0.23
+pecan<=0.8.2
+notario<=0.0.11
+coverage>=3.6
+python-subunit>=0.0.18
+mock>=1.2
+oslotest>=1.10.0 # Apache-2.0
+oslo.config>=1.9.0
+testrepository>=0.0.18
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
+testscenarios>=0.4
+testtools>=1.4.0
+oslo.i18n<=3.8.0
+oslo.log>=1.0.0
+pytz
+python-keystoneclient<=3.4.0
+python-novaclient<=4.0.0
+python-heatclient<=1.2.0
+
+oslo.messaging==1.8.3
+#tempest<=12.1.0  # needs to be installed on Jenkins; produces no output when run via tox
+#tempest-lib>=0.8.0
\ No newline at end of file
diff --git a/tools/conf.d/HAValet.conf b/tools/conf.d/HAValet.conf
new file mode 100644
index 0000000..6f9fc85
--- /dev/null
+++ b/tools/conf.d/HAValet.conf
@@ -0,0 +1,6 @@
+[program:HAValet]
+command=python /usr/local/lib/python2.7/dist-packages/valet/ha/ha_valet.py
+autostart=true
+autorestart=true
+stderr_logfile=/var/log/HAValet.err.log
+stdout_logfile=/var/log/HAValet.out.log
diff --git a/tools/conf.d/music.conf b/tools/conf.d/music.conf
new file mode 100644
index 0000000..34b3550
--- /dev/null
+++ b/tools/conf.d/music.conf
@@ -0,0 +1,15 @@
+[program:cassandra]
+command=/bin/bash -c '/opt/app/apache-cassandra-2.1.1/bin/cassandra -f'
+autostart=true
+autorestart=true
+stopsignal=KILL
+stderr_logfile=/var/log/cassandra.err.log
+stdout_logfile=/var/log/cassandra.out.log
+
+[program:Zookeeper]
+command=/opt/app/zookeeper-3.4.6/bin/zkServer.sh start-foreground
+autostart=true
+autorestart=true
+stopsignal=KILL
+stderr_logfile=/var/log/zookeeper.err.log
+stdout_logfile=/var/log/zookeeper.out.log
\ No newline at end of file
diff --git a/tools/utils/cleandb.sh b/tools/utils/cleandb.sh
new file mode 100644
index 0000000..a291fc5
--- /dev/null
+++ b/tools/utils/cleandb.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# drop keyspace
+echo "drop valet keyspace"
+/opt/app/apache-cassandra-2.1.1/bin/cqlsh -e "DROP KEYSPACE valet_test;"
+
+sleep 5
+
+# populate tables
+echo "populate valet tables"
+# /opt/app/apache-cassandra-2.1.1/bin/cqlsh -f ./populate.cql
+pecan populate /var/www/valet/config.py
+
+/opt/app/apache-cassandra-2.1.1/bin/cqlsh -e "DESCRIBE KEYSPACE valet_test;"
+
+echo "Done populating"
diff --git a/tools/utils/populate.cql b/tools/utils/populate.cql
new file mode 100644
index 0000000..2af8642
--- /dev/null
+++ b/tools/utils/populate.cql
@@ -0,0 +1,23 @@
+CREATE KEYSPACE IF NOT EXISTS valet_test WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': '3' } AND durable_writes = true;
+
+CREATE TABLE IF NOT EXISTS valet_test.placements(id text PRIMARY KEY, name text, orchestration_id text, resource_id text, location text, reserved boolean, plan_id text);
+
+CREATE TABLE IF NOT EXISTS valet_test.groups(id text PRIMARY KEY, name text, description text, type text, members text);
+
+CREATE TABLE IF NOT EXISTS valet_test.placement_requests(stack_id text PRIMARY KEY, request text);
+
+CREATE TABLE IF NOT EXISTS
valet_test.placement_results(stack_id text PRIMARY KEY, placement text);
+
+CREATE TABLE IF NOT EXISTS valet_test.oslo_messages ("timestamp" text PRIMARY KEY, args text, exchange text, method text);
+
+CREATE TABLE IF NOT EXISTS valet_test.plans (id text PRIMARY KEY, name text, stack_id text);
+
+CREATE TABLE IF NOT EXISTS valet_test.uuid_map (uuid text PRIMARY KEY, h_uuid text, s_uuid text);
+
+CREATE TABLE IF NOT EXISTS valet_test.app (stack_id text PRIMARY KEY, app text);
+
+CREATE TABLE IF NOT EXISTS valet_test.resource_status (site_name text PRIMARY KEY, resource text);
+
+CREATE TABLE IF NOT EXISTS valet_test.resource_log_index (site_name text PRIMARY KEY, resource_log_index text);
+
+CREATE TABLE IF NOT EXISTS valet_test.app_log_index (site_name text PRIMARY KEY, app_log_index text);
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..cd3f561
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,66 @@
+[tox]
+#minversion = 2.0
+envlist = py27
+#py27-constraints, pep8-constraints
+#py34-constraints,py27-constraints,pypy-constraints,pep8-constraints
+#skipsdist = True
+
+
+[testenv]
+usedevelop = True
+install_command =
+    pip install -U {opts} {packages}
+
+setenv = VIRTUAL_ENV={envdir}
+         OS_TEST_PATH=valet/tests/unit
+
+
+#commands = python setup.py testr --slowest --testr-args='{posargs}'
+commands =
+    find . -type f -name "*.pyc" -delete
+    ostestr --slowest '{posargs}'
+deps = -r{toxinidir}/test-requirements.txt
+
+whitelist_externals =
+    bash
+    find
+
+[testenv:pep8]
+commands = flake8
+
+
+[testenv:venv]
+commands = {posargs}
+
+
+[testenv:tempest]
+setenv = VIRTUAL_ENV={envdir}
+         OS_TEST_PATH=valet/tests/tempest
+
+commands = python setup.py testr --slowest --testr-args='{posargs}'
+# python setup.py testr --testr-args='{posargs}' | subunit-trace --no-failure-debug -f
+
+
+
+[testenv:cover]
+setenv = VIRTUAL_ENV={envdir}
+         OS_TEST_PATH=valet/tests/unit/
+commands =
+    coverage erase
+    python setup.py test --slowest --coverage --coverage-package-name 'valet' --testr-args='{posargs}'
+    coverage html
+    coverage report
+
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+
+[flake8]
+# E123, E125 skipped as they are invalid PEP-8.
+
+show-source = True
+ignore = E123,E125,E501,H401,H105,H301
+builtins = _
+exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
+
diff --git a/valet/__init__.py b/valet/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/api/PKG-INFO b/valet/api/PKG-INFO
new file mode 100644
index 0000000..6dcdb4c
--- /dev/null
+++ b/valet/api/PKG-INFO
@@ -0,0 +1,4 @@
+Metadata-Version: 1.2
+Name: api
+Version: 0.1.0
+Author-email: jdandrea@research.att.com
diff --git a/valet/api/__init__.py b/valet/api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/api/app.py b/valet/api/app.py
new file mode 100644
index 0000000..a51c23d
--- /dev/null
+++ b/valet/api/app.py
@@ -0,0 +1,44 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +'''Application''' + +from pecan.deploy import deploy +from pecan import make_app +from valet.api.common import identity, messaging +from valet.api.conf import register_conf, set_domain +from valet.api.db import models + + +def setup_app(config): + """ App Setup """ + identity.init_identity() + messaging.init_messaging() + models.init_model() + app_conf = dict(config.app) + + return make_app( + app_conf.pop('root'), + logging=getattr(config, 'logging', {}), **app_conf) + + +# entry point for apache2 +def load_app(config_file): + register_conf() + set_domain(project='valet') + return deploy(config_file) diff --git a/valet/api/common/__init__.py b/valet/api/common/__init__.py new file mode 100644 index 0000000..11cb891 --- /dev/null +++ b/valet/api/common/__init__.py @@ -0,0 +1,22 @@ +import ctypes + + +def terminate_thread(thread): + """Terminates a python thread from another thread. + + :param thread: a threading.Thread instance + """ + if not thread.isAlive(): + return + + print('valet watcher thread: notifier thread is alive... - kill it...') + exc = ctypes.py_object(SystemExit) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc) + if res == 0: + raise ValueError("nonexistent thread id") + elif res > 1: + # """if it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect""" + ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None) + raise SystemError("PyThreadState_SetAsyncExc failed") + print('valet watcher thread exits') diff --git a/valet/api/common/compute.py b/valet/api/common/compute.py new file mode 100644 index 0000000..b0a3622 --- /dev/null +++ b/valet/api/common/compute.py @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Compute helper library''' + +from novaclient import client +from pecan import conf + +# Nova API v2 +VERSION = 2 + + +def nova_client(): + '''Returns a nova client''' + sess = conf.identity.engine.session + nova = client.Client(VERSION, session=sess) + return nova diff --git a/valet/api/common/hooks.py b/valet/api/common/hooks.py new file mode 100644 index 0000000..16a67e9 --- /dev/null +++ b/valet/api/common/hooks.py @@ -0,0 +1,111 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+'''Hooks'''
+
+import json
+import logging
+from valet.api.common.i18n import _
+from valet.api.common import terminate_thread
+from valet.api.v1.controllers import error
+
+from pecan import conf
+from pecan.hooks import PecanHook
+import threading
+import webob
+
+
+LOG = logging.getLogger(__name__)
+
+
+class MessageNotificationHook(PecanHook):
+    '''Send API requests/responses out as Oslo messaging notifications.'''
+    def after(self, state):
+        self.dummy = True
+        LOG.info('sending notification')
+        notifier = conf.messaging.notifier
+        status_code = state.response.status_code
+        status = webob.exc.status_map.get(status_code)
+
+        if issubclass(status, webob.exc.HTTPOk):
+            notifier_fn = notifier.info
+        else:
+            notifier_fn = notifier.error
+
+        ctxt = {}  # Not using this just yet.
+
+        request_path = state.request.path
+
+        event_type_parts = ['api']
+        api_version = state.request.path_info_pop()
+        if api_version:
+            event_type_parts.append(api_version)
+        api_subject = state.request.path_info_pop()
+        if api_subject:
+            event_type_parts.append(api_subject)
+        event_type = '.'.join(event_type_parts)
+
+        request_method = state.request.method
+        try:
+            request_body = json.loads(state.request.body)
+        except ValueError:
+            request_body = None
+        try:
+            response_body = json.loads(state.response.body)
+        except ValueError:
+            response_body = state.response.body
+
+        tenant_id = state.request.context.get('tenant_id', None)
+        user_id = state.request.context.get('user_id', None)
+
+        payload = {
+            'context': {
+                'tenant_id': tenant_id,
+                'user_id': user_id,
+            },
+            'request': {
+                'method': request_method,
+                'path': request_path,
+                'body': request_body,
+            },
+            'response': {
+                'status_code': status_code,
+                'body': response_body,
+            }
+        }
+
+        # notifier_fn blocks if RabbitMQ is down, which prevents the Valet
+        # API from returning its response, so send the notification in a
+        # separate thread instead.
+        notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload))
+        notifier_thread.start()
+        # Launch a timer so no hung threads are left behind: when the
+        # timeout expires, kill the notifier thread if it is still alive.
+        watcher = threading.Timer(conf.messaging.timeout, terminate_thread, args=[notifier_thread])
+        watcher.start()
+
+        LOG.info('valet notification hook - end')
+
+
+class NotFoundHook(PecanHook):
+    '''Catchall 'not found' hook for API'''
+    def on_error(self, state, exc):
+        '''Redirects to app-specific not_found endpoint if 404 only'''
+        self.dummy = True
+        if isinstance(exc, webob.exc.WSGIHTTPException) and exc.code == 404:
+            message = _('The resource could not be found.')
+            error('/errors/not_found', message)
diff --git a/valet/api/common/i18n.py b/valet/api/common/i18n.py
new file mode 100644
index 0000000..87e7508
--- /dev/null
+++ b/valet/api/common/i18n.py
@@ -0,0 +1,23 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""i18n library""" + +import gettext + +_ = gettext.gettext diff --git a/valet/api/common/identity.py b/valet/api/common/identity.py new file mode 100644 index 0000000..8dd4b58 --- /dev/null +++ b/valet/api/common/identity.py @@ -0,0 +1,155 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Identity helper library''' + +from datetime import datetime + +import iso8601 +# https://github.com/openstack/python-keystoneclient/blob/ +# master/keystoneclient/v2_0/client.py +# import keystoneauth1.exceptions +from keystoneauth1.identity import v2 +from keystoneauth1 import session +from keystoneclient.v2_0 import client +import logging +from pecan import conf +import pytz + +LOG = logging.getLogger(__name__) + + +def utcnow(): + '''Returns the time (UTC)''' + return datetime.now(tz=pytz.utc) + + +class Identity(object): + '''Convenience library for all identity service-related queries.''' + _args = None + _client = None + _interface = None + _session = None + + @classmethod + def is_token_admin(cls, token): + '''Returns true if decoded token has an admin role''' + for role in token.user.get('roles', []): + if role.get('name') == 'admin': + return True + return False + + @classmethod + def tenant_from_token(cls, token): + '''Returns tenant id from decoded token''' + return token.tenant.get('id', None) + + @classmethod + def user_from_token(cls, token): + '''Returns user id from decoded token''' + return token.user.get('id', None) + + def __init__(self, interface='admin', **kwargs): + '''Initializer.''' + self._interface = interface + self._args = kwargs + self._client = None + self._session = None + + @property + def _client_expired(self): + '''Returns True if cached client's token is expired.''' + # NOTE: Keystone may auto-regen the client now (v2? v3?) + # If so, this trip may no longer be necessary. Doesn't + # hurt to keep it around for the time being. 
+ if not self._client or not self._client.auth_ref: + return True + token = self._client.auth_ref.get('token') + if not token: + return True + timestamp = token.get('expires') + if not timestamp: + return True + return iso8601.parse_date(timestamp) <= utcnow() + + @property + def client(self): + '''Returns an identity client.''' + if not self._client or self._client_expired: + auth = v2.Password(**self._args) + self._session = session.Session(auth=auth) + self._client = client.Client(session=self._session, + interface=self._interface) + return self._client + + @property + def session(self): + '''Read-only access to the session.''' + return self._session + + def validate_token(self, auth_token): + '''Returns validated token or None if invalid''' + kwargs = { + 'token': auth_token, + } + try: + return self.client.tokens.validate(**kwargs) + except Exception as ex: + LOG.error("Identity.validate_token: " + ex.message) + + return None + + def is_tenant_list_valid(self, tenant_list): + '''Returns true if tenant list contains valid tenant IDs''' + tenants = self.client.tenants.list() + if isinstance(tenant_list, list): + found = False + for tenant_id in tenant_list: + found = is_tenant_in_tenants(tenant_id, tenants) + if found: + break + return found + return False + + +def is_tenant_in_tenants(tenant_id, tenants): + for tenant in tenants: + if tenant_id == tenant.id: + return True + return False + + +def _identity_engine_from_config(config): + '''Initialize the identity engine based on supplied config.''' + # Using tenant_name instead of project name due to keystone v2 + kwargs = { + 'username': config.get('username'), + 'password': config.get('password'), + 'tenant_name': config.get('project_name'), + 'auth_url': config.get('auth_url'), + } + interface = config.get('interface') + engine = Identity(interface, **kwargs) + return engine + + +def init_identity(): + '''Initialize the identity engine and place in the config.''' + config = conf.identity.config + engine = _identity_engine_from_config(config) + conf.identity.engine = engine diff --git a/valet/api/common/identity.py.save b/valet/api/common/identity.py.save new file mode 100644 index 0000000..2fe871d --- /dev/null +++ b/valet/api/common/identity.py.save @@ -0,0 +1,147 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +'''Identity helper library''' + +from datetime import datetime + +import iso8601 +# https://github.com/openstack/python-keystoneclient/blob/ +# master/keystoneclient/v2_0/client.py +import keystoneauth1.exceptions +from keystoneauth1.identity import v2 +from keystoneauth1 import session +from keystoneclient.v2_0 import client +from pecan import conf +import pytz + + +def utcnow(): + '''Returns the time (UTC)''' + return datetime.now(tz=pytz.utc) + + +class Identity(object): + '''Convenience library for all identity service-related queries.''' + _args = None + _client = None + _interface = None + _session = None + + @classmethod + def is_token_admin(cls, token): + '''Returns true if decoded token has an admin role''' + for role in token.user.get('roles', []): + if role.get('name') == 'admin': + return True + return False + + @classmethod + def tenant_from_token(cls, token): + '''Returns tenant id from decoded token''' + return token.tenant.get('id', None) + + @classmethod + def user_from_token(cls, token): + '''Returns user id from decoded token''' + return token.user.get('id', None) + + def __init__(self, interface='admin', **kwargs): + '''Initializer.''' + self._interface = interface + self._args = kwargs + self._client = None + self._session = None + + @property + def _client_expired(self): + '''Returns True if cached client's token is expired.''' + # NOTE: Keystone may auto-regen the client now (v2? v3?) + # If so, this trip may no longer be necessary. Doesn't + # hurt to keep it around for the time being. + if not self._client or not self._client.auth_ref: + return True + token = self._client.auth_ref.get('token') + if not token: + return True + timestamp = token.get('expires') + if not timestamp: + return True + return iso8601.parse_date(timestamp) <= utcnow() + + @property + def client(self): + '''Returns an identity client.''' + if not self._client or self._client_expired: + auth = v2.Password(**self._args) + self._session = session.Session(auth=auth) + self._client = client.Client(session=self._session, + interface=self._interface) + return self._client + + @property + def session(self): + '''Read-only access to the session.''' + return self._session + + def validate_token(self, auth_token): + '''Returns validated token or None if invalid''' + kwargs = { + 'token': auth_token, + } + try: + return self.client.tokens.validate(**kwargs) + except keystoneauth1.exceptions.http.NotFound: + # FIXME: Return a 404 or at least an auth required? 
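+            # For now an invalid token simply falls through to return
+            # None; the caller decides how to respond.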
+            pass
+        return None
+
+    def is_tenant_list_valid(self, tenant_list):
+        '''Returns true if tenant list contains valid tenant IDs'''
+        tenants = self.client.tenants.list()
+        if isinstance(tenant_list, list):
+            for tenant_id in tenant_list:
+                found = False
+                for tenant in tenants:
+                    if tenant_id == tenant.id:
+                        found = True
+                        break
+                if not found:
+                    return False
+            return True
+        return False
+
+
+def _identity_engine_from_config(config):
+    '''Initialize the identity engine based on supplied config.'''
+    # Using tenant_name instead of project name due to keystone v2
+    kwargs = {
+        'username': config.get('username'),
+        'password': config.get('password'),
+        'tenant_name': config.get('project_name'),
+        'auth_url': config.get('auth_url'),
+    }
+    interface = config.get('interface')
+    engine = Identity(interface, **kwargs)
+    return engine
+
+
+def init_identity():
+    '''Initialize the identity engine and place in the config.'''
+    config = conf.identity.config
+    engine = _identity_engine_from_config(config)
+    conf.identity.engine = engine
diff --git a/valet/api/common/messaging.py b/valet/api/common/messaging.py
new file mode 100644
index 0000000..3244aaf
--- /dev/null
+++ b/valet/api/common/messaging.py
@@ -0,0 +1,43 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Messaging helper library'''
+
+from oslo_config import cfg
+import oslo_messaging as messaging
+from pecan import conf
+from valet.api.conf import set_domain, DOMAIN
+
+
+def _messaging_notifier_from_config(config):
+    '''Initialize the messaging engine based on supplied config.'''
+    transport_url = config.get('transport_url')
+    transport = messaging.get_transport(cfg.CONF, transport_url)
+    notifier = messaging.Notifier(transport, driver='messaging',
+                                  publisher_id='valet',
+                                  topic='notifications', retry=10)
+    return notifier
+
+
+def init_messaging():
+    '''Initialize the messaging engine and place in the config.'''
+    set_domain(DOMAIN)
+    config = conf.messaging.config
+    notifier = _messaging_notifier_from_config(config)
+    conf.messaging.notifier = notifier
+    conf.messaging.timeout = cfg.CONF.messaging.timeout
diff --git a/valet/api/common/ostro_helper.py b/valet/api/common/ostro_helper.py
new file mode 100644
index 0000000..b68217b
--- /dev/null
+++ b/valet/api/common/ostro_helper.py
@@ -0,0 +1,315 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +'''Ostro helper library''' + +import json +import logging + +from pecan import conf +import time + +import uuid +from valet.api.common.i18n import _ +from valet.api.db.models import Group +from valet.api.db.models import PlacementRequest +from valet.api.db.models import PlacementResult +from valet.api.db.models import Query + +LOG = logging.getLogger(__name__) + +SERVICEABLE_RESOURCES = [ + 'OS::Nova::Server' +] +GROUP_ASSIGNMENT = 'ATT::Valet::GroupAssignment' +GROUP_TYPE = 'group_type' +GROUP_NAME = 'group_name' +AFFINITY = 'affinity' +DIVERSITY = 'diversity' +EXCLUSIVITY = 'exclusivity' + + +def _log(text, title="Ostro"): + '''Log helper''' + log_text = "%s: %s" % (title, text) + LOG.debug(log_text) + + +class Ostro(object): + '''Ostro optimization engine helper class.''' + + args = None + request = None + response = None + error_uri = None + tenant_id = None + + tries = None # Number of times to poll for placement. + interval = None # Interval in seconds to poll for placement. + + @classmethod + def _build_error(cls, message): + '''Build an Ostro-style error message''' + if not message: + message = _("Unknown error") + error = { + 'status': { + 'type': 'error', + 'message': message, + } + } + return error + + @classmethod + def _build_uuid_map(cls, resources): + '''Build a dict mapping names to UUIDs.''' + mapping = {} + for key in resources.iterkeys(): + if 'name' in resources[key]: + name = resources[key]['name'] + mapping[name] = key + return mapping + + @classmethod + def _sanitize_resources(cls, resources): + '''Ensure lowercase keys at the top level of each resource.''' + for res in resources.itervalues(): + for key in list(res.keys()): + if not key.islower(): + res[key.lower()] = res.pop(key) + return resources + + def __init__(self): + '''Initializer''' + self.tries = conf.music.get('tries', 10) + self.interval = conf.music.get('interval', 1) + + def _map_names_to_uuids(self, mapping, data): + '''Map resource names to their UUID equivalents.''' + if isinstance(data, dict): + for key in data.iterkeys(): + if key != 'name': + data[key] = self._map_names_to_uuids(mapping, data[key]) + elif isinstance(data, list): + for key, value in enumerate(data): + data[key] = self._map_names_to_uuids(mapping, value) + elif isinstance(data, basestring) and data in mapping: + return mapping[data] + return data + + def _prepare_resources(self, resources): + ''' Pre-digests resource data for use by Ostro. + + Maps Heat resource names to Orchestration UUIDs. + Ensures exclusivity groups exist and have tenant_id as a member. + ''' + mapping = self._build_uuid_map(resources) + ostro_resources = self._map_names_to_uuids(mapping, resources) + self._sanitize_resources(ostro_resources) + + verify_error = self._verify_groups(ostro_resources, self.tenant_id) + if isinstance(verify_error, dict): + return verify_error + return {'resources': ostro_resources} + + # TODO(JD): This really belongs in valet-engine once it exists. + def _send(self, stack_id, request): + '''Send request.''' + + # Creating the placement request effectively enqueues it. + PlacementRequest(stack_id=stack_id, request=request) # pylint: disable=W0612 + + # Wait for a response. + # TODO(JD): This is a blocking operation at the moment. 
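+        # Poll the placement_results table up to self.tries times, sleeping
+        # self.interval seconds between attempts; the result row is deleted
+        # once it has been consumed.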
+        for __ in range(self.tries, 0, -1):  # pylint: disable=W0612
+            query = Query(PlacementResult)
+            placement_result = query.filter_by(stack_id=stack_id).first()
+            if placement_result:
+                placement = placement_result.placement
+                placement_result.delete()
+                return placement
+            else:
+                time.sleep(self.interval)
+
+        self.error_uri = '/errors/server_error'
+        message = "Timed out waiting for a response."
+        response = self._build_error(message)
+        return json.dumps(response)
+
+    def _verify_groups(self, resources, tenant_id):
+        ''' Verifies group settings.
+
+        Returns an error status dict if the group type is invalid, if a
+        group name is used when the type is affinity or diversity, if a
+        nonexistent exclusivity group is found, or if the tenant is not
+        a group member. Returns None if ok.
+        '''
+        message = None
+        for res in resources.itervalues():
+            res_type = res.get('type')
+            if res_type == GROUP_ASSIGNMENT:
+                properties = res.get('properties')
+                group_type = properties.get(GROUP_TYPE, '').lower()
+                group_name = properties.get(GROUP_NAME, '').lower()
+                if group_type == AFFINITY or \
+                        group_type == DIVERSITY:
+                    if group_name:
+                        self.error_uri = '/errors/conflict'
+                        message = _("{0} must not be used when {1} is '{2}'.").format(GROUP_NAME, GROUP_TYPE, group_type)
+                        break
+                elif group_type == EXCLUSIVITY:
+                    message = self._verify_exclusivity(group_name, tenant_id)
+                else:
+                    self.error_uri = '/errors/invalid'
+                    message = _("{0} '{1}' is invalid.").format(GROUP_TYPE, group_type)
+                    break
+        if message:
+            return self._build_error(message)
+
+    def _verify_exclusivity(self, group_name, tenant_id):
+        return_message = None
+        if not group_name:
+            self.error_uri = '/errors/invalid'
+            return _("{0} must be used when {1} is '{2}'.").format(GROUP_NAME, GROUP_TYPE, EXCLUSIVITY)
+
+        group = Group.query.filter_by(  # pylint: disable=E1101
+            name=group_name).first()
+        if not group:
+            self.error_uri = '/errors/not_found'
+            return_message = "%s '%s' not found" % (GROUP_NAME, group_name)
+        elif group and tenant_id not in group.members:
+            self.error_uri = '/errors/conflict'
+            return_message = _("Tenant ID {0} is not a member of {1} '{2}' ({3})").format(self.tenant_id, GROUP_NAME, group.name, group.id)
+        return return_message
+
+    def build_request(self, **kwargs):
+        ''' Build an Ostro request.
+
+        If False is returned, the response attribute contains status
+        as to the error.
+        '''
+
+        # TODO(JD): Refactor this into create and update methods?
+        self.args = kwargs.get('args')
+        self.tenant_id = kwargs.get('tenant_id')
+        self.response = None
+        self.error_uri = None
+
+        resources = self.args['resources']
+        if 'resources_update' in self.args:
+            action = 'update'
+            resources_update = self.args['resources_update']
+        else:
+            action = 'create'
+            resources_update = None
+
+        # If we get any status in the response, it's an error. Bail.
+        self.response = self._prepare_resources(resources)
+        if 'status' in self.response:
+            return False
+
+        self.request = {
+            "action": action,
+            "resources": self.response['resources'],
+            "stack_id": self.args['stack_id'],
+        }
+
+        if resources_update:
+            # If we get any status in the response, it's an error. Bail.
+            self.response = self._prepare_resources(resources_update)
+            if 'status' in self.response:
+                return False
+            self.request['resources_update'] = self.response['resources']
+
+        return True
+
+    def is_request_serviceable(self):
+        '''Returns true if the request has at least one serviceable resource.'''
+        # TODO(JD): Ostro should return no placements vs throw an error.
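+        # Only resource types listed in SERVICEABLE_RESOURCES (currently
+        # just OS::Nova::Server) count as serviceable.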
+ resources = self.request.get('resources', {}) + for res in resources.itervalues(): + res_type = res.get('type') + if res_type and res_type in SERVICEABLE_RESOURCES: + return True + return False + + def ping(self): + '''Send a ping request and obtain a response.''' + stack_id = str(uuid.uuid4()) + self.args = {'stack_id': stack_id} + self.response = None + self.error_uri = None + self.request = { + "action": "ping", + "stack_id": stack_id, + } + + def replan(self, **kwargs): + '''Replan a placement.''' + self.args = kwargs.get('args') + self.response = None + self.error_uri = None + self.request = { + "action": "replan", + "stack_id": self.args['stack_id'], + "locations": self.args['locations'], + "orchestration_id": self.args['orchestration_id'], + "exclusions": self.args['exclusions'], + } + + def migrate(self, **kwargs): + '''Replan the placement for an existing resource.''' + self.args = kwargs.get('args') + self.response = None + self.error_uri = None + self.request = { + "action": "migrate", + "stack_id": self.args['stack_id'], + "excluded_hosts": self.args['excluded_hosts'], + "orchestration_id": self.args['orchestration_id'], + } + + def query(self, **kwargs): + '''Send a query.''' + stack_id = str(uuid.uuid4()) + self.args = kwargs.get('args') + self.args['stack_id'] = stack_id + self.response = None + self.error_uri = None + self.request = { + "action": "query", + "stack_id": self.args['stack_id'], + "type": self.args['type'], + "parameters": self.args['parameters'], + } + + def send(self): + '''Send the request and obtain a response.''' + request_json = json.dumps([self.request]) + + # TODO(JD): Pass timeout value? + _log(request_json, 'Ostro Request') + result = self._send(self.args['stack_id'], request_json) + _log(result, 'Ostro Response') + + self.response = json.loads(result) + + status_type = self.response['status']['type'] + if status_type != 'ok': + self.error_uri = '/errors/server_error' + + return self.response diff --git a/valet/api/conf.py b/valet/api/conf.py new file mode 100644 index 0000000..fc20fbb --- /dev/null +++ b/valet/api/conf.py @@ -0,0 +1,69 @@ +from oslo_config import cfg + + +DOMAIN = 'valet' + + +CONF = cfg.CONF + +server_group = cfg.OptGroup(name='server', title='Valet API Server conf') +server_opts = [ + cfg.StrOpt('host', default='0.0.0.0'), + cfg.StrOpt('port', default='8090'), +] + + +messaging_group = cfg.OptGroup(name='messaging', title='Valet Messaging conf') +messaging_opts = [ + cfg.StrOpt('username'), + cfg.StrOpt('password'), + cfg.StrOpt('host'), + cfg.IntOpt('port', default=5672), + cfg.IntOpt('timeout', default=3), +] + + +identity_group = cfg.OptGroup(name='identity', title='Valet identity conf') +identity_opts = [ + cfg.StrOpt('username'), + cfg.StrOpt('password'), + cfg.StrOpt('project_name'), + cfg.StrOpt('auth_url', default='http://controller:5000/v2.0'), + cfg.StrOpt('interface', default='admin'), +] + + +music_group = cfg.OptGroup(name='music', title='Valet Persistence conf') +music_opts = [ + cfg.StrOpt('host', default='0.0.0.0'), + cfg.IntOpt('port', default=8080), + cfg.StrOpt('keyspace', default='valet'), + cfg.IntOpt('replication_factor', default=3), + cfg.IntOpt('tries', default=10), + cfg.IntOpt('interval', default=1), + cfg.StrOpt('request_table', default='placement_requests'), + cfg.StrOpt('response_table', default='placement_results'), + cfg.StrOpt('event_table', default='oslo_messages'), + cfg.StrOpt('resource_table', default='resource_status'), + cfg.StrOpt('app_table', default='app'), + 
cfg.StrOpt('resource_index_table', default='resource_log_index'), + cfg.StrOpt('app_index_table', default='app_log_index'), + cfg.StrOpt('uuid_table', default='uuid_map'), + cfg.StrOpt('db_host', default='localhost'), + # cfg.ListOpt('db_hosts', default='valet1,valet2,valet3') +] + + +def set_domain(project=DOMAIN): + CONF([], project) + + +def register_conf(): + CONF.register_group(server_group) + CONF.register_opts(server_opts, server_group) + CONF.register_group(music_group) + CONF.register_opts(music_opts, music_group) + CONF.register_group(identity_group) + CONF.register_opts(identity_opts, identity_group) + CONF.register_group(messaging_group) + CONF.register_opts(messaging_opts, messaging_group) diff --git a/valet/api/db/__init__.py b/valet/api/db/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/api/db/models/__init__.py b/valet/api/db/models/__init__.py new file mode 100644 index 0000000..e11cd78 --- /dev/null +++ b/valet/api/db/models/__init__.py @@ -0,0 +1,23 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=W0401 + +# Leave this here. We will eventually bring back sqlalchemy. +# When that happens, this needs to become a config option. +from .music import * # noqa diff --git a/valet/api/db/models/music/__init__.py b/valet/api/db/models/music/__init__.py new file mode 100644 index 0000000..75419dd --- /dev/null +++ b/valet/api/db/models/music/__init__.py @@ -0,0 +1,303 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +'''Music ORM - Common Methods''' + +from abc import ABCMeta, abstractmethod +import inspect +from pecan import conf +import six +import uuid +from valet.api.common.i18n import _ +from valet.api.db.models.music.music import Music + + +def get_class(kls): + '''Returns a class given a fully qualified class name''' + parts = kls.split('.') + module = ".".join(parts[:-1]) + mod = __import__(module) + for comp in parts[1:]: + mod = getattr(mod, comp) + return mod + + +class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903 + '''Abstract Class Method from Python 3.3's abc module''' + + __isabstractmethod__ = True + + def __init__(self, callable): # pylint: disable=W0622 + callable.__isabstractmethod__ = True + super(abstractclassmethod, self).__init__(callable) + + +class ClassPropertyDescriptor(object): # pylint: disable=R0903 + '''Supports the notion of a class property''' + + def __init__(self, fget, fset=None): + '''Initializer''' + self.fget = fget + self.fset = fset + + def __get__(self, obj, klass=None): + '''Get attribute''' + if klass is None: + klass = type(obj) + return self.fget.__get__(obj, klass)() + + def __set__(self, obj, value): + '''Set attribute''' + if not self.fset: + raise AttributeError(_("Can't set attribute")) + type_ = type(obj) + return self.fset.__get__(obj, type_)(value) + + def setter(self, func): + '''Setter''' + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + self.fset = func + return self + + +def classproperty(func): + '''Class Property decorator''' + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + + return ClassPropertyDescriptor(func) + + +class Results(list): + '''Query results''' + + def __init__(self, *args, **kwargs): # pylint: disable=W0613 + '''Initializer''' + super(Results, self).__init__(args[0]) + + def all(self): + '''Return all''' + return self + + def first(self): + '''Return first''' + if len(self) > 0: + return self[0] + + +@six.add_metaclass(ABCMeta) +class Base(object): + ''' A custom declarative base that provides some Elixir-inspired shortcuts. 
''' + + __tablename__ = None + + @classproperty + def query(cls): # pylint: disable=E0213 + '''Return a query object a la sqlalchemy''' + return Query(cls) + + @classmethod + def __kwargs(cls): + '''Return common keyword args''' + keyspace = conf.music.get('keyspace') + kwargs = { + 'keyspace': keyspace, + 'table': cls.__tablename__, + } + return kwargs + + @classmethod + def create_table(cls): + '''Create table''' + kwargs = cls.__kwargs() + kwargs['schema'] = cls.schema() + conf.music.engine.create_table(**kwargs) + + @abstractclassmethod + def schema(cls): + '''Return schema''' + return cls() + + @abstractclassmethod + def pk_name(cls): + '''Primary key name''' + return cls() + + @abstractmethod + def pk_value(self): + '''Primary key value''' + pass + + @abstractmethod + def values(self): + '''Values''' + pass + + def insert(self): + '''Insert row''' + kwargs = self.__kwargs() + kwargs['values'] = self.values() + pk_name = self.pk_name() + if pk_name not in kwargs['values']: + the_id = str(uuid.uuid4()) + kwargs['values'][pk_name] = the_id + setattr(self, pk_name, the_id) + conf.music.engine.create_row(**kwargs) + + def update(self): + '''Update row''' + kwargs = self.__kwargs() + kwargs['pk_name'] = self.pk_name() + kwargs['pk_value'] = self.pk_value() + kwargs['values'] = self.values() + conf.music.engine.update_row_eventually(**kwargs) + + def delete(self): + '''Delete row''' + kwargs = self.__kwargs() + kwargs['pk_name'] = self.pk_name() + kwargs['pk_value'] = self.pk_value() + conf.music.engine.delete_row_eventually(**kwargs) + + @classmethod + def filter_by(cls, **kwargs): + '''Filter objects''' + return cls.query.filter_by(**kwargs) # pylint: disable=E1101 + + def flush(self, *args, **kwargs): + '''Flush changes to storage''' + # TODO(JD): Implement in music? May be a no-op + pass + + def as_dict(self): + '''Return object representation as a dictionary''' + return dict((k, v) for k, v in self.__dict__.items() + if not k.startswith('_')) + + +class Query(object): + '''Data Query''' + model = None + + def __init__(self, model): + '''Initializer''' + if inspect.isclass(model): + self.model = model + elif isinstance(model, basestring): + self.model = get_class('valet.api.db.models.' + model) + assert inspect.isclass(self.model) + + def __kwargs(self): + '''Return common keyword args''' + keyspace = conf.music.get('keyspace') + kwargs = { + 'keyspace': keyspace, + 'table': self.model.__tablename__, # pylint: disable=E1101 + } + return kwargs + + def __rows_to_objects(self, rows): + '''Convert query response rows to objects''' + results = [] + pk_name = self.model.pk_name() # pylint: disable=E1101 + for __, row in rows.iteritems(): # pylint: disable=W0612 + the_id = row.pop(pk_name) + result = self.model(_insert=False, **row) + setattr(result, pk_name, the_id) + results.append(result) + return Results(results) + + def all(self): + '''Return all objects''' + kwargs = self.__kwargs() + rows = conf.music.engine.read_all_rows(**kwargs) + return self.__rows_to_objects(rows) + + def filter_by(self, **kwargs): + '''Filter objects''' + # Music doesn't allow filtering on anything but the primary key. + # We need to get all items and then go looking for what we want. + all_items = self.all() + filtered_items = Results([]) + + # For every candidate ... + for item in all_items: + passes = True + # All filters are AND-ed. 
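+            # A candidate is kept only if every keyword argument matches
+            # the corresponding attribute exactly.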
+ for key, value in kwargs.items(): + if getattr(item, key) != value: + passes = False + break + if passes: + filtered_items.append(item) + return filtered_items + + +def init_model(): + '''Data Store Initialization''' + conf.music.engine = _engine_from_config(conf.music) + keyspace = conf.music.get('keyspace') + conf.music.engine.create_keyspace(keyspace) + + +def _engine_from_config(configuration): + '''Create database engine object based on configuration''' + configuration = dict(configuration) + kwargs = { + 'host': configuration.get('host'), + 'port': configuration.get('port'), + 'replication_factor': configuration.get('replication_factor'), + } + return Music(**kwargs) + + +def start(): + '''Start transaction''' + pass + + +def start_read_only(): + '''Start read-only transaction''' + start() + + +def commit(): + '''Commit transaction''' + pass + + +def rollback(): + '''Rollback transaction''' + pass + + +def clear(): + '''Clear transaction''' + pass + + +def flush(): + '''Flush to disk''' + pass + + +from groups import Group # noqa +from ostro import PlacementRequest, PlacementResult, Event # noqa +from placements import Placement # noqa +from plans import Plan # noqa diff --git a/valet/api/db/models/music/groups.py b/valet/api/db/models/music/groups.py new file mode 100644 index 0000000..623716c --- /dev/null +++ b/valet/api/db/models/music/groups.py @@ -0,0 +1,94 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Group Model''' + +from . import Base +import simplejson + + +class Group(Base): + '''Group model''' + __tablename__ = 'groups' + + id = None # pylint: disable=C0103 + name = None + description = None + type = None # pylint: disable=W0622 + members = None + + @classmethod + def schema(cls): + '''Return schema.''' + schema = { + 'id': 'text', + 'name': 'text', + 'description': 'text', + 'type': 'text', + 'members': 'text', + 'PRIMARY KEY': '(id)', + } + return schema + + @classmethod + def pk_name(cls): + '''Primary key name''' + return 'id' + + def pk_value(self): + '''Primary key value''' + return self.id + + def values(self): + '''Values''' + # TODO(JD): Support lists in Music + # Lists aren't directly supported in Music, so we have to + # convert to/from json on the way out/in. 
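+        # 'members' goes out as a JSON-encoded text column; __init__
+        # decodes it again on the way back in.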
+        return {
+            'name': self.name,
+            'description': self.description,
+            'type': self.type,
+            'members': simplejson.dumps(self.members),
+        }
+
+    def __init__(self, name, description, type, members, _insert=True):
+        '''Initializer'''
+        super(Group, self).__init__()
+        self.name = name
+        self.description = description or ""
+        self.type = type
+        if _insert:
+            self.members = []  # members ignored at init time
+            self.insert()
+        else:
+            # TODO(JD): Support lists in Music
+            self.members = simplejson.loads(members)
+
+    def __repr__(self):
+        '''Object representation'''
+        return '<Group %r>' % self.name
+
+    def __json__(self):
+        '''JSON representation'''
+        json_ = {}
+        json_['id'] = self.id
+        json_['name'] = self.name
+        json_['description'] = self.description
+        json_['type'] = self.type
+        json_['members'] = self.members
+        return json_
diff --git a/valet/api/db/models/music/music.py b/valet/api/db/models/music/music.py
new file mode 100644
index 0000000..652b83c
--- /dev/null
+++ b/valet/api/db/models/music/music.py
@@ -0,0 +1,335 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Music Data Store API'''
+
+import json
+import logging
+import time
+
+from valet.api.common.i18n import _
+
+import requests
+
+LOG = logging.getLogger(__name__)
+
+
+class REST(object):
+    '''Helper class for REST operations.'''
+
+    hosts = None
+    port = None
+    path = None
+    timeout = None
+
+    _urls = None
+
+    def __init__(self, hosts, port, path='/', timeout='10'):
+        '''Initializer. Accepts target host list, port, and path.'''
+
+        self.hosts = hosts  # List of IP or FQDNs
+        self.port = port  # Port Number
+        self.path = path  # Path starting with /
+        self.timeout = float(timeout)  # REST request timeout in seconds
+
+    @property
+    def urls(self):
+        '''Returns list of URLs using each host, plus the port/path.'''
+
+        if not self._urls:
+            urls = []
+            for host in self.hosts:
+                # Must end without a slash
+                urls.append('http://%(host)s:%(port)s%(path)s' % {
+                    'host': host,
+                    'port': self.port,
+                    'path': self.path,
+                })
+            self._urls = urls
+        return self._urls
+
+    @staticmethod
+    def __headers(content_type='application/json'):
+        '''Returns HTTP request headers.'''
+        headers = {
+            'accept': content_type,
+            'content-type': content_type,
+        }
+        return headers
+
+    def request(self, method='get', content_type='application/json', path='/', data=None):
+        '''Performs an HTTP request.'''
+        if method not in ('post', 'get', 'put', 'delete'):
+            raise KeyError(_("Method must be one of post, get, put, or delete."))
+        method_fn = getattr(requests, method)
+
+        response = None
+        for url in self.urls:
+            # Try each url in turn. First one to succeed wins.
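+            # On failure, fall through and try the next URL; a synthesized
+            # Response (408 on timeout, 400 otherwise) records the last error.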
+ full_url = url + path + try: + data_json = json.dumps(data) if data else None + LOG.debug("Music Request: %s %s%s", method.upper(), full_url, + data_json if data else '') + response = method_fn(full_url, data=data_json, + headers=self.__headers(content_type), + timeout=self.timeout) + response.raise_for_status() + return response + except requests.exceptions.Timeout as err: + response = requests.Response() + response.status_code = 408 + response.url = full_url + LOG.debug("Music: %s", err.message) + except requests.exceptions.RequestException as err: + response = requests.Response() + response.status_code = 400 + response.url = full_url + LOG.debug("Music: %s", err.message) + + # If we get here, an exception was raised for every url, + # but we passed so we could try each endpoint. Raise status + # for the last attempt (for now) so that we report something. + if response: + response.raise_for_status() + + +class Music(object): + '''Wrapper for Music API''' + lock_names = None # Cache of lock names created during session + lock_timeout = None # Maximum time in seconds to acquire a lock + + rest = None # API Endpoint + replication_factor = None # Number of Music nodes to replicate across + + def __init__(self, host=None, hosts=None, # pylint: disable=R0913 + port='8080', lock_timeout=10, replication_factor=3): + '''Initializer. Accepts a lock_timeout for atomic operations.''' + + # If one host is provided, that overrides the list + if not hosts: + hosts = ['localhost'] + if host: + hosts = [host] + + kwargs = { + 'hosts': hosts, + 'port': port, + 'path': '/MUSIC/rest', + } + self.rest = REST(**kwargs) + + self.lock_names = [] + self.lock_timeout = lock_timeout + + self.replication_factor = replication_factor + + def create_keyspace(self, keyspace): + '''Creates a keyspace.''' + data = { + 'replicationInfo': { + # 'class': 'NetworkTopologyStrategy', + # 'dc1': self.replication_factor, + 'class': 'SimpleStrategy', + 'replication_factor': self.replication_factor, + }, + 'durabilityOfWrites': True, + 'consistencyInfo': { + 'type': 'eventual', + }, + } + + path = '/keyspaces/%s' % keyspace + response = self.rest.request(method='post', path=path, data=data) + return response.ok + + def create_table(self, keyspace, table, schema): + '''Creates a table.''' + data = { + 'fields': schema, + 'consistencyInfo': { + 'type': 'eventual', + }, + } + + path = '/keyspaces/%(keyspace)s/tables/%(table)s/' % { + 'keyspace': keyspace, + 'table': table, + } + + response = self.rest.request(method='post', path=path, data=data) + return response.ok + + def version(self): + '''Returns version string.''' + path = '/version' + response = self.rest.request(method='get', + content_type='text/plain', path=path) + return response.text + + def create_row(self, keyspace, table, values): + '''Create a row.''' + data = { + 'values': values, + 'consistencyInfo': { + 'type': 'eventual', + }, + } + + path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % { + 'keyspace': keyspace, + 'table': table, + } + response = self.rest.request(method='post', path=path, data=data) + return response.ok + + def create_lock(self, lock_name): + '''Returns the lock id. 
Use for acquiring and releasing.''' + path = '/locks/create/%s' % lock_name + response = self.rest.request(method='post', + content_type='text/plain', path=path) + return response.text + + def acquire_lock(self, lock_id): + '''Acquire a lock.''' + path = '/locks/acquire/%s' % lock_id + response = self.rest.request(method='get', + content_type='text/plain', path=path) + + return response.text.lower() == 'true' + + def release_lock(self, lock_id): + '''Release a lock.''' + path = '/locks/release/%s' % lock_id + response = self.rest.request(method='delete', + content_type='text/plain', path=path) + return response.ok + + @staticmethod + def __row_url_path(keyspace, table, pk_name, pk_value): + '''Returns a Music-compliant row URL path.''' + path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % { + 'keyspace': keyspace, + 'table': table, + } + + if pk_name and pk_value: + path += '?%s=%s' % (pk_name, pk_value) + return path + + def update_row_eventually(self, keyspace, table, # pylint: disable=R0913 + pk_name, pk_value, values): + '''Update a row. Not atomic.''' + data = { + 'values': values, + 'consistencyInfo': { + 'type': 'eventual', + }, + } + + path = self.__row_url_path(keyspace, table, pk_name, pk_value) + response = self.rest.request(method='put', path=path, data=data) + return response.ok + + def update_row_atomically(self, keyspace, table, # pylint: disable=R0913 + pk_name, pk_value, values): + '''Update a row atomically.''' + + # Create lock for the candidate. The Music API dictates that the + # lock name must be of the form keyspace.table.primary_key + lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % { + 'keyspace': keyspace, + 'table': table, + 'primary_key': pk_value, + } + self.lock_names.append(lock_name) + lock_id = self.create_lock(lock_name) + + time_now = time.time() + while not self.acquire_lock(lock_id): + if time.time() - time_now > self.lock_timeout: + raise IndexError(_('Lock acquire timeout: %s') % lock_name) + + # Update entry now that we have the lock. + data = { + 'values': values, + 'consistencyInfo': { + 'type': 'atomic', + 'lockId': lock_id, + }, + } + + path = self.__row_url_path(keyspace, table, pk_name, pk_value) + response = self.rest.request(method='put', path=path, data=data) + + # Release lock now that the operation is done. + self.release_lock(lock_id) + # FIXME: Wouldn't we delete the lock at this point? + + return response.ok + + def delete_row_eventually(self, keyspace, table, pk_name, pk_value): + '''Delete a row. 
Not atomic.''' + data = { + 'consistencyInfo': { + 'type': 'eventual', + }, + } + + path = self.__row_url_path(keyspace, table, pk_name, pk_value) + response = self.rest.request(method='delete', path=path, data=data) + return response.ok + + def read_row(self, keyspace, table, pk_name, pk_value, log=None): + '''Read one row based on a primary key name/value.''' + path = self.__row_url_path(keyspace, table, pk_name, pk_value) + response = self.rest.request(path=path) + if log: + log.debug("response is %s, path is %s" % (response, path)) + return response.json() + + def read_all_rows(self, keyspace, table): + '''Read all rows.''' + return self.read_row(keyspace, table, pk_name=None, pk_value=None) + + def drop_keyspace(self, keyspace): + '''Drops a keyspace.''' + data = { + 'consistencyInfo': { + 'type': 'eventual', + }, + } + + path = '/keyspaces/%s' % keyspace + response = self.rest.request(method='delete', path=path, data=data) + return response.ok + + def delete_lock(self, lock_name): + '''Deletes a lock by name.''' + path = '/locks/delete/%s' % lock_name + response = self.rest.request(content_type='text/plain', + method='delete', path=path) + return response.ok + + def delete_all_locks(self): + '''Delete all locks created during the lifetime of this object.''' + + # TODO(JD): Shouldn't this really be part of internal cleanup? + # FIXME: It can be several API calls. Any way to do in one fell swoop? + for lock_name in self.lock_names: + self.delete_lock(lock_name) diff --git a/valet/api/db/models/music/ostro.py b/valet/api/db/models/music/ostro.py new file mode 100644 index 0000000..fd0aa07 --- /dev/null +++ b/valet/api/db/models/music/ostro.py @@ -0,0 +1,180 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Ostro Models''' + +from . 
import Base
+
+
+class PlacementRequest(Base):
+    '''Placement Request Model'''
+    __tablename__ = 'placement_requests'
+
+    stack_id = None
+    request = None
+
+    @classmethod
+    def schema(cls):
+        '''Return schema.'''
+        schema = {
+            'stack_id': 'text',
+            'request': 'text',
+            'PRIMARY KEY': '(stack_id)',
+        }
+        return schema
+
+    @classmethod
+    def pk_name(cls):
+        '''Primary key name'''
+        return 'stack_id'
+
+    def pk_value(self):
+        '''Primary key value'''
+        return self.stack_id
+
+    def values(self):
+        '''Values'''
+        return {
+            'stack_id': self.stack_id,
+            'request': self.request,
+        }
+
+    def __init__(self, request, stack_id=None, _insert=True):
+        '''Initializer'''
+        super(PlacementRequest, self).__init__()
+        self.stack_id = stack_id
+        self.request = request
+        if _insert:
+            self.insert()
+
+    def __repr__(self):
+        '''Object representation'''
+        return '<PlacementRequest %r>' % self.stack_id
+
+    def __json__(self):
+        '''JSON representation'''
+        json_ = {}
+        json_['stack_id'] = self.stack_id
+        json_['request'] = self.request
+        return json_
+
+
+class PlacementResult(Base):
+    '''Placement Result Model'''
+    __tablename__ = 'placement_results'
+
+    stack_id = None
+    placement = None
+
+    @classmethod
+    def schema(cls):
+        '''Return schema.'''
+        schema = {
+            'stack_id': 'text',
+            'placement': 'text',
+            'PRIMARY KEY': '(stack_id)',
+        }
+        return schema
+
+    @classmethod
+    def pk_name(cls):
+        '''Primary key name'''
+        return 'stack_id'
+
+    def pk_value(self):
+        '''Primary key value'''
+        return self.stack_id
+
+    def values(self):
+        '''Values'''
+        return {
+            'stack_id': self.stack_id,
+            'placement': self.placement,
+        }
+
+    def __init__(self, placement, stack_id=None, _insert=True):
+        '''Initializer'''
+        super(PlacementResult, self).__init__()
+        self.stack_id = stack_id
+        self.placement = placement
+        if _insert:
+            self.insert()
+
+    def __repr__(self):
+        '''Object representation'''
+        return '<PlacementResult %r>' % self.stack_id
+
+    def __json__(self):
+        '''JSON representation'''
+        json_ = {}
+        json_['stack_id'] = self.stack_id
+        json_['placement'] = self.placement
+        return json_
+
+
+class Event(Base):
+    '''Event Model'''
+    __tablename__ = 'events'
+
+    event_id = None
+    event = None
+
+    @classmethod
+    def schema(cls):
+        '''Return schema.'''
+        schema = {
+            'event_id': 'text',
+            'event': 'text',
+            'PRIMARY KEY': '(event_id)',
+        }
+        return schema
+
+    @classmethod
+    def pk_name(cls):
+        '''Primary key name'''
+        return 'event_id'
+
+    def pk_value(self):
+        '''Primary key value'''
+        return self.event_id
+
+    def values(self):
+        '''Values'''
+        return {
+            'event_id': self.event_id,
+            'event': self.event,
+        }
+
+    def __init__(self, event, event_id=None, _insert=True):
+        '''Initializer'''
+        super(Event, self).__init__()
+        self.event_id = event_id
+        self.event = event
+        if _insert:
+            self.insert()
+
+    def __repr__(self):
+        '''Object representation'''
+        return '<Event %r>' % self.event_id
+
+    def __json__(self):
+        '''JSON representation'''
+        json_ = {}
+        json_['event_id'] = self.event_id
+        json_['event'] = self.event
+        return json_
diff --git a/valet/api/db/models/music/placements.py b/valet/api/db/models/music/placements.py
new file mode 100644
index 0000000..d329d33
--- /dev/null
+++ b/valet/api/db/models/music/placements.py
@@ -0,0 +1,101 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Placement Model'''
+
+from . import Base, Query
+
+
+class Placement(Base):
+    '''Placement Model'''
+    __tablename__ = 'placements'
+
+    id = None  # pylint: disable=C0103
+    name = None
+    orchestration_id = None
+    resource_id = None
+    location = None
+    plan_id = None
+    plan = None
+
+    @classmethod
+    def schema(cls):
+        '''Return schema.'''
+        schema = {
+            'id': 'text',
+            'name': 'text',
+            'orchestration_id': 'text',
+            'resource_id': 'text',
+            'location': 'text',
+            'reserved': 'boolean',
+            'plan_id': 'text',
+            'PRIMARY KEY': '(id)',
+        }
+        return schema
+
+    @classmethod
+    def pk_name(cls):
+        '''Primary key name'''
+        return 'id'
+
+    def pk_value(self):
+        '''Primary key value'''
+        return self.id
+
+    def values(self):
+        '''Values'''
+        return {
+            'name': self.name,
+            'orchestration_id': self.orchestration_id,
+            'resource_id': self.resource_id,
+            'location': self.location,
+            'reserved': self.reserved,
+            'plan_id': self.plan_id,
+        }
+
+    def __init__(self, name, orchestration_id, resource_id=None, plan=None,
+                 plan_id=None, location=None, reserved=False, _insert=True):
+        '''Initializer'''
+        super(Placement, self).__init__()
+        self.name = name
+        self.orchestration_id = orchestration_id
+        self.resource_id = resource_id
+        if plan_id:
+            plan = Query("Plan").filter_by(id=plan_id).first()
+        self.plan = plan
+        self.plan_id = plan.id
+        self.location = location
+        self.reserved = reserved
+        if _insert:
+            self.insert()
+
+    def __repr__(self):
+        '''Object representation'''
+        return '<Placement %r>' % self.name
+
+    def __json__(self):
+        '''JSON representation'''
+        json_ = {}
+        json_['id'] = self.id
+        json_['name'] = self.name
+        json_['orchestration_id'] = self.orchestration_id
+        json_['resource_id'] = self.resource_id
+        json_['location'] = self.location
+        json_['reserved'] = self.reserved
+        json_['plan_id'] = self.plan.id
+        return json_
diff --git a/valet/api/db/models/music/plans.py b/valet/api/db/models/music/plans.py
new file mode 100644
index 0000000..bd70f75
--- /dev/null
+++ b/valet/api/db/models/music/plans.py
@@ -0,0 +1,98 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Plan Model'''
+
+from . import Base, Query
import Base, Query + + +class Plan(Base): + '''Plan model''' + __tablename__ = 'plans' + + id = None # pylint: disable=C0103 + name = None + stack_id = None + + @classmethod + def schema(cls): + '''Return schema.''' + schema = { + 'id': 'text', + 'name': 'text', + 'stack_id': 'text', + 'PRIMARY KEY': '(id)', + } + return schema + + @classmethod + def pk_name(cls): + '''Primary key name''' + return 'id' + + def pk_value(self): + '''Primary key value''' + return self.id + + def values(self): + '''Values''' + return { + 'name': self.name, + 'stack_id': self.stack_id, + } + + def __init__(self, name, stack_id, _insert=True): + '''Initializer''' + super(Plan, self).__init__() + self.name = name + self.stack_id = stack_id + if _insert: + self.insert() + + def placements(self): + '''Return list of placements''' + + # TODO(JD): Make this a property? + all_results = Query("Placement").all() + results = [] + for placement in all_results: + if placement.plan_id == self.id: + results.append(placement) + return results + + @property + def orchestration_ids(self): + '''Return list of orchestration IDs''' + return list(set([p.orchestration_id for p in self.placements()])) + + def __repr__(self): + '''Object representation''' + return '' % self.name + + def __json__(self): + '''JSON representation''' + json_ = {} + json_['id'] = self.id + json_['stack_id'] = self.stack_id + json_['name'] = self.name + json_['placements'] = {} + for placement in self.placements(): + json_['placements'][placement.orchestration_id] = dict( + name=placement.name, + location=placement.location) + return json_ diff --git a/valet/api/v1/__init__.py b/valet/api/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/api/v1/commands/__init__.py b/valet/api/v1/commands/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/api/v1/commands/populate.py b/valet/api/v1/commands/populate.py new file mode 100644 index 0000000..606580c --- /dev/null +++ b/valet/api/v1/commands/populate.py @@ -0,0 +1,72 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
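The populate command that follows creates one Music table per model defined above. As a rough illustration of what each model's schema() dictionary implies at the CQL level, a minimal sketch (schema_to_cql is an invented helper; Music's real DDL path lives in music.py and may differ):

# Sketch only, not Valet code: map a model's schema() dict to CQL DDL.
# Column order is arbitrary because schema() is a plain dict.
def schema_to_cql(keyspace, table, schema):
    schema = dict(schema)            # work on a copy
    pk = schema.pop('PRIMARY KEY')   # e.g. '(id)'
    columns = ', '.join('%s %s' % kv for kv in schema.items())
    return 'CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY %s);' % (
        keyspace, table, columns, pk)

print(schema_to_cql('valet', 'plans',
                    {'id': 'text', 'name': 'text', 'stack_id': 'text',
                     'PRIMARY KEY': '(id)'}))
# e.g. CREATE TABLE IF NOT EXISTS valet.plans (... , PRIMARY KEY (id));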
+
+'''Populate command'''
+
+from pecan.commands.base import BaseCommand
+# from pecan import conf
+
+from valet.api.common.i18n import _
+from valet.api.conf import register_conf, set_domain
+from valet.api.db import models
+from valet.api.db.models import Event
+from valet.api.db.models import Group
+from valet.api.db.models import Placement
+from valet.api.db.models import PlacementRequest
+from valet.api.db.models import PlacementResult
+from valet.api.db.models import Plan
+
+
+def out(string):
+    '''Output helper'''
+    print("==> %s" % string)
+
+
+class PopulateCommand(BaseCommand):
+    '''Load a pecan environment and initialize the database.'''
+
+    def run(self, args):
+        super(PopulateCommand, self).run(args)
+        out(_("Loading environment"))
+        register_conf()
+        set_domain()
+        self.load_app()
+        out(_("Building schema"))
+        try:
+            out(_("Starting a transaction..."))
+            models.start()
+
+            # FIXME: There's no create_all equivalent for Music.
+            # models.Base.metadata.create_all(conf.sqlalchemy.engine)
+
+            # Valet
+            Group.create_table()
+            Placement.create_table()
+            Plan.create_table()
+
+            # Ostro
+            Event.create_table()
+            PlacementRequest.create_table()
+            PlacementResult.create_table()
+        except Exception:
+            models.rollback()
+            out(_("Rolling back..."))
+            raise
+        else:
+            out(_("Committing."))
+            models.commit()
diff --git a/valet/api/v1/controllers/__init__.py b/valet/api/v1/controllers/__init__.py
new file mode 100644
index 0000000..db56856
--- /dev/null
+++ b/valet/api/v1/controllers/__init__.py
@@ -0,0 +1,128 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
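The controllers package that follows centralizes the notario validators and the placement bookkeeping shared by the v1 controllers. For orientation, this is roughly how a notario schema rejects a request body (a standalone sketch, not Valet code; notario expects the data dict's keys to line up with the schema in sorted order):

from notario import validate
from notario.validators import types
from notario.exceptions import Invalid

schema = (('name', types.string), ('type', types.string))
validate({'name': 'g1', 'type': 'exclusivity'}, schema)    # passes silently
try:
    validate({'name': 42, 'type': 'exclusivity'}, schema)  # 42 is not a string
except Invalid as exc:
    print(exc)  # points at the offending key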
+
+'''Controllers Package'''
+
+import logging
+from notario.decorators import instance_of
+from notario import ensure
+from os import path
+
+from pecan import redirect, request
+import string
+from valet.api.common.i18n import _
+from valet.api.db.models import Placement
+
+LOG = logging.getLogger(__name__)
+
+
+#
+# Notario Helpers
+#
+
+def valid_group_name(value):
+    '''Validator for group name type.'''
+    if not value or not set(value) <= set(string.letters + string.digits + "-._~"):
+        LOG.error("group name is not valid")
+        LOG.error("group name must contain only uppercase and lowercase "
+                  "letters, decimal digits, hyphens, periods, underscores, "
+                  "and tildes [RFC 3986, Section 2.3]")
+        raise AssertionError(_("group name is not valid"))
+
+
+@instance_of((list, dict))
+def valid_plan_resources(value):
+    '''Validator for plan resources.'''
+    ensure(len(value) > 0)
+
+
+def valid_plan_update_action(value):
+    '''Validator for plan update action.'''
+    assert value in ['update', 'migrate'], _("must be update or migrate")
+
+#
+# Placement Helpers
+#
+
+
+def set_placements(plan, resources, placements):
+    '''Set placements'''
+    for uuid in placements.iterkeys():
+        name = resources[uuid]['name']
+        properties = placements[uuid]['properties']
+        location = properties['host']
+        Placement(name, uuid, plan=plan, location=location)  # pylint: disable=W0612
+
+    return plan
+
+
+def reserve_placement(placement, resource_id=None, reserve=True, update=True):
+    ''' Reserve placement. Can optionally set the physical resource id.
+
+    Set reserve=False to unreserve. Set update=False to not update
+    the data store (if the update will be made later).
+    '''
+    if placement:
+        LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'),
+                 {'rsrv': _("Reserving") if reserve else _("Unreserving"),
+                  'orch_id': placement.orchestration_id,
+                  'loc': placement.location})
+        placement.reserved = reserve
+        if resource_id:
+            LOG.info(_('Associating resource id %(res_id)s with '
+                       'orchestration id %(orch_id)s.'),
+                     {'res_id': resource_id,
+                      'orch_id': placement.orchestration_id})
+            placement.resource_id = resource_id
+        if update:
+            placement.update()
+
+
+def update_placements(placements, reserve_id=None, unlock_all=False):
+    '''Update placements. Optionally reserve one placement.'''
+    for uuid in placements.iterkeys():
+        placement = Placement.query.filter_by(  # pylint: disable=E1101
+            orchestration_id=uuid).first()
+        if placement:
+            properties = placements[uuid]['properties']
+            location = properties['host']
+            if placement.location != location:
+                LOG.info(_('Changing placement of %(orch_id)s '
+                           'from %(old_loc)s to %(new_loc)s.'),
+                         {'orch_id': placement.orchestration_id,
+                          'old_loc': placement.location,
+                          'new_loc': location})
+                placement.location = location
+            if unlock_all:
+                reserve_placement(placement, reserve=False, update=False)
+            elif reserve_id and placement.orchestration_id == reserve_id:
+                reserve_placement(placement, reserve=True, update=False)
+            placement.update()
+    return
+
+
+#
+# Error Helpers
+#
+
+def error(url, msg=None, **kwargs):
+    '''Error handler'''
+    if msg:
+        request.context['error_message'] = msg
+    if kwargs:
+        request.context['kwargs'] = kwargs
+    url = path.join(url, '?error_message=%s' % msg)
+    redirect(url, internal=True)
diff --git a/valet/api/v1/controllers/errors.py b/valet/api/v1/controllers/errors.py
new file mode 100644
index 0000000..3be5957
--- /dev/null
+++ b/valet/api/v1/controllers/errors.py
@@ -0,0 +1,140 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Errors'''
+
+import logging
+from pecan import expose, request, response
+from valet.api.common.i18n import _
+from webob.exc import status_map
+
+LOG = logging.getLogger(__name__)
+
+# pylint: disable=R0201
+
+
+def error_wrapper(func):
+    '''Error decorator.'''
+    def func_wrapper(self, **kw):
+        '''Wrapper.'''
+
+        kwargs = func(self, **kw)
+        status = status_map.get(response.status_code)
+        message = getattr(status, 'explanation', '')
+        explanation = request.context.get('error_message', message)
+        error_type = status.__name__
+        title = status.title
+        traceback = kwargs.get('traceback') if kwargs else None
+
+        LOG.error(explanation)
+
+        # Modeled after Heat's format
+        return {
+            "explanation": explanation,
+            "code": response.status_code,
+            "error": {
+                "message": message,
+                "traceback": traceback,
+                "type": error_type,
+            },
+            "title": title,
+        }
+    return func_wrapper
+
+
+# pylint: disable=W0613
+class ErrorsController(object):
+    ''' Errors Controller /errors/{error_name} '''
+
+    @expose('json')
+    @error_wrapper
+    def schema(self, **kw):
+        '''400'''
+        request.context['error_message'] = str(request.validation_error)
+        response.status = 400
+        return request.context.get('kwargs')
+
+    @expose('json')
+    @error_wrapper
+    def invalid(self, **kw):
+        '''400'''
+        response.status = 400
+        return request.context.get('kwargs')
+
+    @expose()
+    def unauthorized(self, **kw):
+        '''401'''
+        # This error is terse and opaque on purpose.
+        # Don't give any clues to help AuthN along.
+ response.status = 401 + response.content_type = 'text/plain' + LOG.error('unauthorized') + import traceback + traceback.print_stack() + LOG.error(self.__class__) + LOG.error(kw) + response.body = _('Authentication required') + LOG.error(response.body) + return response + + @expose('json') + @error_wrapper + def forbidden(self, **kw): + '''403''' + response.status = 403 + return request.context.get('kwargs') + + @expose('json') + @error_wrapper + def not_found(self, **kw): + '''404''' + response.status = 404 + return request.context.get('kwargs') + + @expose('json') + @error_wrapper + def not_allowed(self, **kw): + '''405''' + kwargs = request.context.get('kwargs') + if kwargs: + allow = kwargs.get('allow', None) + if allow: + response.headers['Allow'] = allow + response.status = 405 + return kwargs + + @expose('json') + @error_wrapper + def conflict(self, **kw): + '''409''' + response.status = 409 + return request.context.get('kwargs') + + @expose('json') + @error_wrapper + def server_error(self, **kw): + '''500''' + response.status = 500 + return request.context.get('kwargs') + + @expose('json') + @error_wrapper + def unavailable(self, **kw): + '''503''' + response.status = 503 + return request.context.get('kwargs') diff --git a/valet/api/v1/controllers/groups.py b/valet/api/v1/controllers/groups.py new file mode 100644 index 0000000..7cb5440 --- /dev/null +++ b/valet/api/v1/controllers/groups.py @@ -0,0 +1,321 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
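Before moving on to the groups controller, it may help to see what error_wrapper above actually puts on the wire. For a 404, the Heat-style body looks roughly like this (illustrative values taken from webob's status_map; the explanation is replaced when a custom error_message is set in the request context):

{
    "explanation": "The resource could not be found.",
    "code": 404,
    "error": {
        "message": "The resource could not be found.",
        "traceback": None,
        "type": "HTTPNotFound",
    },
    "title": "Not Found",
}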
+ +'''Groups''' + +import logging + +from notario import decorators +from notario.validators import types +from pecan import conf, expose, request, response +from pecan_notario import validate + +from valet.api.common.compute import nova_client +from valet.api.common.i18n import _ +from valet.api.common.ostro_helper import Ostro +from valet.api.db.models import Group +from valet.api.v1.controllers import error, valid_group_name + +LOG = logging.getLogger(__name__) + +GROUPS_SCHEMA = ( + (decorators.optional('description'), types.string), + ('name', valid_group_name), + ('type', types.string) +) + +UPDATE_GROUPS_SCHEMA = ( + (decorators.optional('description'), types.string) +) + +MEMBERS_SCHEMA = ( + ('members', types.array) +) + +# pylint: disable=R0201 + + +def server_list_for_group(group): + '''Returns a list of VMs associated with a member/group.''' + args = { + "type": "group_vms", + "parameters": { + "group_name": group.name, + }, + } + ostro_kwargs = { + "args": args, + } + ostro = Ostro() + ostro.query(**ostro_kwargs) + ostro.send() + + status_type = ostro.response['status']['type'] + if status_type != 'ok': + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Ostro error: %s') % message) + + resources = ostro.response['resources'] + return resources or [] + + +def tenant_servers_in_group(tenant_id, group): + ''' Returns a list of servers the current tenant has in group_name ''' + servers = [] + server_list = server_list_for_group(group) + nova = nova_client() + for server_id in server_list: + try: + server = nova.servers.get(server_id) + if server.tenant_id == tenant_id: + servers.append(server_id) + except Exception as ex: # TODO(JD): update DB + LOG.error("Instance %s could not be found" % server_id) + LOG.error(ex) + if len(servers) > 0: + return servers + + +def no_tenant_servers_in_group(tenant_id, group): + ''' Verify no servers from tenant_id are in group. + + Throws a 409 Conflict if any are found. + ''' + server_list = tenant_servers_in_group(tenant_id, group) + if server_list: + error('/errors/conflict', _('Tenant Member {0} has servers in group "{1}": {2}').format(tenant_id, group.name, server_list)) + + +class MembersItemController(object): + ''' Members Item Controller /v1/groups/{group_id}/members/{member_id} ''' + + def __init__(self, member_id): + '''Initialize group member''' + group = request.context['group'] + if member_id not in group.members: + error('/errors/not_found', _('Member not found in group')) + request.context['member_id'] = member_id + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET,DELETE' + + @expose(generic=True, template='json') + def index(self): + '''Catch all for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Verify group member''' + response.status = 204 + + @index.when(method='DELETE', template='json') + def index_delete(self): + '''Delete group member''' + group = request.context['group'] + member_id = request.context['member_id'] + + # Can't delete a member if it has associated VMs. 
+ no_tenant_servers_in_group(member_id, group) + + group.members.remove(member_id) + group.update() + response.status = 204 + + +class MembersController(object): + ''' Members Controller /v1/groups/{group_id}/members ''' + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'PUT,DELETE' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='PUT', template='json') + @validate(MEMBERS_SCHEMA, '/errors/schema') + def index_put(self, **kwargs): + '''Add one or more members to a group''' + new_members = kwargs.get('members', None) + + if not conf.identity.engine.is_tenant_list_valid(new_members): + error('/errors/conflict', _('Member list contains invalid tenant IDs')) + + group = request.context['group'] + group.members = list(set(group.members + new_members)) + group.update() + response.status = 201 + + # Flush so that the DB is current. + group.flush() + return group + + @index.when(method='DELETE', template='json') + def index_delete(self): + '''Delete all group members''' + group = request.context['group'] + + # Can't delete a member if it has associated VMs. + for member_id in group.members: + no_tenant_servers_in_group(member_id, group) + + group.members = [] + group.update() + response.status = 204 + + @expose() + def _lookup(self, member_id, *remainder): + '''Pecan subcontroller routing callback''' + return MembersItemController(member_id), remainder + + +class GroupsItemController(object): + ''' Groups Item Controller /v1/groups/{group_id} ''' + + members = MembersController() + + def __init__(self, group_id): + '''Initialize group''' + group = Group.query.filter_by(id=group_id).first() # pylint: disable=E1101 + if not group: + error('/errors/not_found', _('Group not found')) + request.context['group'] = group + + @classmethod + def allow(cls): + ''' Allowed methods ''' + return 'GET,PUT,DELETE' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Display a group''' + return {"group": request.context['group']} + + @index.when(method='PUT', template='json') + @validate(UPDATE_GROUPS_SCHEMA, '/errors/schema') + def index_put(self, **kwargs): + '''Update a group''' + # Name and type are immutable. + # Group Members are updated in MembersController. + group = request.context['group'] + group.description = kwargs.get('description', group.description) + group.update() + response.status = 201 + + # Flush so that the DB is current. 
+ group.flush() + return group + + @index.when(method='DELETE', template='json') + def index_delete(self): + '''Delete a group''' + group = request.context['group'] + if isinstance(group.members, list) and len(group.members) > 0: + error('/errors/conflict', _('Unable to delete a Group with members.')) + group.delete() + response.status = 204 + + +class GroupsController(object): + ''' Groups Controller /v1/groups ''' + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET,POST' + + @expose(generic=True, template='json') + def index(self): + '''Catch all for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''List groups''' + groups_array = [] + for group in Group.query.all(): # pylint: disable=E1101 + groups_array.append(group) + return {'groups': groups_array} + + @index.when(method='POST', template='json') + @validate(GROUPS_SCHEMA, '/errors/schema') + def index_post(self, **kwargs): + '''Create a group''' + group_name = kwargs.get('name', None) + description = kwargs.get('description', None) + group_type = kwargs.get('type', None) + members = [] # Use /v1/groups/members endpoint to add members + + try: + group = Group(group_name, description, group_type, members) + if group: + response.status = 201 + + # Flush so that the DB is current. + group.flush() + return group + except Exception as e: + error('/errors/server_error', _('Unable to create Group. %s') % e) + + @expose() + def _lookup(self, group_id, *remainder): + '''Pecan subcontroller routing callback''' + return GroupsItemController(group_id), remainder diff --git a/valet/api/v1/controllers/placements.py b/valet/api/v1/controllers/placements.py new file mode 100644 index 0000000..599f1bb --- /dev/null +++ b/valet/api/v1/controllers/placements.py @@ -0,0 +1,196 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
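The groups endpoints above can be exercised with any HTTP client. A hypothetical minimal call (host, port, and token are placeholders; per check_permissions in v1.py the token must carry an admin role, and the name must satisfy valid_group_name):

import json
import requests

headers = {'Content-Type': 'application/json', 'X-Auth-Token': 'ADMIN-TOKEN'}
body = {'name': 'my-exclusivity-group', 'type': 'exclusivity',
        'description': 'hosts reserved for one tenant'}
resp = requests.post('http://127.0.0.1:8090/v1/groups',
                     data=json.dumps(body), headers=headers)
print(resp.status_code)  # 201 on success, per index_post above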
+ +'''Placements''' + +import logging + +from pecan import expose, request, response +from valet.api.common.i18n import _ +from valet.api.common.ostro_helper import Ostro +from valet.api.db.models import Placement, Plan +from valet.api.v1.controllers import error +from valet.api.v1.controllers import reserve_placement +from valet.api.v1.controllers import update_placements + + +LOG = logging.getLogger(__name__) + +# pylint: disable=R0201 + + +class PlacementsItemController(object): + ''' Placements Item Controller /v1/placements/{placement_id} ''' + + def __init__(self, uuid4): + '''Initializer.''' + self.uuid = uuid4 + self.placement = Placement.query.filter_by(id=self.uuid).first() # pylint: disable=E1101 + if not self.placement: + self.placement = Placement.query.filter_by(orchestration_id=self.uuid).first() # disable=E1101 + if not self.placement: + error('/errors/not_found', _('Placement not found')) + request.context['placement_id'] = self.placement.id + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET,POST,DELETE' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + ''' Inspect a placement. + + Use POST for reserving placements made by a scheduler. + ''' + return {"placement": self.placement} + + @index.when(method='POST', template='json') + def index_post(self, **kwargs): + ''' Reserve a placement. This and other placements may be replanned. + + Once reserved, the location effectively becomes immutable. + ''' + res_id = kwargs.get('resource_id') + LOG.info(_('Placement reservation request for resource id ' + '%(res_id)s, orchestration id %(orch_id)s.'), + {'res_id': res_id, 'orch_id': self.placement.orchestration_id}) + locations = kwargs.get('locations', []) + locations_str = ', '.join(locations) + LOG.info(_('Candidate locations: %s'), locations_str) + if self.placement.location in locations: + # Ostro's placement is in the list of candidates. Good! + # Reserve it. Remember the resource id too. + kwargs = {'resource_id': res_id} + reserve_placement(self.placement, **kwargs) + response.status = 201 + else: + # Ostro's placement is NOT in the list of candidates. + # Time for Plan B. + LOG.info(_('Placement of resource id %(res_id)s, ' + 'orchestration id %(orch_id)s in %(loc)s ' + 'not allowed. Replanning.'), + {'res_id': res_id, + 'orch_id': self.placement.orchestration_id, + 'loc': self.placement.location}) + + # Unreserve the placement. Remember the resource id too. + kwargs = {'resource_id': res_id, 'reserve': False} + reserve_placement(self.placement, **kwargs) + + # Find all the reserved placements for the related plan. + reserved = Placement.query.filter_by( # pylint: disable=E1101 + plan_id=self.placement.plan_id, reserved=True) + + # Keep this placement's orchestration ID handy. + orchestration_id = self.placement.orchestration_id + + # Extract all the orchestration IDs. 
+ exclusions = [x.orchestration_id for x in reserved] + if exclusions: + exclusions_str = ', '.join(exclusions) + LOG.info(_('Excluded orchestration IDs: %s'), exclusions_str) + else: + LOG.info(_('No excluded orchestration IDs.')) + + # Ask Ostro to try again with new constraints. + # We may get one or more updated placements in return. + # One of those will be the original placement + # we are trying to reserve. + plan = Plan.query.filter_by(id=self.placement.plan_id).first() # pylint: disable=E1101 + + args = { + "stack_id": plan.stack_id, + "locations": locations, + "orchestration_id": orchestration_id, + "exclusions": exclusions, + } + ostro_kwargs = {"args": args, } + ostro = Ostro() + ostro.replan(**ostro_kwargs) + ostro.send() + + status_type = ostro.response['status']['type'] + if status_type != 'ok': + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Ostro error: %s') % message) + + # Update all affected placements. Reserve the original one. + placements = ostro.response['resources'] + update_placements(placements, reserve_id=orchestration_id) + response.status = 201 + + placement = Placement.query.filter_by( # pylint: disable=E1101 + orchestration_id=self.placement.orchestration_id).first() + return {"placement": placement} + + @index.when(method='DELETE', template='json') + def index_delete(self): + '''Delete a Placement''' + orch_id = self.placement.orchestration_id + self.placement.delete() + LOG.info(_('Placement with orchestration id %s deleted.'), orch_id) + response.status = 204 + + +class PlacementsController(object): + ''' Placements Controller /v1/placements ''' + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Get placements.''' + placements_array = [] + for placement in Placement.query.all(): # pylint: disable=E1101 + placements_array.append(placement) + return {"placements": placements_array} + + @expose() + def _lookup(self, uuid4, *remainder): + '''Pecan subcontroller routing callback''' + return PlacementsItemController(uuid4), remainder diff --git a/valet/api/v1/controllers/plans.py b/valet/api/v1/controllers/plans.py new file mode 100644 index 0000000..e9ed7a0 --- /dev/null +++ b/valet/api/v1/controllers/plans.py @@ -0,0 +1,284 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
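For the reservation flow above, the caller (normally a scheduler hook) POSTs its candidate locations to a placement; Valet either confirms Ostro's choice or replans around it. A hypothetical sketch (all identifiers invented):

import json
import requests

headers = {'Content-Type': 'application/json', 'X-Auth-Token': 'TOKEN'}
body = {'resource_id': 'RESOURCE-UUID',
        'locations': ['compute-host-1', 'compute-host-2']}
resp = requests.post('http://127.0.0.1:8090/v1/placements/ORCH-UUID',
                     data=json.dumps(body), headers=headers)
# 201 whether the original location was kept or a replan was triggered;
# the body carries the (possibly relocated) placement.
print(resp.json()['placement'])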
+ +'''Plans''' + +import logging + +from notario import decorators +from notario.validators import types +from pecan import expose, request, response +from pecan_notario import validate + +from valet.api.common.i18n import _ +from valet.api.common.ostro_helper import Ostro +from valet.api.db.models import Placement, Plan +from valet.api.v1.controllers import error +from valet.api.v1.controllers import set_placements +from valet.api.v1.controllers import update_placements +from valet.api.v1.controllers import valid_plan_update_action + +LOG = logging.getLogger(__name__) + +CREATE_SCHEMA = ( + ('plan_name', types.string), + ('resources', types.dictionary), + ('stack_id', types.string), + (decorators.optional('timeout'), types.string) +) + +UPDATE_SCHEMA = ( + ('action', valid_plan_update_action), + (decorators.optional('excluded_hosts'), types.array), + (decorators.optional('plan_name'), types.string), + # FIXME: resources needs to work against valid_plan_resources + ('resources', types.array), + (decorators.optional('timeout'), types.string) +) + +# pylint: disable=R0201 + + +class PlansItemController(object): + ''' Plans Item Controller /v1/plans/{plan_id} ''' + + def __init__(self, uuid4): + '''Initializer.''' + self.uuid = uuid4 + self.plan = Plan.query.filter_by(id=self.uuid).first() # pylint: disable=E1101 + + if not self.plan: + self.plan = Plan.query.filter_by(stack_id=self.uuid).first() # pylint: disable=E1101 + + if not self.plan: + error('/errors/not_found', _('Plan not found')) + request.context['plan_id'] = self.plan.id + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET,PUT,DELETE' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Get plan''' + return {"plan": self.plan} + + @index.when(method='PUT', template='json') + @validate(UPDATE_SCHEMA, '/errors/schema') + def index_put(self, **kwargs): + '''Update a Plan''' + + action = kwargs.get('action') + if action == 'migrate': + # Replan the placement of an existing resource. + excluded_hosts = kwargs.get('excluded_hosts', []) + resources = kwargs.get('resources', []) + + # TODO(JD): Support replan of more than one existing resource + if not isinstance(resources, list) or len(resources) != 1: + error('/errors/invalid', _('resources must be a list of length 1.')) + + # We either got a resource or orchestration id. 
+ the_id = resources[0] + placement = Placement.query.filter_by(resource_id=the_id).first() # pylint: disable=E1101 + if not placement: + placement = Placement.query.filter_by(orchestration_id=the_id).first() # pylint: disable=E1101 + if not placement: + error('/errors/invalid', _('Unknown resource or orchestration id: %s') % the_id) + + LOG.info(_('Migration request for resource id {0}, orchestration id {1}.').format(placement.resource_id, placement.orchestration_id)) + args = { + "stack_id": self.plan.stack_id, + "excluded_hosts": excluded_hosts, + "orchestration_id": placement.orchestration_id, + } + ostro_kwargs = { + "args": args, + } + ostro = Ostro() + ostro.migrate(**ostro_kwargs) + ostro.send() + + status_type = ostro.response['status']['type'] + if status_type != 'ok': + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Ostro error: %s') % message) + + placements = ostro.response['resources'] + update_placements(placements, unlock_all=True) + response.status = 201 + + # Flush so that the DB is current. + self.plan.flush() + self.plan = Plan.query.filter_by(stack_id=self.plan.stack_id).first() # pylint: disable=E1101 + LOG.info(_('Plan with stack id %s updated.'), self.plan.stack_id) + return {"plan": self.plan} + + # TODO(JD): Throw unimplemented error? + + # pylint: disable=W0612 + ''' + # FIXME: This is broken. Save for Valet 1.1 + # New placements are not being seen in the response, so + # set_placements is currently failing as a result. + ostro = Ostro() + args = request.json + + kwargs = { + 'tenant_id': request.context['tenant_id'], + 'args': args + } + + # Prepare the request. If request prep fails, + # an error message will be in the response. + # Though the Ostro helper reports the error, + # we cite it as a Valet error. + if not ostro.build_request(**kwargs): + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Valet error: %s') % message) + + ostro.send() + status_type = ostro.response['status']['type'] + if status_type != 'ok': + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Ostro error: %s') % message) + + # TODO(JD): Keep. See if we will eventually need these for Ostro. + #plan_name = args['plan_name'] + #stack_id = args['stack_id'] + resources = ostro.request['resources_update'] + placements = ostro.response['resources'] + + set_placements(self.plan, resources, placements) + response.status = 201 + + # Flush so that the DB is current. 
+ self.plan.flush() + return self.plan + ''' + # pylint: enable=W0612 + + @index.when(method='DELETE', template='json') + def index_delete(self): + '''Delete a Plan''' + for placement in self.plan.placements(): + placement.delete() + stack_id = self.plan.stack_id + self.plan.delete() + LOG.info(_('Plan with stack id %s deleted.'), stack_id) + response.status = 204 + + +class PlansController(object): + ''' Plans Controller /v1/plans ''' + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET,POST' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Get all the plans''' + plans_array = [] + for plan in Plan.query.all(): # pylint: disable=E1101 + plans_array.append(plan) + return {"plans": plans_array} + + @index.when(method='POST', template='json') + @validate(CREATE_SCHEMA, '/errors/schema') + def index_post(self): + '''Create a Plan''' + ostro = Ostro() + args = request.json + + kwargs = { + 'tenant_id': request.context['tenant_id'], + 'args': args + } + + # Prepare the request. If request prep fails, + # an error message will be in the response. + # Though the Ostro helper reports the error, + # we cite it as a Valet error. + if not ostro.build_request(**kwargs): + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Valet error: %s') % message) + + # If there are no serviceable resources, bail. Not an error. + # Treat it as if an "empty plan" was created. + # FIXME: Ostro should likely handle this and not error out. + if not ostro.is_request_serviceable(): + LOG.info(_('Plan has no serviceable resources. Skipping.')) + response.status = 201 + return {"plan": {}} + + ostro.send() + status_type = ostro.response['status']['type'] + if status_type != 'ok': + message = ostro.response['status']['message'] + error(ostro.error_uri, _('Ostro error: %s') % message) + + plan_name = args['plan_name'] + stack_id = args['stack_id'] + resources = ostro.request['resources'] + placements = ostro.response['resources'] + + plan = Plan(plan_name, stack_id) + if plan: + set_placements(plan, resources, placements) + response.status = 201 + + # Flush so that the DB is current. + plan.flush() + LOG.info(_('Plan with stack id %s created.'), plan.stack_id) + return {"plan": plan} + else: + error('/errors/server_error', _('Unable to create Plan.')) + + @expose() + def _lookup(self, uuid4, *remainder): + '''Pecan subcontroller routing callback''' + return PlansItemController(uuid4), remainder diff --git a/valet/api/v1/controllers/root.py b/valet/api/v1/controllers/root.py new file mode 100644 index 0000000..47ef2a5 --- /dev/null +++ b/valet/api/v1/controllers/root.py @@ -0,0 +1,90 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Root''' + +import logging + +from pecan import expose, request, response +from valet.api.common.i18n import _ +from valet.api.v1.controllers import error +from valet.api.v1.controllers.errors import ErrorsController, error_wrapper +from valet.api.v1.controllers.v1 import V1Controller + +from webob.exc import status_map + +LOG = logging.getLogger(__name__) + +# pylint: disable=R0201 + + +class RootController(object): + ''' Root Controller / ''' + + errors = ErrorsController() + v1 = V1Controller() # pylint: disable=C0103 + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Get canonical URL for each version''' + ver = { + "versions": + [ + { + "status": "CURRENT", + "id": "v1.0", + "links": + [ + { + "href": request.application_url + "/v1/", + "rel": "self" + } + ] + } + ] + } + + return ver + + @error_wrapper + def error(self, status): + '''Error handler''' + try: + status = int(status) + except ValueError: # pragma: no cover + status = 500 + message = getattr(status_map.get(status), 'explanation', '') + return dict(status=status, message=message) diff --git a/valet/api/v1/controllers/status.py b/valet/api/v1/controllers/status.py new file mode 100644 index 0000000..76924e1 --- /dev/null +++ b/valet/api/v1/controllers/status.py @@ -0,0 +1,90 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Status''' + +import logging + +from pecan import expose, request, response +from valet.api.common.i18n import _ +from valet.api.common.ostro_helper import Ostro +from valet.api.v1.controllers import error + +LOG = logging.getLogger(__name__) + +# pylint: disable=R0201 + + +class StatusController(object): + ''' Status Controller /v1/status ''' + + @classmethod + def _ping_ostro(cls): + '''Ping Ostro''' + ostro = Ostro() + ostro.ping() + ostro.send() + return ostro.response + + @classmethod + def _ping(cls): + '''Ping each subsystem.''' + ostro_response = StatusController._ping_ostro() + # TODO(JD): Ping Music plus any others. 
+ # music_response = StatusController._ping_music() + + response = { + "status": { + "ostro": ostro_response, + # "music": music_response, + } + } + + return response + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'HEAD,GET' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='HEAD', template='json') + def index_head(self): + '''Ping each subsystem and return summary response''' + self._ping() # pylint: disable=W0612 + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Ping each subsystem and return detailed response''' + + _response = self._ping() + response.status = 200 + return _response diff --git a/valet/api/v1/controllers/v1.py b/valet/api/v1/controllers/v1.py new file mode 100644 index 0000000..8262446 --- /dev/null +++ b/valet/api/v1/controllers/v1.py @@ -0,0 +1,130 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''v1''' + +import logging + +from pecan import conf, expose, request, response +from pecan.secure import SecureController + +from valet.api.common.i18n import _ +from valet.api.v1.controllers import error +from valet.api.v1.controllers.groups import GroupsController +from valet.api.v1.controllers.placements import PlacementsController +from valet.api.v1.controllers.plans import PlansController +from valet.api.v1.controllers.status import StatusController + + +LOG = logging.getLogger(__name__) + +# pylint: disable=R0201 + + +class V1Controller(SecureController): + ''' v1 Controller /v1 ''' + + groups = GroupsController() + placements = PlacementsController() + plans = PlansController() + status = StatusController() + + # Update this whenever a new endpoint is made. + endpoints = ["groups", "placements", "plans", "status"] + + @classmethod + def check_permissions(cls): + '''SecureController permission check callback''' + token = None + auth_token = request.headers.get('X-Auth-Token') + msg = "Unauthorized - No auth token" + + if auth_token: + msg = "Unauthorized - invalid token" + # The token must have an admin role + # and be associated with a tenant. 
+ token = conf.identity.engine.validate_token(auth_token) + + if token: + LOG.debug("Checking token permissions") + msg = "Unauthorized - Permission was not granted" + if V1Controller._permission_granted(request, token): + tenant_id = conf.identity.engine.tenant_from_token(token) + LOG.info("tenant_id - " + str(tenant_id)) + if tenant_id: + request.context['tenant_id'] = tenant_id + user_id = conf.identity.engine.user_from_token(token) + request.context['user_id'] = user_id + + return True + + error('/errors/unauthorized', msg) + + @classmethod + def _action_is_migrate(cls, request): + return "plan" in request.path and hasattr(request, "json") and "action" in request.json and request.json["action"] == "migrate" + + @classmethod + def _permission_granted(cls, request, token): + return not ("group" in request.path or + V1Controller._action_is_migrate(request)) or\ + (conf.identity.engine.is_token_admin(token)) + + @classmethod + def allow(cls): + '''Allowed methods''' + return 'GET' + + @expose(generic=True, template='json') + def index(self): + '''Catchall for unallowed methods''' + message = _('The %s method is not allowed.') % request.method + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + '''Options''' + response.headers['Allow'] = self.allow() + response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + '''Get canonical URL for each endpoint''' + links = [] + for endpoint in V1Controller.endpoints: + links.append({ + "href": "%(url)s/v1/%(endpoint)s/" % + { + 'url': request.application_url, + 'endpoint': endpoint + }, + "rel": "self" + }) + ver = { + "versions": + [ + { + "status": "CURRENT", + "id": "v1.0", + "links": links + } + ] + } + + return ver diff --git a/valet/api/wsgi.py b/valet/api/wsgi.py new file mode 100644 index 0000000..bc3be8a --- /dev/null +++ b/valet/api/wsgi.py @@ -0,0 +1,57 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''WSGI Wrapper''' + +from common.i18n import _ +import os +from pecan.deploy import deploy + + +def config_file(file_name=None): + """Returns absolute location of the config file""" + file_name = file_name or 'config.py' + _file = os.path.abspath(__file__) + + def dirname(x): + return os.path.dirname(x) + parent_dir = dirname(_file) + return os.path.join(parent_dir, file_name) + + +def application(environ, start_response): + """Returns a WSGI app object""" + wsgi_app = deploy(config_file('prod.py')) + return wsgi_app(environ, start_response) + +# TODO(JD): Integrate this with a python entry point +# This way we can run valet-api from the command line in a pinch. 
+if __name__ == '__main__':
+    from wsgiref.simple_server import make_server  # disable=C0411,C0413
+
+    # TODO(JD): At some point, it would be nice to use pecan_mount
+    # import pecan_mount
+    # HTTPD = make_server('', 8090, pecan_mount.tree)
+    from valet.api.conf import register_conf, set_domain
+    register_conf()
+    set_domain()
+    HTTPD = make_server('', 8090, deploy(config_file('/var/www/valet/config.py')))
+    print(_("Serving HTTP on port 8090..."))
+
+    # Respond to requests until process is killed
+    HTTPD.serve_forever()
diff --git a/valet/cli/__init__.py b/valet/cli/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/cli/groupcli.py b/valet/cli/groupcli.py
new file mode 100644
index 0000000..92f5c52
--- /dev/null
+++ b/valet/cli/groupcli.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+import argparse
+import json
+from oslo_config import cfg
+import requests
+from valet.api.conf import register_conf, set_domain
+
+CONF = cfg.CONF
+
+
+class ResponseError(Exception):
+    pass
+
+
+class ConnectionError(Exception):
+    pass
+
+
+def print_verbose(verbose, url, headers, body, rest_cmd, timeout):
+    if verbose:
+        print("Sending Request:\nurl: %s\nheaders: %s\nbody: %s\ncmd: %s\ntimeout: %d\n"
+              % (url, headers, body, rest_cmd.__name__ if rest_cmd is not None else None, timeout))
+
+
+def pretty_print_json(json_thing, sort=True, indents=4):
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+    return None
+
+
+def add_to_parser(service_sub):
+    parser = service_sub.add_parser('group', help='Group Management',
+                                    formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30,
+                                                                                        width=120))
+    parser.add_argument('--version', action='version', version='%(prog)s 1.1')
+    parser.add_argument('--timeout', type=int, help='Set request timeout in seconds (default: 10)')
+    parser.add_argument('--host', type=str, help='Hostname or IP of valet server')
+    parser.add_argument('--port', type=str, help='Port number of valet server')
+    parser.add_argument('--os-tenant-name', type=str, help='Tenant name')
+    parser.add_argument('--os-user-name', dest='os_username', type=str, help='Username')
+    parser.add_argument('--os-password', type=str, help="User's password")
+    parser.add_argument('--verbose', '-v', help='Show details', action="store_true")
+    subparsers = parser.add_subparsers(dest='subcmd', metavar='<subcommand>')
+
+    # create group
+    parser_create_group = subparsers.add_parser('create', help='Create new group.')
+    parser_create_group.add_argument('name', type=str, help='<group name>')
+    parser_create_group.add_argument('type', type=str, help='<group type> (exclusivity)')
+    parser_create_group.add_argument('--description', type=str, help='<group description>')
+
+    # delete group
+    parser_delete_group = subparsers.add_parser('delete', help='Delete specified group.')
+    parser_delete_group.add_argument('groupid', type=str, help='<group id>')
+
+    # delete group member
+    parser_delete_group_member = subparsers.add_parser('delete-member', help='Delete members from specified group.')
+    parser_delete_group_member.add_argument('groupid', type=str, help='<group id>')
+    parser_delete_group_member.add_argument('memberid', type=str, help='<member id>')
+
+    # delete all group members
+    parser_delete_all_group_members = subparsers.add_parser('delete-all-members', help='Delete all members from '
+                                                                                       'specified group.')
+    parser_delete_all_group_members.add_argument('groupid', type=str, help='<group id>')
+
+    # list group
+    subparsers.add_parser('list', help='List all groups.')
+
+    # show group details
+    parser_show_group_details = subparsers.add_parser('show', help='Show details about the given group.')
+    parser_show_group_details.add_argument('groupid', type=str, help='<group id>')
+
+    # update group
+    parser_update_group = subparsers.add_parser('update', help='Update group description.')
+    parser_update_group.add_argument('groupid', type=str, help='<group id>')
+    parser_update_group.add_argument('--description', type=str, help='<group description>')
+
+    parser_update_group_members = subparsers.add_parser('update-member', help='Update group members.')
+    parser_update_group_members.add_argument('groupid', type=str, help='<group id>')
+    parser_update_group_members.add_argument('members', type=str, help='<member id>')
+
+    return parser
+
+
+def cmd_details(args):
+    if args.subcmd == 'create':
+        return requests.post, ''
+    elif args.subcmd == 'update':
+        return requests.put, '/%s' % args.groupid
+    elif args.subcmd == 'update-member':
+        return requests.put, '/%s/members' % args.groupid
+    elif args.subcmd == 'delete':
+        return requests.delete, '/%s' % (args.groupid)
+    elif args.subcmd == 'delete-all-members':
+        return requests.delete, '/%s/members' % (args.groupid)
+    elif args.subcmd == 'delete-member':
+        return requests.delete, '/%s/members/%s' % (args.groupid, args.memberid)
+    elif args.subcmd == 'show':
+        return requests.get, '/%s' % (args.groupid)
+    elif args.subcmd == 'list':
+        return requests.get, ''
+
+
+def get_token(timeout, args):
+    # tenant_name = args.os_tenant_name if args.os_tenant_name else os.environ.get('OS_TENANT_NAME')
+    tenant_name = args.os_tenant_name if args.os_tenant_name else CONF.identity.project_name
+    auth_name = args.os_username if args.os_username else CONF.identity.username
+    password = args.os_password if args.os_password else CONF.identity.password
+    headers = {
+        'Content-Type': 'application/json',
+    }
+    url = '%s/tokens' % CONF.identity.auth_url
+    data = '''
+    {
+        "auth": {
+            "tenantName": "%s",
+            "passwordCredentials": {
+                "username": "%s",
+                "password": "%s"
+            }
+        }
+    }''' % (tenant_name, auth_name, password)
+    print_verbose(args.verbose, url, headers, data, None, timeout)
+    try:
+        resp = requests.post(url, timeout=timeout, data=data, headers=headers)
+        if resp.status_code != 200:
+            raise ResponseError(
+                'Failed in get_token: status code received {}'.format(
+                    resp.status_code))
+        return resp.json()['access']['token']['id']
+    except Exception as e:
+        message = 'Failed in get_token'
+        # logger.log_exception(message, str(e))
+        print(e)
+        raise ConnectionError(message)
+
+
+def populate_args_request_body(args):
+    body_args_list = ['name', 'type', 'description', 'members']
+    # assign values to dictionary (if val exists); members will be assigned as a list
+    body_dict = {}
+    for body_arg in body_args_list:
+        if hasattr(args, body_arg):
+            body_dict[body_arg] = getattr(args, body_arg) if body_arg != 'members' else [getattr(args, body_arg)]
+    # remove keys without values
+    filtered_body_dict = dict((k, v) for k, v in body_dict.iteritems() if v is not None)
+    # if the dictionary is not empty, convert it to JSON
+    return json.dumps(filtered_body_dict) if bool(filtered_body_dict) else None
+
+
+def run(args):
+    register_conf()
+    set_domain(project='valet')
+    args.host = args.host or CONF.server.host
+    args.port = args.port or CONF.server.port
+    args.timeout = args.timeout or 10
+    rest_cmd, cmd_url = cmd_details(args)
+    args.url = 'http://%s:%s/v1/groups' % (args.host, args.port) + cmd_url
+    auth_token = get_token(args.timeout, args)
+    args.headers = {
+        'content-type': 'application/json',
+        'X-Auth-Token': auth_token
+    }
+    args.body = populate_args_request_body(args)
+
+    try:
+        print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd, args.timeout)
+        if args.body:
+            resp = rest_cmd(args.url, timeout=args.timeout, data=args.body, headers=args.headers)
+        else:
+            resp = rest_cmd(args.url, timeout=args.timeout, headers=args.headers)
+    except Exception as e:
+        print(e)
+        exit(1)
+
+    if not 200 <= resp.status_code < 300:
+        content = resp.json() if resp.status_code == 500 else ''
+        print('API error: %s %s (Reason: %d)\n%s' % (rest_cmd.func_name.upper(), args.url, resp.status_code, content))
+        exit(1)
+    try:
+        if resp.content:
+            rj = resp.json()
+            pretty_print_json(rj)
+    except Exception as e:
+        print(e)
+        exit(1)
diff --git a/valet/cli/valetcli.py b/valet/cli/valetcli.py
new file mode 100755
index 0000000..76d7667
--- /dev/null
+++ b/valet/cli/valetcli.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+import argparse
+import sys
+import valet.cli.groupcli as groupcli
+# import logging
+
+
+class Cli(object):
+    def __init__(self):
+        self.args = None
+        self.submod = None
+        self.parser = None
+
+    def create_parser(self):
+        self.parser = argparse.ArgumentParser(prog='valet', description='VALET REST CLI')
+        service_sub = self.parser.add_subparsers(dest='service', metavar='<service>')
+        self.submod = {'group': groupcli}
+        for s in self.submod.values():
+            s.add_to_parser(service_sub)
+
+    def parse(self, argv=sys.argv):
+        sys.argv = argv
+        self.args = self.parser.parse_args()
+
+    def logic(self):
+        self.submod[self.args.service].run(self.args)
+
+
+def main(argv):
+    cli = Cli()
+    cli.create_parser()
+    cli.parse(argv)
+    cli.logic()
+
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/valet/engine/__init__.py b/valet/engine/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/engine/conf.py b/valet/engine/conf.py
new file mode 100644
index 0000000..532b704
--- /dev/null
+++ b/valet/engine/conf.py
@@ -0,0 +1,82 @@
+from oslo_config import cfg
+from valet.api import conf as api
+
+CONF = cfg.CONF
+
+ostro_cli_opts = [
+    cfg.StrOpt('command',
+               short='c',
+               default='status',
+               help='engine command.'),
+]
+
+
+engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf')
+engine_opts = [
+    cfg.StrOpt('pid', default='/var/run/valet/ostro-daemon.pid'),
+    cfg.StrOpt('mode', default='live',
+               help='sim will let Ostro simulate datacenter, while live will let it handle a real datacenter'),
+    cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'),
+    cfg.BoolOpt('network_control', default=False, help='whether network controller (i.e., Tegu) has been deployed'),
+    cfg.StrOpt('network_control_url', default='http://network_control:29444/tegu/api'),
+    cfg.StrOpt('ip', default='localhost'),
+    cfg.IntOpt('priority', default=1, help='this instance priority (master=1)'),
+    cfg.StrOpt('rpc_server_ip', default='localhost',
+               help='Set RPC server ip and port if used. Otherwise, ignore these parameters'),
+    cfg.StrOpt('rpc_server_port', default='8002'),
+    cfg.StrOpt('logger_name', default='engine.log'),
+    cfg.StrOpt('logging_level', default='debug'),
+    cfg.StrOpt('logging_dir', default='/var/log/valet/'),
+    cfg.IntOpt('max_main_log_size', default=5000000),
+    cfg.IntOpt('max_log_size', default=1000000),
+    cfg.IntOpt('max_num_of_logs', default=20),
+    cfg.StrOpt('datacenter_name', default='bigsite',
+               help='Inform the name of datacenter (region name), where Valet/Ostro is deployed.'),
+    cfg.IntOpt('num_of_region_chars', default=3, help='number of chars that indicates the region code'),
+    cfg.StrOpt('rack_code_list', default='r', help='rack indicator.'),
+    cfg.ListOpt('node_code_list', default='a,c,u,f,o,p,s',
+                help='indicates the node type. a: network, c: KVM compute, u: ESXi compute, f: ?, o: operation, '
+                     'p: power, s: storage.'),
+    cfg.StrOpt('compute_trigger_time', default='1:00',
+               help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'),
+    cfg.IntOpt('compute_trigger_frequency', default=3600,
+               help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'),
+    cfg.StrOpt('topology_trigger_time', default='2:00',
+               help='Set trigger time or frequency for checking datacenter topology'),
+    cfg.IntOpt('topology_trigger_frequency', default=3600,
+               help='Set trigger time or frequency for checking datacenter topology'),
+    cfg.IntOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. '
+                                                                'Note that each compute node can have its own ratios'),
+    cfg.FloatOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. '
+                                                                   'Note that each compute node can have its own ratios'),
+    cfg.IntOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. '
+                                                                'Note that each compute node can have its own ratios'),
+    cfg.IntOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+                                                            'that are set aside for applications workload spikes.'),
+    cfg.IntOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+                                                            'that are set aside for applications workload spikes.'),
+    cfg.IntOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e.
standby) ' + 'that are set aside for applications workload spikes.'), +] + +listener_group = cfg.OptGroup(name='events_listener', title='Valet Engine listener') +listener_opts = [ + cfg.StrOpt('exchange', default='nova'), + cfg.StrOpt('exchange_type', default='topic'), + cfg.BoolOpt('auto_delete', default=False), + cfg.StrOpt('output_format', default='dict'), + cfg.BoolOpt('store', default=True), + cfg.StrOpt('logging_level', default='debug'), + cfg.StrOpt('logging_loc', default='/var/log/valet/'), + cfg.StrOpt('logger_name', default='ostro_listener.log'), + cfg.IntOpt('max_main_log_size', default=5000000), +] + + +def register_conf(): + api.register_conf() + CONF.register_group(engine_group) + CONF.register_opts(engine_opts, engine_group) + CONF.register_group(listener_group) + CONF.register_opts(listener_opts, listener_group) + CONF.register_cli_opts(ostro_cli_opts) diff --git a/valet/engine/groups/__init__.py b/valet/engine/groups/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/listener/PKG-INFO b/valet/engine/listener/PKG-INFO new file mode 100644 index 0000000..5b2c5e1 --- /dev/null +++ b/valet/engine/listener/PKG-INFO @@ -0,0 +1,4 @@ +Metadata-Version: 1.2 +Name: ostro-listener +Version: 0.1.0 +Author-email: jdandrea@research.att.com diff --git a/valet/engine/listener/__init__.py b/valet/engine/listener/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/listener/listener_manager.py b/valet/engine/listener/listener_manager.py new file mode 100644 index 0000000..a16ce18 --- /dev/null +++ b/valet/engine/listener/listener_manager.py @@ -0,0 +1,165 @@ +''' +Created on Nov 29, 2016 + +@author: stack +''' + +from datetime import datetime +import json +import pika +import pprint +import threading +import traceback +from valet.api.db.models.music import Music +from valet.engine.listener.oslo_messages import OsloMessage +from valet.engine.optimizer.util.util import init_logger +import yaml + + +class ListenerManager(threading.Thread): + + def __init__(self, _t_id, _t_name, _config): + threading.Thread.__init__(self) + self.thread_id = _t_id + self.thread_name = _t_name + self.config = _config + self.listener_logger = init_logger(self.config.events_listener) + self.MUSIC = None + + def run(self): + '''Entry point + + Connect to localhost rabbitmq servers, use username:password@ipaddress:port. + The port is typically 5672, and the default username and password are guest and guest. 
+ credentials = pika.PlainCredentials("guest", "PASSWORD") + ''' + try: + self.listener_logger.info("ListenerManager: start " + self.thread_name + " ......") + + if self.config.events_listener.store: + + kwargs = { + 'host': self.config.music.host, + 'port': self.config.music.port, + 'replication_factor': self.config.music.replication_factor, + } + engine = Music(**kwargs) + engine.create_keyspace(self.config.music.keyspace) + self.MUSIC = {'engine': engine, 'keyspace': self.config.music.keyspace} + self.listener_logger.debug('Storing in music on %s, keyspace %s' % (self.config.music.host, self.config.music.keyspace)) + + self.listener_logger.debug('Connecting to %s, with %s' % (self.config.messaging.host, self.config.messaging.username)) + credentials = pika.PlainCredentials(self.config.messaging.username, self.config.messaging.password) + parameters = pika.ConnectionParameters(self.config.messaging.host, self.config.messaging.port, '/', credentials) + + connection = pika.BlockingConnection(parameters) + channel = connection.channel() + + # Select the exchange we want our queue to connect to + exchange_name = self.config.events_listener.exchange + exchange_type = self.config.events_listener.exchange_type + auto_delete = self.config.events_listener.auto_delete + + # Use the binding key to select what type of messages you want + # to receive. '#' is a wild card -- meaning receive all messages + binding_key = "#" + + # Check whether or not an exchange with the given name and type exists. + # Make sure that the exchange is multicast "fanout" or "topic" type + # otherwise our queue will consume the messages intended for other queues + channel.exchange_declare(exchange=exchange_name, + exchange_type=exchange_type, + auto_delete=auto_delete) + + # Create an empty queue + result = channel.queue_declare(exclusive=True) + queue_name = result.method.queue + + # Bind the queue to the selected exchange + channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=binding_key) + self.listener_logger.info('Channel is bound, listening on %s exchange %s', self.config.messaging.host, self.config.events_listener.exchange) + + # Start consuming messages + channel.basic_consume(self.on_message, queue_name) + except Exception: + self.listener_logger.error(traceback.format_exc()) + return + + try: + channel.start_consuming() + except KeyboardInterrupt: + channel.stop_consuming() + + # Close the channel on keyboard interrupt + channel.close() + connection.close() + + def on_message(self, channel, method_frame, _, body): # pylint: disable=W0613 + '''Specify the action to be taken on a message received''' + message = yaml.load(body) + try: + if 'oslo.message' in message.keys(): + message = yaml.load(message['oslo.message']) + if self.is_message_wanted(message): + if self.MUSIC and self.MUSIC.get('engine'): + self.store_message(message) + else: + return + + self.listener_logger.debug("\nMessage No: %s\n", method_frame.delivery_tag) + message_obj = yaml.load(body) + if 'oslo.message' in message_obj.keys(): + message_obj = yaml.load(message_obj['oslo.message']) + if self.config.events_listener.output_format == 'json': + self.listener_logger.debug(json.dumps(message_obj, sort_keys=True, indent=2)) + elif self.config.events_listener.output_format == 'yaml': + self.listener_logger.debug(yaml.dump(message_obj)) + else: + self.listener_logger.debug(pprint.pformat(message_obj)) + channel.basic_ack(delivery_tag=method_frame.delivery_tag) + except Exception: + self.listener_logger.error(traceback.format_exc()) + 
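+            # NOTE: log and drop a malformed message (left unacked) so a single
+            # bad event does not stop the consumer loop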
return + + def is_message_wanted(self, message): + ''' Based on markers from Ostro, determine if this is a wanted message. ''' + method = message.get('method', None) + args = message.get('args', None) + + nova_props = {'nova_object.changes', 'nova_object.data', 'nova_object.name'} + args_props = {'filter_properties', 'instance'} + + is_data = method and args + is_nova = is_data and 'objinst' in args and nova_props.issubset(args['objinst']) + + action_instance = is_nova and method == 'object_action' and self.is_nova_name(args) and self.is_nova_state(args) + + action_compute = is_nova and self.is_compute_name(args) + create_instance = is_data and method == 'build_and_run_instance' and args_props.issubset(args) and 'nova_object.data' in args['instance'] + + return action_instance or action_compute or create_instance + + def store_message(self, message): + '''Store message in Music''' + timestamp = datetime.now().isoformat() + args = json.dumps(message.get('args', None)) + exchange = self.config.events_listener.exchange + method = message.get('method', None) + + kwargs = { + 'timestamp': timestamp, + 'args': args, + 'exchange': exchange, + 'method': method, + 'database': self.MUSIC, + } + OsloMessage(**kwargs) # pylint: disable=W0612 + + def is_nova_name(self, args): + return args['objinst']['nova_object.name'] == 'Instance' + + def is_nova_state(self, args): + return args['objinst']['nova_object.data']['vm_state'] in ['deleted', 'active'] + + def is_compute_name(self, args): + return args['objinst']['nova_object.name'] == 'ComputeNode' diff --git a/valet/engine/listener/oslo_messages.py b/valet/engine/listener/oslo_messages.py new file mode 100644 index 0000000..71230f3 --- /dev/null +++ b/valet/engine/listener/oslo_messages.py @@ -0,0 +1,95 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +'''OsloMessage Database Model''' + +# This is based on Music models used in Valet. 
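+#
+# A minimal usage sketch (the database dict mirrors what ListenerManager
+# builds; the keyspace name here is illustrative):
+#
+#     db = {'engine': music_engine, 'keyspace': 'valet'}
+#     OsloMessage(timestamp, args, exchange, method, database=db)
+#
+# Instantiating the class inserts one row into the oslo_messages table
+# unless _insert=False is passed.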
+ +import uuid + + +class OsloMessage(object): + __tablename__ = 'oslo_messages' + + _database = None + + timestamp = None + args = None + exchange = None + method = None + + @classmethod + def schema(cls): + '''Return schema.''' + schema = { + 'timestamp': 'text', + 'args': 'text', + 'exchange': 'text', + 'method': 'text', + 'PRIMARY KEY': '(timestamp)' + } + return schema + + @classmethod + def pk_name(cls): + return 'timestamp' + + def pk_value(self): + return self.timestamp + + def insert(self): + '''Insert row.''' + keyspace = self._database.get('keyspace') + kwargs = { + 'keyspace': keyspace, + 'table': self.__tablename__, + 'values': self.values() + } + pk_name = self.pk_name() + if pk_name not in kwargs['values']: + the_id = str(uuid.uuid4()) + kwargs['values'][pk_name] = the_id + setattr(self, pk_name, the_id) + engine = self._database.get('engine') + engine.create_row(**kwargs) + + def values(self): + return { + 'timestamp': self.timestamp, + 'args': self.args, + 'exchange': self.exchange, + 'method': self.method, + } + + def __init__(self, timestamp, args, exchange, + method, database, _insert=True): + self._database = database + self.timestamp = timestamp + self.args = args + self.exchange = exchange + self.method = method + if _insert: + self.insert() + + def __json__(self): + json_ = {} + json_['timestamp'] = self.timestamp + json_['args'] = self.args + json_['exchange'] = self.exchange + json_['method'] = self.method + return json_ diff --git a/valet/engine/optimizer/__init__.py b/valet/engine/optimizer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/optimizer/app_manager/__init__.py b/valet/engine/optimizer/app_manager/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/optimizer/app_manager/app_handler.py b/valet/engine/optimizer/app_manager/app_handler.py new file mode 100755 index 0000000..47e53b2 --- /dev/null +++ b/valet/engine/optimizer/app_manager/app_handler.py @@ -0,0 +1,285 @@ +#!/bin/python + +# Modified: Sep. 
27, 2016 + + +import json + +from valet.engine.optimizer.app_manager.app_topology import AppTopology +from valet.engine.optimizer.app_manager.app_topology_base import VM +from valet.engine.optimizer.app_manager.application import App +from valet.engine.optimizer.util import util as util + + +class AppHandler(object): + + def __init__(self, _resource, _db, _config, _logger): + self.resource = _resource + self.db = _db + self.config = _config + self.logger = _logger + + ''' current app requested, a temporary copy ''' + self.apps = {} + + self.last_log_index = 0 + + self.status = "success" + + def add_app(self, _app_data): + self.apps.clear() + + app_topology = AppTopology(self.resource, self.logger) + + for app in _app_data: + self.logger.debug("AppHandler: parse app") + + stack_id = None + if "stack_id" in app.keys(): + stack_id = app["stack_id"] + else: + stack_id = "none" + + application_name = None + if "application_name" in app.keys(): + application_name = app["application_name"] + else: + application_name = "none" + + action = app["action"] + if action == "ping": + self.logger.debug("AppHandler: got ping") + elif action == "replan" or action == "migrate": + re_app = self._regenerate_app_topology(stack_id, app, app_topology, action) + if re_app is None: + self.apps[stack_id] = None + self.status = "cannot locate the original plan for stack = " + stack_id + return None + + if action == "replan": + self.logger.debug("AppHandler: got replan: " + stack_id) + elif action == "migrate": + self.logger.debug("AppHandler: got migration: " + stack_id) + + app_id = app_topology.set_app_topology(re_app) + + if app_id is None: + self.logger.error("AppHandler: " + app_topology.status) + self.status = app_topology.status + self.apps[stack_id] = None + return None + else: + app_id = app_topology.set_app_topology(app) + + if app_id is None: + self.logger.error("AppHandler: " + app_topology.status) + self.status = app_topology.status + self.apps[stack_id] = None + return None + + new_app = App(stack_id, application_name, action) + self.apps[stack_id] = new_app + + return app_topology + + def add_placement(self, _placement_map, _timestamp): + for v in _placement_map.keys(): + if self.apps[v.app_uuid].status == "requested": + self.apps[v.app_uuid].status = "scheduled" + self.apps[v.app_uuid].timestamp_scheduled = _timestamp + + if isinstance(v, VM): + self.apps[v.app_uuid].add_vm(v, _placement_map[v]) + # elif isinstance(v, Volume): + # self.apps[v.app_uuid].add_volume(v, _placement_map[v]) + else: + if _placement_map[v] in self.resource.hosts.keys(): + host = self.resource.hosts[_placement_map[v]] + if v.level == "host": + self.apps[v.app_uuid].add_vgroup(v, host.name) + else: + hg = self.resource.host_groups[_placement_map[v]] + if v.level == hg.host_type: + self.apps[v.app_uuid].add_vgroup(v, hg.name) + + if self._store_app_placements() is False: + # NOTE: ignore? 
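+            # treated as best-effort for now: a failed placement log/store
+            # does not fail the placement itself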
+ pass + + def _store_app_placements(self): + (app_logfile, last_index, mode) = util.get_last_logfile( + self.config.app_log_loc, self.config.max_log_size, self.config.max_num_of_logs, + self.resource.datacenter.name, self.last_log_index) + self.last_log_index = last_index + + # TODO(GJ): error handling + + logging = open(self.config.app_log_loc + app_logfile, mode) + + for appk, app in self.apps.iteritems(): + json_log = app.log_in_info() + log_data = json.dumps(json_log) + + logging.write(log_data) + logging.write("\n") + + logging.close() + + self.logger.info("AppHandler: log app in " + app_logfile) + + if self.db is not None: + for appk, app in self.apps.iteritems(): + json_info = app.get_json_info() + if self.db.add_app(appk, json_info) is False: + return False + + if self.db.update_app_log_index(self.resource.datacenter.name, self.last_log_index) is False: + return False + + return True + + def remove_placement(self): + if self.db is not None: + for appk, _ in self.apps.iteritems(): + if self.db.add_app(appk, None) is False: + self.logger.error("AppHandler: error while adding app info to MUSIC") + # NOTE: ignore? + + def get_vm_info(self, _s_uuid, _h_uuid, _host): + vm_info = {} + + if _h_uuid is not None and _h_uuid != "none" and \ + _s_uuid is not None and _s_uuid != "none": + vm_info = self.db.get_vm_info(_s_uuid, _h_uuid, _host) + + return vm_info + + def update_vm_info(self, _s_uuid, _h_uuid): + s_uuid_exist = bool(_s_uuid is not None and _s_uuid != "none") + h_uuid_exist = bool(_h_uuid is not None and _h_uuid != "none") + if s_uuid_exist and h_uuid_exist: + return self.db.update_vm_info(_s_uuid, _h_uuid) + return True + + def _regenerate_app_topology(self, _stack_id, _app, _app_topology, _action): + re_app = {} + + old_app = self.db.get_app_info(_stack_id) + if old_app is None: + self.status = "error while getting old_app from MUSIC" + self.logger.error("AppHandler: " + self.status) + return None + elif len(old_app) == 0: + self.status = "cannot find the old app in MUSIC" + self.logger.error("AppHandler: " + self.status) + return None + + re_app["action"] = "create" + re_app["stack_id"] = _stack_id + + resources = {} + diversity_groups = {} + exclusivity_groups = {} + + if "VMs" in old_app.keys(): + for vmk, vm in old_app["VMs"].iteritems(): + resources[vmk] = {} + resources[vmk]["name"] = vm["name"] + resources[vmk]["type"] = "OS::Nova::Server" + properties = {} + properties["flavor"] = vm["flavor"] + if vm["availability_zones"] != "none": + properties["availability_zone"] = vm["availability_zones"] + resources[vmk]["properties"] = properties + + if len(vm["diversity_groups"]) > 0: + for divk, level_name in vm["diversity_groups"].iteritems(): + div_id = divk + ":" + level_name + if div_id not in diversity_groups.keys(): + diversity_groups[div_id] = [] + diversity_groups[div_id].append(vmk) + + if len(vm["exclusivity_groups"]) > 0: + for exk, level_name in vm["exclusivity_groups"].iteritems(): + ex_id = exk + ":" + level_name + if ex_id not in exclusivity_groups.keys(): + exclusivity_groups[ex_id] = [] + exclusivity_groups[ex_id].append(vmk) + + if _action == "replan": + if vmk == _app["orchestration_id"]: + _app_topology.candidate_list_map[vmk] = _app["locations"] + + self.logger.debug("AppHandler: re-requested vm = " + vm["name"] + " in") + for hk in _app["locations"]: + self.logger.debug(" " + hk) + + elif vmk in _app["exclusions"]: + _app_topology.planned_vm_map[vmk] = vm["host"] + + self.logger.debug("AppHandler: exception from replan = " + vm["name"]) + + elif _action == 
"migrate": + if vmk == _app["orchestration_id"]: + _app_topology.exclusion_list_map[vmk] = _app["excluded_hosts"] + if vm["host"] not in _app["excluded_hosts"]: + _app_topology.exclusion_list_map[vmk].append(vm["host"]) + else: + _app_topology.planned_vm_map[vmk] = vm["host"] + + _app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"], vm["mem"], vm["local_volume"]) + + if "VGroups" in old_app.keys(): + for gk, affinity in old_app["VGroups"].iteritems(): + resources[gk] = {} + resources[gk]["type"] = "ATT::Valet::GroupAssignment" + properties = {} + properties["group_type"] = "affinity" + properties["group_name"] = affinity["name"] + properties["level"] = affinity["level"] + properties["resources"] = [] + for r in affinity["subvgroup_list"]: + properties["resources"].append(r) + resources[gk]["properties"] = properties + + if len(affinity["diversity_groups"]) > 0: + for divk, level_name in affinity["diversity_groups"].iteritems(): + div_id = divk + ":" + level_name + if div_id not in diversity_groups.keys(): + diversity_groups[div_id] = [] + diversity_groups[div_id].append(gk) + + if len(affinity["exclusivity_groups"]) > 0: + for exk, level_name in affinity["exclusivity_groups"].iteritems(): + ex_id = exk + ":" + level_name + if ex_id not in exclusivity_groups.keys(): + exclusivity_groups[ex_id] = [] + exclusivity_groups[ex_id].append(gk) + + # NOTE: skip pipes in this version + + for div_id, resource_list in diversity_groups.iteritems(): + divk_level_name = div_id.split(":") + resources[divk_level_name[0]] = {} + resources[divk_level_name[0]]["type"] = "ATT::Valet::GroupAssignment" + properties = {} + properties["group_type"] = "diversity" + properties["group_name"] = divk_level_name[2] + properties["level"] = divk_level_name[1] + properties["resources"] = resource_list + resources[divk_level_name[0]]["properties"] = properties + + for ex_id, resource_list in exclusivity_groups.iteritems(): + exk_level_name = ex_id.split(":") + resources[exk_level_name[0]] = {} + resources[exk_level_name[0]]["type"] = "ATT::Valet::GroupAssignment" + properties = {} + properties["group_type"] = "exclusivity" + properties["group_name"] = exk_level_name[2] + properties["level"] = exk_level_name[1] + properties["resources"] = resource_list + resources[exk_level_name[0]]["properties"] = properties + + re_app["resources"] = resources + + return re_app diff --git a/valet/engine/optimizer/app_manager/app_topology.py b/valet/engine/optimizer/app_manager/app_topology.py new file mode 100755 index 0000000..249d9c5 --- /dev/null +++ b/valet/engine/optimizer/app_manager/app_topology.py @@ -0,0 +1,219 @@ +#!/bin/python + +# Modified: Sep. 
22, 2016 + + +from valet.engine.optimizer.app_manager.app_topology_base import VM, VGroup +from valet.engine.optimizer.app_manager.app_topology_parser import Parser + + +class AppTopology(object): + + def __init__(self, _resource, _logger): + self.vgroups = {} + self.vms = {} + self.volumes = {} + + ''' for replan ''' + self.old_vm_map = {} + self.planned_vm_map = {} + self.candidate_list_map = {} + + ''' for migration-tip ''' + self.exclusion_list_map = {} + + self.resource = _resource + self.logger = _logger + + ''' restriction of host naming convention ''' + high_level_allowed = True + if "none" in self.resource.datacenter.region_code_list: + high_level_allowed = False + + self.parser = Parser(high_level_allowed, self.logger) + + self.total_nw_bandwidth = 0 + self.total_CPU = 0 + self.total_mem = 0 + self.total_local_vol = 0 + self.total_vols = {} + self.optimization_priority = None + + self.status = "success" + + ''' parse and set each app ''' + def set_app_topology(self, _app_graph): + (vgroups, vms, volumes) = self.parser.set_topology(_app_graph) + + if len(vgroups) == 0 and len(vms) == 0 and len(volumes) == 0: + self.status = self.parser.status + return None + + ''' cumulate virtual resources ''' + for _, vgroup in vgroups.iteritems(): + self.vgroups[vgroup.uuid] = vgroup + for _, vm in vms.iteritems(): + self.vms[vm.uuid] = vm + for _, vol in volumes.iteritems(): + self.volumes[vol.uuid] = vol + + return self.parser.stack_id, self.parser.application_name, self.parser.action + + def set_weight(self): + for _, vm in self.vms.iteritems(): + self._set_vm_weight(vm) + for _, vg in self.vgroups.iteritems(): + self._set_vm_weight(vg) + + for _, vg in self.vgroups.iteritems(): + self._set_vgroup_resource(vg) + + for _, vg in self.vgroups.iteritems(): + self._set_vgroup_weight(vg) + + def _set_vm_weight(self, _v): + if isinstance(_v, VGroup): + for _, sg in _v.subvgroups.iteritems(): + self._set_vm_weight(sg) + else: + if self.resource.CPU_avail > 0: + _v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail) + else: + _v.vCPU_weight = 1.0 + self.total_CPU += _v.vCPUs + + if self.resource.mem_avail > 0: + _v.mem_weight = float(_v.mem) / float(self.resource.mem_avail) + else: + _v.mem_weight = 1.0 + self.total_mem += _v.mem + + if self.resource.local_disk_avail > 0: + _v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail) + else: + if _v.local_volume_size > 0: + _v.local_volume_weight = 1.0 + else: + _v.local_volume_weight = 0.0 + self.total_local_vol += _v.local_volume_size + + bandwidth = _v.nw_bandwidth + _v.io_bandwidth + + if self.resource.nw_bandwidth_avail > 0: + _v.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail) + else: + if bandwidth > 0: + _v.bandwidth_weight = 1.0 + else: + _v.bandwidth_weight = 0.0 + + self.total_nw_bandwidth += bandwidth + + def _set_vgroup_resource(self, _vg): + if isinstance(_vg, VM): + return + for _, sg in _vg.subvgroups.iteritems(): + self._set_vgroup_resource(sg) + _vg.vCPUs += sg.vCPUs + _vg.mem += sg.mem + _vg.local_volume_size += sg.local_volume_size + + def _set_vgroup_weight(self, _vgroup): + if self.resource.CPU_avail > 0: + _vgroup.vCPU_weight = float(_vgroup.vCPUs) / float(self.resource.CPU_avail) + else: + if _vgroup.vCPUs > 0: + _vgroup.vCPU_weight = 1.0 + else: + _vgroup.vCPU_weight = 0.0 + + if self.resource.mem_avail > 0: + _vgroup.mem_weight = float(_vgroup.mem) / float(self.resource.mem_avail) + else: + if _vgroup.mem > 0: + _vgroup.mem_weight = 1.0 + else: + 
_vgroup.mem_weight = 0.0 + + if self.resource.local_disk_avail > 0: + _vgroup.local_volume_weight = float(_vgroup.local_volume_size) / float(self.resource.local_disk_avail) + else: + if _vgroup.local_volume_size > 0: + _vgroup.local_volume_weight = 1.0 + else: + _vgroup.local_volume_weight = 0.0 + + bandwidth = _vgroup.nw_bandwidth + _vgroup.io_bandwidth + + if self.resource.nw_bandwidth_avail > 0: + _vgroup.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail) + else: + if bandwidth > 0: + _vgroup.bandwidth_weight = 1.0 + else: + _vgroup.bandwidth_weight = 0.0 + + for _, svg in _vgroup.subvgroups.iteritems(): + if isinstance(svg, VGroup): + self._set_vgroup_weight(svg) + + def set_optimization_priority(self): + if len(self.vgroups) == 0 and len(self.vms) == 0 and len(self.volumes) == 0: + return + + app_nw_bandwidth_weight = -1 + if self.resource.nw_bandwidth_avail > 0: + app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / float(self.resource.nw_bandwidth_avail) + else: + if self.total_nw_bandwidth > 0: + app_nw_bandwidth_weight = 1.0 + else: + app_nw_bandwidth_weight = 0.0 + + app_CPU_weight = -1 + if self.resource.CPU_avail > 0: + app_CPU_weight = float(self.total_CPU) / float(self.resource.CPU_avail) + else: + if self.total_CPU > 0: + app_CPU_weight = 1.0 + else: + app_CPU_weight = 0.0 + + app_mem_weight = -1 + if self.resource.mem_avail > 0: + app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail) + else: + if self.total_mem > 0: + app_mem_weight = 1.0 + else: + app_mem_weight = 0.0 + + app_local_vol_weight = -1 + if self.resource.local_disk_avail > 0: + app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail) + else: + if self.total_local_vol > 0: + app_local_vol_weight = 1.0 + else: + app_local_vol_weight = 0.0 + + total_vol_list = [] + for vol_class in self.total_vols.keys(): + total_vol_list.append(self.total_vols[vol_class]) + + app_vol_weight = -1 + if self.resource.disk_avail > 0: + app_vol_weight = float(sum(total_vol_list)) / float(self.resource.disk_avail) + else: + if sum(total_vol_list) > 0: + app_vol_weight = 1.0 + else: + app_vol_weight = 0.0 + + opt = [("bw", app_nw_bandwidth_weight), + ("cpu", app_CPU_weight), + ("mem", app_mem_weight), + ("lvol", app_local_vol_weight), + ("vol", app_vol_weight)] + + self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True) diff --git a/valet/engine/optimizer/app_manager/app_topology_base.py b/valet/engine/optimizer/app_manager/app_topology_base.py new file mode 100755 index 0000000..00e5c41 --- /dev/null +++ b/valet/engine/optimizer/app_manager/app_topology_base.py @@ -0,0 +1,257 @@ +#!/bin/python + +# Modified: Sep. 22, 2016 + + +LEVELS = ["host", "rack", "cluster"] + + +class VGroup(object): + + def __init__(self, _app_uuid, _uuid): + self.app_uuid = _app_uuid + self.uuid = _uuid + self.name = None + + self.status = "requested" + + self.vgroup_type = "AFF" # Support Affinity group at this version + self.level = None # host, rack, or cluster + + self.survgroup = None # where this vgroup belong to + self.subvgroups = {} # child vgroups + + self.vgroup_list = [] # a list of links to VMs or Volumes + + self.diversity_groups = {} # cumulative diversity/exclusivity group + self.exclusivity_groups = {} # over this level. 
key=name, value=level + + self.availability_zone_list = [] + # self.host_aggregates = {} # cumulative aggregates + self.extra_specs_list = [] # cumulative extra_specs + + self.vCPUs = 0 + self.mem = 0 # MB + self.local_volume_size = 0 # GB + self.volume_sizes = {} # key = volume_class_name, value = size + self.nw_bandwidth = 0 # Mbps + self.io_bandwidth = 0 # Mbps + + self.vCPU_weight = -1 + self.mem_weight = -1 + self.local_volume_weight = -1 + self.volume_weight = -1 # averge of all storage classes + self.bandwidth_weight = -1 + + self.host = None + + def get_json_info(self): + survgroup_id = None + if self.survgroup is None: + survgroup_id = "none" + else: + survgroup_id = self.survgroup.uuid + + subvgroup_list = [] + for vk in self.subvgroups.keys(): + subvgroup_list.append(vk) + + link_list = [] + for l in self.vgroup_list: + link_list.append(l.get_json_info()) + + return {'name': self.name, + 'status': self.status, + 'vgroup_type': self.vgroup_type, + 'level': self.level, + 'survgroup': survgroup_id, + 'subvgroup_list': subvgroup_list, + 'link_list': link_list, + 'diversity_groups': self.diversity_groups, + 'exclusivity_groups': self.exclusivity_groups, + 'availability_zones': self.availability_zone_list, + # 'host_aggregates':host_aggregates, + 'extra_specs_list': self.extra_specs_list, + 'cpus': self.vCPUs, + 'mem': self.mem, + 'local_volume': self.local_volume_size, + 'volumes': self.volume_sizes, + 'nw_bandwidth': self.nw_bandwidth, + 'io_bandwidth': self.io_bandwidth, + 'cpu_weight': self.vCPU_weight, + 'mem_weight': self.mem_weight, + 'local_volume_weight': self.local_volume_weight, + 'volume_weight': self.volume_weight, + 'bandwidth_weight': self.bandwidth_weight, + 'host': self.host} + + +class VM(object): + + def __init__(self, _app_uuid, _uuid): + self.app_uuid = _app_uuid + self.uuid = _uuid + self.name = None + + self.status = "requested" + + self.survgroup = None # VGroup where this vm belongs to + + self.volume_list = [] # a list of links to Volumes + self.vm_list = [] # a list of links to VMs + + self.diversity_groups = {} + self.exclusivity_groups = {} + + self.availability_zone = None + # self.host_aggregates = {} + self.extra_specs_list = [] + + self.flavor = None + self.vCPUs = 0 + self.mem = 0 # MB + self.local_volume_size = 0 # GB + self.nw_bandwidth = 0 + self.io_bandwidth = 0 + + self.vCPU_weight = -1 + self.mem_weight = -1 + self.local_volume_weight = -1 + self.bandwidth_weight = -1 + + self.host = None # where this vm is placed + + def get_json_info(self): + survgroup_id = None + if self.survgroup is None: + survgroup_id = "none" + else: + survgroup_id = self.survgroup.uuid + + vm_list = [] + for vml in self.vm_list: + vm_list.append(vml.get_json_info()) + + vol_list = [] + for voll in self.volume_list: + vol_list.append(voll.get_json_info()) + + availability_zone = None + if self.availability_zone is None: + availability_zone = "none" + else: + availability_zone = self.availability_zone + + return {'name': self.name, + 'status': self.status, + 'survgroup': survgroup_id, + 'vm_list': vm_list, + 'volume_list': vol_list, + 'diversity_groups': self.diversity_groups, + 'exclusivity_groups': self.exclusivity_groups, + 'availability_zones': availability_zone, + # 'host_aggregates':host_aggregates, + 'extra_specs_list': self.extra_specs_list, + 'flavor': self.flavor, + 'cpus': self.vCPUs, + 'mem': self.mem, + 'local_volume': self.local_volume_size, + 'nw_bandwidth': self.nw_bandwidth, + 'io_bandwidth': self.io_bandwidth, + 'cpu_weight': self.vCPU_weight, + 
'mem_weight': self.mem_weight, + 'local_volume_weight': self.local_volume_weight, + 'bandwidth_weight': self.bandwidth_weight, + 'host': self.host} + + +class Volume(object): + + def __init__(self, _app_uuid, _uuid): + self.app_uuid = _app_uuid + self.uuid = _uuid + self.name = None + + self.status = "requested" + + self.volume_class = None + + self.survgroup = None # where this vm belongs to + + self.vm_list = [] # a list of links to VMs + + self.diversity_groups = {} + self.exclusivity_groups = {} + + self.volume_size = 0 # GB + self.io_bandwidth = 0 + + self.volume_weight = -1 + self.bandwidth_weight = -1 + + self.storage_host = None + + def get_json_info(self): + survgroup_id = None + if self.survgroup is None: + survgroup_id = "none" + else: + survgroup_id = self.survgroup.uuid + + volume_class = None + if self.volume_class is None: + volume_class = "none" + else: + volume_class = self.volume_class + + vm_list = [] + for vml in self.vm_list: + vm_list.append(vml.get_json_info()) + + return {'name': self.name, + 'status': self.status, + 'class': volume_class, + 'survgroup': survgroup_id, + 'vm_list': vm_list, + 'diversity_groups': self.diversity_groups, + 'exclusivity_groups': self.exclusivity_groups, + 'volume': self.volume_size, + 'io_bandwidth': self.io_bandwidth, + 'volume_weight': self.volume_weight, + 'bandwidth_weight': self.bandwidth_weight, + 'host': self.storage_host} + + +class VGroupLink(object): + + def __init__(self, _n): + self.node = _n # target VM or Volume + self.nw_bandwidth = 0 + self.io_bandwidth = 0 + + def get_json_info(self): + return {'target': self.node.uuid, + 'nw_bandwidth': self.nw_bandwidth, + 'io_bandwidth': self.io_bandwidth} + + +class VMLink(object): + + def __init__(self, _n): + self.node = _n # target VM + self.nw_bandwidth = 0 # Mbps + + def get_json_info(self): + return {'target': self.node.uuid, + 'nw_bandwidth': self.nw_bandwidth} + + +class VolumeLink(object): + + def __init__(self, _n): + self.node = _n # target Volume + self.io_bandwidth = 0 # Mbps + + def get_json_info(self): + return {'target': self.node.uuid, + 'io_bandwidth': self.io_bandwidth} diff --git a/valet/engine/optimizer/app_manager/app_topology_parser.py b/valet/engine/optimizer/app_manager/app_topology_parser.py new file mode 100755 index 0000000..a7a2550 --- /dev/null +++ b/valet/engine/optimizer/app_manager/app_topology_parser.py @@ -0,0 +1,641 @@ +#!/bin/python + +# Modified: Sep. 
27, 2016

+from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupLink, VM, VMLink, LEVELS
+
+
+'''
+- Restricted nestings (not allowed): EX in EX, EX in DIV, DIV in EX, DIV in DIV
+- VM/group cannot exist in multiple EX groups
+- Nested group's level cannot be higher than nesting group
+- The following Heat components are not supported:
+    OS::Nova::ServerGroup
+    OS::Heat::AutoScalingGroup
+    OS::Heat::Stack
+    OS::Heat::ResourceGroup
+'''
+
+
+class Parser(object):
+
+    def __init__(self, _high_level_allowed, _logger):
+        self.logger = _logger
+
+        self.high_level_allowed = _high_level_allowed
+
+        self.format_version = None
+        self.stack_id = None  # used as application id
+        self.application_name = None
+        self.action = None  # [create|update|ping]
+
+        self.status = "success"
+
+    def set_topology(self, _graph):
+        if "version" in _graph.keys():
+            self.format_version = _graph["version"]
+        else:
+            self.format_version = "0.0"
+
+        if "stack_id" in _graph.keys():
+            self.stack_id = _graph["stack_id"]
+        else:
+            self.stack_id = "none"
+
+        if "application_name" in _graph.keys():
+            self.application_name = _graph["application_name"]
+        else:
+            self.application_name = "none"
+
+        if "action" in _graph.keys():
+            self.action = _graph["action"]
+        else:
+            self.action = "any"
+
+        return self._set_topology(_graph["resources"])
+
+    def _set_topology(self, _elements):
+        vgroups = {}
+        vgroup_captured = False
+        vms = {}
+
+        ''' empty at this version '''
+        volumes = {}
+
+        for rk, r in _elements.iteritems():
+
+            if r["type"] == "OS::Nova::Server":
+                vm = VM(self.stack_id, rk)
+
+                if "name" in r.keys():
+                    vm.name = r["name"]
+                else:
+                    vm.name = vm.uuid
+
+                vm.flavor = r["properties"]["flavor"]
+
+                if "availability_zone" in r["properties"].keys():
+                    az = r["properties"]["availability_zone"]
+                    # NOTE: do not allow specifying a certain host name
+                    vm.availability_zone = az.split(":")[0]
+
+                vms[vm.uuid] = vm
+
+                self.logger.debug("Parser: get a vm = " + vm.name)
+
+            elif r["type"] == "OS::Cinder::Volume":
+                self.logger.warn("Parser: do nothing for volume at this version")
+
+            elif r["type"] == "ATT::Valet::GroupAssignment":
+                vgroup = VGroup(self.stack_id, rk)
+
+                vgroup.vgroup_type = None
+                if "group_type" in r["properties"].keys():
+                    if r["properties"]["group_type"] == "affinity":
+                        vgroup.vgroup_type = "AFF"
+                    elif r["properties"]["group_type"] == "diversity":
+                        vgroup.vgroup_type = "DIV"
+                    elif r["properties"]["group_type"] == "exclusivity":
+                        vgroup.vgroup_type = "EX"
+                    else:
+                        self.status = "unknown group = " + r["properties"]["group_type"]
+                        return {}, {}, {}
+                else:
+                    self.status = "no group type"
+                    return {}, {}, {}
+
+                if "group_name" in r["properties"].keys():
+                    vgroup.name = r["properties"]["group_name"]
+                else:
+                    if vgroup.vgroup_type == "EX":
+                        self.status = "no exclusivity group identifier"
+                        return {}, {}, {}
+                    else:
+                        vgroup.name = "any"
+
+                if "level" in r["properties"].keys():
+                    vgroup.level = r["properties"]["level"]
+                    if vgroup.level != "host":
+                        if self.high_level_allowed is False:
+                            self.status = "only host level of affinity group allowed " + \
+                                          "due to the mismatch of host naming convention"
+                            return {}, {}, {}
+                else:
+                    self.status = "no grouping level"
+                    return {}, {}, {}
+
+                vgroups[vgroup.uuid] = vgroup
+
+                self.logger.debug("Parser: get a group = " + vgroup.name)
+                vgroup_captured = True
+
+        self._set_vm_links(_elements, vms)
+
+        if self._set_volume_links(_elements, vms, volumes) is False:
+            return {}, {}, {}
+
+        self._set_total_link_capacities(vms, volumes)
+
+        self.logger.debug("Parser: all vms parsed")
+
+        if self._merge_diversity_groups(_elements, vgroups, vms, volumes) is False:
+            return {}, {}, {}
+
+        if self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) is False:
+            return {}, {}, {}
+
+        if self._merge_affinity_groups(_elements, vgroups, vms, volumes) is False:
+            return {}, {}, {}
+
+        ''' delete all EX and DIV vgroups after merging '''
+        for vgk in vgroups.keys():
+            vg = vgroups[vgk]
+            if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
+                del vgroups[vgk]
+
+        for vgk in vgroups.keys():
+            vgroup = vgroups[vgk]
+            self._set_vgroup_links(vgroup, vgroups, vms, volumes)
+
+        if vgroup_captured is True:
+            self.logger.debug("Parser: all groups resolved")
+
+        return vgroups, vms, volumes
+
+    def _set_vm_links(self, _elements, _vms):
+        for _, r in _elements.iteritems():
+            if r["type"] == "ATT::CloudQoS::Pipe":
+                resources = r["properties"]["resources"]
+                for vk1 in resources:
+                    if vk1 in _vms.keys():
+                        vm = _vms[vk1]
+                        for vk2 in resources:
+                            if vk2 != vk1:
+                                if vk2 in _vms.keys():
+                                    link = VMLink(_vms[vk2])
+                                    if "bandwidth" in r["properties"].keys():
+                                        link.nw_bandwidth = r["properties"]["bandwidth"]["min"]
+                                    vm.vm_list.append(link)
+
+    def _set_volume_links(self, _elements, _vms, _volumes):
+        for rk, r in _elements.iteritems():
+            if r["type"] == "OS::Cinder::VolumeAttachment":
+                self.logger.warn("Parser: do nothing for volume attachment at this version")
+
+        return True
+
+    def _set_total_link_capacities(self, _vms, _volumes):
+        for _, vm in _vms.iteritems():
+            for vl in vm.vm_list:
+                vm.nw_bandwidth += vl.nw_bandwidth
+            for voll in vm.volume_list:
+                vm.io_bandwidth += voll.io_bandwidth
+
+        for _, volume in _volumes.iteritems():
+            for vl in volume.vm_list:
+                volume.io_bandwidth += vl.io_bandwidth
+
+    def _merge_diversity_groups(self, _elements, _vgroups, _vms, _volumes):
+        for level in LEVELS:
+            for rk, r in _elements.iteritems():
+                if r["type"] == "ATT::Valet::GroupAssignment" and \
+                   r["properties"]["group_type"] == "diversity" and \
+                   r["properties"]["level"] == level:
+
+                    vgroup = _vgroups[rk]
+
+                    for vk in r["properties"]["resources"]:
+                        if vk in _vms.keys():
+                            vgroup.subvgroups[vk] = _vms[vk]
+                            _vms[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name
+                        elif vk in _volumes.keys():
+                            vgroup.subvgroups[vk] = _volumes[vk]
+                            _volumes[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name
+                        elif vk in _vgroups.keys():
+                            vg = _vgroups[vk]
+
+                            if LEVELS.index(vg.level) > LEVELS.index(level):
+                                self.status = "grouping scope: nested group's level is higher"
+                                return False
+
+                            if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
+                                self.status = "group type (" + vg.vgroup_type + ") not allowed to be nested in a diversity group at this version"
+                                return False
+
+                            vgroup.subvgroups[vk] = vg
+                            vg.diversity_groups[rk] = vgroup.level + ":" + vgroup.name
+                        else:
+                            self.status = "invalid resource = " + vk
+                            return False
+
+        return True
+
+    def _merge_exclusivity_groups(self, _elements, _vgroups, _vms, _volumes):
+        for level in LEVELS:
+            for rk, r in _elements.iteritems():
+                if r["type"] == "ATT::Valet::GroupAssignment" and \
+                   r["properties"]["group_type"] == "exclusivity" and \
+                   r["properties"]["level"] == level:
+
+                    vgroup = _vgroups[rk]
+
+                    for vk in r["properties"]["resources"]:
+                        if vk in _vms.keys():
+                            vgroup.subvgroups[vk] = _vms[vk]
+                            _vms[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name
+                        elif vk in _volumes.keys():
+                            vgroup.subvgroups[vk] = _volumes[vk]
+                            _volumes[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name
+                        elif vk in _vgroups.keys():
+                            vg = _vgroups[vk]
+
+                            if LEVELS.index(vg.level) > LEVELS.index(level):
+                                self.status = "grouping scope: nested group's level is higher"
+                                return False
+
+                            if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
+                                self.status = "group type (" + vg.vgroup_type + ") not allowed to be nested in an exclusivity group at this version"
+                                return False
+
+                            vgroup.subvgroups[vk] = vg
+                            vg.exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name
+                        else:
+                            self.status = "invalid resource = " + vk
+                            return False
+
+        return True
+
+    def _merge_affinity_groups(self, _elements, _vgroups, _vms, _volumes):
+        affinity_map = {}  # key is uuid of vm, volume, or vgroup & value is its parent vgroup
+
+        for level in LEVELS:
+            for rk, r in _elements.iteritems():
+                if r["type"] == "ATT::Valet::GroupAssignment" and \
+                   r["properties"]["group_type"] == "affinity" and \
+                   r["properties"]["level"] == level:
+
+                    vgroup = None
+                    if rk in _vgroups.keys():
+                        vgroup = _vgroups[rk]
+                    else:
+                        continue
+
+                    self.logger.debug("Parser: merge for affinity = " + vgroup.name)
+
+                    for vk in r["properties"]["resources"]:
+
+                        if vk in _vms.keys():
+                            vgroup.subvgroups[vk] = _vms[vk]
+                            _vms[vk].survgroup = vgroup
+
+                            affinity_map[vk] = vgroup
+
+                            self._add_implicit_diversity_groups(vgroup, _vms[vk].diversity_groups)
+                            self._add_implicit_exclusivity_groups(vgroup, _vms[vk].exclusivity_groups)
+                            self._add_memberships(vgroup, _vms[vk])
+
+                            del _vms[vk]
+
+                        elif vk in _volumes.keys():
+                            vgroup.subvgroups[vk] = _volumes[vk]
+                            _volumes[vk].survgroup = vgroup
+
+                            affinity_map[vk] = vgroup
+
+                            self._add_implicit_diversity_groups(vgroup, _volumes[vk].diversity_groups)
+                            self._add_implicit_exclusivity_groups(vgroup, _volumes[vk].exclusivity_groups)
+                            self._add_memberships(vgroup, _volumes[vk])
+
+                            del _volumes[vk]
+
+                        elif vk in _vgroups.keys():
+                            vg = _vgroups[vk]
+
+                            if LEVELS.index(vg.level) > LEVELS.index(level):
+                                self.status = "grouping scope: nested group's level is higher"
+                                return False
+
+                            if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
+                                if self._merge_subgroups(vgroup, vg.subvgroups, _vms, _volumes, _vgroups,
+                                                         _elements, affinity_map) is False:
+                                    return False
+                                del _vgroups[vk]
+                            else:
+                                if self._exist_in_subgroups(vk, vgroup) is None:
+                                    if self._get_subgroups(vg, _elements,
+                                                           _vgroups, _vms, _volumes,
+                                                           affinity_map) is False:
+                                        return False
+
+                                    vgroup.subvgroups[vk] = vg
+                                    vg.survgroup = vgroup
+
+                                    affinity_map[vk] = vgroup
+
+                                    self._add_implicit_diversity_groups(vgroup, vg.diversity_groups)
+                                    self._add_implicit_exclusivity_groups(vgroup, vg.exclusivity_groups)
+                                    self._add_memberships(vgroup, vg)
+
+                                    del _vgroups[vk]
+
+                        else:  # vk belongs to another vgroup already or refers to an invalid resource
+                            if vk not in affinity_map.keys():
+                                self.status = "invalid resource = " + vk
+                                return False
+
+                            if affinity_map[vk].uuid != vgroup.uuid:
+                                if self._exist_in_subgroups(vk, vgroup) is None:
+                                    self._set_implicit_grouping(vk, vgroup, affinity_map, _vgroups)
+
+        return True
+
+    def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups, _elements, _affinity_map):
+        for vk, _ in _subgroups.iteritems():
+            if vk in _vms.keys():
+                _vgroup.subvgroups[vk] = _vms[vk]
+                _vms[vk].survgroup = _vgroup
+
+                _affinity_map[vk] = _vgroup
+
+                self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups)
+                self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups)
+                self._add_memberships(_vgroup, _vms[vk])
+
+                del _vms[vk]
+
+            elif vk in _volumes.keys():
_vgroup.subvgroups[vk] = _volumes[vk] + _volumes[vk].survgroup = _vgroup + + _affinity_map[vk] = _vgroup + + self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups) + self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups) + self._add_memberships(_vgroup, _volumes[vk]) + + del _volumes[vk] + + elif vk in _vgroups.keys(): + vg = _vgroups[vk] + + if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): + self.status = "grouping scope: nested group's level is higher" + return False + + if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": + if self._merge_subgroups(_vgroup, vg.subvgroups, + _vms, _volumes, _vgroups, + _elements, _affinity_map) is False: + return False + del _vgroups[vk] + else: + if self._exist_in_subgroups(vk, _vgroup) is None: + if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False: + return False + + _vgroup.subvgroups[vk] = vg + vg.survgroup = _vgroup + + _affinity_map[vk] = _vgroup + + self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) + self._add_memberships(_vgroup, vg) + + del _vgroups[vk] + + else: # vk belongs to the other vgroup already or refer to invalid resource + if vk not in _affinity_map.keys(): + self.status = "invalid resource = " + vk + return False + + if _affinity_map[vk].uuid != _vgroup.uuid: + if self._exist_in_subgroups(vk, _vgroup) is None: + self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups) + + return True + + def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes, _affinity_map): + + for vk in _elements[_vgroup.uuid]["properties"]["resources"]: + + if vk in _vms.keys(): + _vgroup.subvgroups[vk] = _vms[vk] + _vms[vk].survgroup = _vgroup + + _affinity_map[vk] = _vgroup + + self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) + self._add_memberships(_vgroup, _vms[vk]) + + del _vms[vk] + + elif vk in _volumes.keys(): + _vgroup.subvgroups[vk] = _volumes[vk] + _volumes[vk].survgroup = _vgroup + + _affinity_map[vk] = _vgroup + + self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups) + self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups) + self._add_memberships(_vgroup, _volumes[vk]) + + del _volumes[vk] + + elif vk in _vgroups.keys(): + vg = _vgroups[vk] + + if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): + self.status = "grouping scope: nested group's level is higher" + return False + + if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": + if self._merge_subgroups(_vgroup, vg.subvgroups, + _vms, _volumes, _vgroups, + _elements, _affinity_map) is False: + return False + del _vgroups[vk] + else: + if self._exist_in_subgroups(vk, _vgroup) is None: + if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False: + return False + + _vgroup.subvgroups[vk] = vg + vg.survgroup = _vgroup + + _affinity_map[vk] = _vgroup + + self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) + self._add_memberships(_vgroup, vg) + + del _vgroups[vk] + else: + if vk not in _affinity_map.keys(): + self.status = "invalid resource = " + vk + return False + + if _affinity_map[vk].uuid != _vgroup.uuid: + if self._exist_in_subgroups(vk, _vgroup) is None: + self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups) + + return 
True + + def _add_implicit_diversity_groups(self, _vgroup, _diversity_groups): + for dz, level in _diversity_groups.iteritems(): + l = level.split(":", 1)[0] + if LEVELS.index(l) >= LEVELS.index(_vgroup.level): + _vgroup.diversity_groups[dz] = level + + def _add_implicit_exclusivity_groups(self, _vgroup, _exclusivity_groups): + for ex, level in _exclusivity_groups.iteritems(): + l = level.split(":", 1)[0] + if LEVELS.index(l) >= LEVELS.index(_vgroup.level): + _vgroup.exclusivity_groups[ex] = level + + def _add_memberships(self, _vgroup, _v): + if isinstance(_v, VM) or isinstance(_v, VGroup): + for extra_specs in _v.extra_specs_list: + _vgroup.extra_specs_list.append(extra_specs) + + if isinstance(_v, VM) and _v.availability_zone is not None: + if _v.availability_zone not in _vgroup.availability_zone_list: + _vgroup.availability_zone_list.append(_v.availability_zone) + + if isinstance(_v, VGroup): + for az in _v.availability_zone_list: + if az not in _vgroup.availability_zone_list: + _vgroup.availability_zone_list.append(az) + + ''' + for hgk, hg in _v.host_aggregates.iteritems(): + _vgroup.host_aggregates[hgk] = hg + ''' + + ''' take vk's most top parent as a s_vg's child vgroup ''' + def _set_implicit_grouping(self, _vk, _s_vg, _affinity_map, _vgroups): + t_vg = _affinity_map[_vk] # where _vk currently belongs to + + if t_vg.uuid in _affinity_map.keys(): # if the parent belongs to the other parent vgroup + self._set_implicit_grouping(t_vg.uuid, _s_vg, _affinity_map, _vgroups) + + else: + if LEVELS.index(t_vg.level) > LEVELS.index(_s_vg.level): + t_vg.level = _s_vg.level + + ''' + self.status = "Grouping scope: sub-group's level is larger" + return False + ''' + + if self._exist_in_subgroups(t_vg.uuid, _s_vg) is None: + _s_vg.subvgroups[t_vg.uuid] = t_vg + t_vg.survgroup = _s_vg + + _affinity_map[t_vg.uuid] = _s_vg + + self._add_implicit_diversity_groups(_s_vg, t_vg.diversity_groups) + self._add_implicit_exclusivity_groups(_s_vg, t_vg.exclusivity_groups) + self._add_memberships(_s_vg, t_vg) + + del _vgroups[t_vg.uuid] + + def _exist_in_subgroups(self, _vk, _vg): + containing_vg_uuid = None + for vk, v in _vg.subvgroups.iteritems(): + if vk == _vk: + containing_vg_uuid = _vg.uuid + break + else: + if isinstance(v, VGroup): + containing_vg_uuid = self._exist_in_subgroups(_vk, v) + if containing_vg_uuid is not None: + break + return containing_vg_uuid + + def _set_vgroup_links(self, _vgroup, _vgroups, _vms, _volumes): + for _, svg in _vgroup.subvgroups.iteritems(): # currently, not define vgroup itself in pipe + if isinstance(svg, VM): + for vml in svg.vm_list: + found = False + for _, tvgroup in _vgroups.iteritems(): + containing_vg_uuid = self._exist_in_subgroups(vml.node.uuid, tvgroup) + if containing_vg_uuid is not None: + found = True + if containing_vg_uuid != _vgroup.uuid and \ + self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: + self._add_nw_link(vml, _vgroup) + break + if found is False: + for tvk in _vms.keys(): + if tvk == vml.node.uuid: + self._add_nw_link(vml, _vgroup) + break + for voll in svg.volume_list: + found = False + for _, tvgroup in _vgroups.iteritems(): + containing_vg_uuid = self._exist_in_subgroups(voll.node.uuid, tvgroup) + if containing_vg_uuid is not None: + found = True + if containing_vg_uuid != _vgroup.uuid and \ + self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: + self._add_io_link(voll, _vgroup) + break + if found is False: + for tvk in _volumes.keys(): + if tvk == voll.node.uuid: + self._add_io_link(voll, _vgroup) + break + # elif 
isinstance(svg, Volume): + # for vml in svg.vm_list: + # found = False + # for _, tvgroup in _vgroups.iteritems(): + # containing_vg_uuid = self._exist_in_subgroups(vml.node.uuid, tvgroup) + # if containing_vg_uuid is not None: + # found = True + # if containing_vg_uuid != _vgroup.uuid and \ + # self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: + # self._add_io_link(vml, _vgroup) + # break + # if found is False: + # for tvk in _vms.keys(): + # if tvk == vml.node.uuid: + # self._add_io_link(vml, _vgroup) + # break + elif isinstance(svg, VGroup): + self._set_vgroup_links(svg, _vgroups, _vms, _volumes) + + for svgl in svg.vgroup_list: # svgl is a link to VM or Volume + if self._exist_in_subgroups(svgl.node.uuid, _vgroup) is None: + self._add_nw_link(svgl, _vgroup) + self._add_io_link(svgl, _vgroup) + + def _add_nw_link(self, _link, _vgroup): + _vgroup.nw_bandwidth += _link.nw_bandwidth + vgroup_link = self._get_vgroup_link(_link, _vgroup.vgroup_list) + if vgroup_link is not None: + vgroup_link.nw_bandwidth += _link.nw_bandwidth + else: + link = VGroupLink(_link.node) # _link.node is VM + link.nw_bandwidth = _link.nw_bandwidth + _vgroup.vgroup_list.append(link) + + def _add_io_link(self, _link, _vgroup): + _vgroup.io_bandwidth += _link.io_bandwidth + vgroup_link = self._get_vgroup_link(_link, _vgroup.vgroup_list) + if vgroup_link is not None: + vgroup_link.io_bandwidth += _link.io_bandwidth + else: + link = VGroupLink(_link.node) + link.io_bandwidth = _link.io_bandwidth + _vgroup.vgroup_list.append(link) + + def _get_vgroup_link(self, _link, _vgroup_link_list): + vgroup_link = None + for vgl in _vgroup_link_list: + if vgl.node.uuid == _link.node.uuid: + vgroup_link = vgl + break + return vgroup_link diff --git a/valet/engine/optimizer/app_manager/application.py b/valet/engine/optimizer/app_manager/application.py new file mode 100755 index 0000000..2f0e80a --- /dev/null +++ b/valet/engine/optimizer/app_manager/application.py @@ -0,0 +1,62 @@ +#!/bin/python + +# Modified: Feb. 
9, 2016


+class App(object):
+
+    def __init__(self, _app_id, _app_name, _action):
+        self.app_id = _app_id
+        self.app_name = _app_name
+
+        self.request_type = _action  # create, update, or ping
+
+        self.timestamp_scheduled = 0
+
+        self.vgroups = {}
+        self.vms = {}
+        self.volumes = {}
+
+        self.status = 'requested'  # Moved to "scheduled" (and then "placed")
+
+    def add_vm(self, _vm, _host_name):
+        self.vms[_vm.uuid] = _vm
+        self.vms[_vm.uuid].status = "scheduled"
+        self.vms[_vm.uuid].host = _host_name
+
+    def add_volume(self, _vol, _host_name):
+        self.volumes[_vol.uuid] = _vol
+        self.volumes[_vol.uuid].status = "scheduled"
+        self.volumes[_vol.uuid].storage_host = _host_name
+
+    def add_vgroup(self, _vg, _host_name):
+        self.vgroups[_vg.uuid] = _vg
+        self.vgroups[_vg.uuid].status = "scheduled"
+        self.vgroups[_vg.uuid].host = _host_name
+
+    def get_json_info(self):
+        vms = {}
+        for vmk, vm in self.vms.iteritems():
+            vms[vmk] = vm.get_json_info()
+
+        vols = {}
+        for volk, vol in self.volumes.iteritems():
+            vols[volk] = vol.get_json_info()
+
+        vgs = {}
+        for vgk, vg in self.vgroups.iteritems():
+            vgs[vgk] = vg.get_json_info()
+
+        return {'action': self.request_type,
+                'timestamp': self.timestamp_scheduled,
+                'stack_id': self.app_id,
+                'name': self.app_name,
+                'VMs': vms,
+                'Volumes': vols,
+                'VGroups': vgs}
+
+    def log_in_info(self):
+        return {'action': self.request_type,
+                'timestamp': self.timestamp_scheduled,
+                'stack_id': self.app_id,
+                'name': self.app_name}
diff --git a/valet/engine/optimizer/db_connect/__init__.py b/valet/engine/optimizer/db_connect/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/engine/optimizer/db_connect/client.cfg b/valet/engine/optimizer/db_connect/client.cfg
new file mode 100644
index 0000000..8f0825f
--- /dev/null
+++ b/valet/engine/optimizer/db_connect/client.cfg
@@ -0,0 +1,17 @@
+# Version 2.0.2: Feb. 9, 2016
+
+# Set database keyspace
+db_keyspace=valet_test
+db_request_table=placement_requests
+db_response_table=placement_results
+db_event_table=oslo_messages
+db_resource_table=resource_status
+db_resource_index_table=resource_log_index
+db_app_index_table=app_log_index
+db_app_table=app
+db_uuid_table=uuid_map
+
+#replication_factor=3
+
+
+
diff --git a/valet/engine/optimizer/db_connect/configuration.py b/valet/engine/optimizer/db_connect/configuration.py
new file mode 100644
index 0000000..6dedc25
--- /dev/null
+++ b/valet/engine/optimizer/db_connect/configuration.py
@@ -0,0 +1,73 @@
+#!/bin/python
+
+
+#################################################################################################################
+# Author: Gueyoung Jung
+# Contact: gjung@research.att.com
+# Version 2.0.2: Feb. 9, 2016
+#
+# Functions
+#
+#################################################################################################################
+
+
+import sys
+
+
+class Config(object):
+
+    def __init__(self):
+        self.mode = None
+
+        self.db_keyspace = None
+        self.db_request_table = None
+        self.db_response_table = None
+        self.db_event_table = None
+        self.db_resource_table = None
+        self.db_app_table = None
+        self.db_resource_index_table = None
+        self.db_app_index_table = None
+        self.db_uuid_table = None
+
+    def configure(self):
+        try:
+            f = open("./client.cfg", "r")
+            line = f.readline()
+
+            while line:
+                if line.startswith("#") or line.startswith(" ") or line == "\n":
+                    line = f.readline()
+                    continue
+
+                (rk, v) = line.split("=")
+                k = rk.strip()
+
+                if k == "db_keyspace":
+                    self.db_keyspace = v.strip()
+                elif k == "db_request_table":
+                    self.db_request_table = v.strip()
+                elif k == "db_response_table":
+                    self.db_response_table = v.strip()
+                elif k == "db_event_table":
+                    self.db_event_table = v.strip()
+                elif k == "db_resource_table":
+                    self.db_resource_table = v.strip()
+                elif k == "db_app_table":
+                    self.db_app_table = v.strip()
+                elif k == "db_resource_index_table":
+                    self.db_resource_index_table = v.strip()
+                elif k == "db_app_index_table":
+                    self.db_app_index_table = v.strip()
+                elif k == "db_uuid_table":
+                    self.db_uuid_table = v.strip()
+
+                line = f.readline()
+
+            f.close()
+
+            return "success"
+
+        except IOError as e:
+            return "I/O error({}): {}".format(e.errno, e.strerror)
+        except Exception:
+            return "Unexpected error: {}".format(sys.exc_info()[0])
diff --git a/valet/engine/optimizer/db_connect/event.py b/valet/engine/optimizer/db_connect/event.py
new file mode 100644
index 0000000..709c47c
--- /dev/null
+++ b/valet/engine/optimizer/db_connect/event.py
@@ -0,0 +1,150 @@
+#!/bin/python
+
+# Modified: Feb.
9, 2016 + + +import json + + +class Event(object): + + def __init__(self, _id): + self.event_id = _id + self.exchange = None + self.method = None + self.args = {} + + # For object_action event + self.change_list = [] + self.change_data = {} + self.object_name = None + + # For object_action and Instance object + self.vm_state = None + + # For object_action and ComputeNode object + self.status = "enabled" + self.vcpus_used = -1 + self.free_mem = -1 + self.free_local_disk = -1 + self.disk_available_least = -1 + self.numa_cell_list = [] + + # Common between Instance and ComputeNode + self.host = None + self.vcpus = -1 + self.mem = -1 + self.local_disk = 0 + + # For build_and_run_instance + self.heat_resource_name = None + self.heat_resource_uuid = None + self.heat_root_stack_id = None + self.heat_stack_name = None + + # Common data + self.uuid = None + + def set_data(self): + if self.method == 'object_action': + self.change_list = self.args['objinst']['nova_object.changes'] + self.change_data = self.args['objinst']['nova_object.data'] + self.object_name = self.args['objinst']['nova_object.name'] + + if self.object_name == 'Instance': + if 'uuid' in self.change_data.keys(): + self.uuid = self.change_data['uuid'] + + if 'host' in self.change_data.keys(): + self.host = self.change_data['host'] + + if 'vcpus' in self.change_data.keys(): + self.vcpus = float(self.change_data['vcpus']) + + if 'memory_mb' in self.change_data.keys(): + self.mem = float(self.change_data['memory_mb']) + + root = -1 + ephemeral = -1 + swap = -1 + if 'root_gb' in self.change_data.keys(): + root = float(self.change_data['root_gb']) + + if 'ephemeral_gb' in self.change_data.keys(): + ephemeral = float(self.change_data['ephemeral_gb']) + + if 'flavor' in self.change_data.keys(): + flavor = self.change_data['flavor'] + if 'nova_object.data' in flavor.keys(): + flavor_data = flavor['nova_object.data'] + if 'swap' in flavor_data.keys(): + swap = float(flavor_data['swap']) + + if root != -1: + self.local_disk += root + if ephemeral != -1: + self.local_disk += ephemeral + if swap != -1: + self.local_disk += swap / float(1024) + + self.vm_state = self.change_data['vm_state'] + + elif self.object_name == 'ComputeNode': + if 'host' in self.change_data.keys(): + self.host = self.change_data['host'] + + if 'deleted' in self.change_list and 'deleted' in self.change_data.keys(): + if self.change_data['deleted'] == "true" or self.change_data['deleted'] is True: + self.status = "disabled" + + if 'vcpus' in self.change_list and 'vcpus' in self.change_data.keys(): + self.vcpus = self.change_data['vcpus'] + + if 'vcpus_used' in self.change_list and 'vcpus_used' in self.change_data.keys(): + self.vcpus_used = self.change_data['vcpus_used'] + + if 'memory_mb' in self.change_list and 'memory_mb' in self.change_data.keys(): + self.mem = self.change_data['memory_mb'] + + if 'free_ram_mb' in self.change_list and 'free_ram_mb' in self.change_data.keys(): + self.free_mem = self.change_data['free_ram_mb'] + + if 'local_gb' in self.change_list and 'local_gb' in self.change_data.keys(): + self.local_disk = self.change_data['local_gb'] + + if 'free_disk_gb' in self.change_list and 'free_disk_gb' in self.change_data.keys(): + self.free_local_disk = self.change_data['free_disk_gb'] + + if 'disk_available_least' in self.change_list and \ + 'disk_available_least' in self.change_data.keys(): + self.disk_available_least = self.change_data['disk_available_least'] + + if 'numa_topology' in self.change_list and 'numa_topology' in self.change_data.keys(): + 
+                    str_numa_topology = self.change_data['numa_topology']
+                    try:
+                        numa_topology = json.loads(str_numa_topology)
+                        # print json.dumps(numa_topology, indent=4)
+
+                        if 'nova_object.data' in numa_topology.keys():
+                            if 'cells' in numa_topology['nova_object.data']:
+                                for cell in numa_topology['nova_object.data']['cells']:
+                                    self.numa_cell_list.append(cell)
+
+                    except (ValueError, KeyError, TypeError):
+                        pass
+                        # print "error while parsing numa_topology"
+
+        elif self.method == 'build_and_run_instance':
+            if 'scheduler_hints' in self.args['filter_properties'].keys():
+                scheduler_hints = self.args['filter_properties']['scheduler_hints']
+                if 'heat_resource_name' in scheduler_hints.keys():
+                    self.heat_resource_name = scheduler_hints['heat_resource_name']
+                if 'heat_resource_uuid' in scheduler_hints.keys():
+                    self.heat_resource_uuid = scheduler_hints['heat_resource_uuid']
+                if 'heat_root_stack_id' in scheduler_hints.keys():
+                    self.heat_root_stack_id = scheduler_hints['heat_root_stack_id']
+                if 'heat_stack_name' in scheduler_hints.keys():
+                    self.heat_stack_name = scheduler_hints['heat_stack_name']
+
+            if 'uuid' in self.args['instance']['nova_object.data'].keys():
+                self.uuid = self.args['instance']['nova_object.data']['uuid']
diff --git a/valet/engine/optimizer/db_connect/music_handler.py b/valet/engine/optimizer/db_connect/music_handler.py new file mode 100644 index 0000000..1b632ec --- /dev/null +++ b/valet/engine/optimizer/db_connect/music_handler.py @@ -0,0 +1,702 @@
+#!/bin/python
+
+# Modified: Sep. 27, 2016
+
+
+import json
+import operator
+from valet.api.db.models.music import Music
+from valet.engine.optimizer.db_connect.event import Event
+
+
+class MusicHandler(object):
+
+    def __init__(self, _config, _logger):
+        self.config = _config
+        self.logger = _logger
+
+        self.music = None
+
+        self.logger.debug("MusicHandler.__init__: mode = " + self.config.mode)
+
+        if self.config.mode.startswith("sim"):
+            self.music = Music()
+        elif self.config.mode.startswith("live"):
+            self.music = Music(hosts=self.config.db_hosts, replication_factor=self.config.replication_factor)
+
+    def init_db(self):
+        self.logger.info("MusicHandler.init_db: create keyspace")
+
+        try:
+            self.music.create_keyspace(self.config.db_keyspace)
+        except Exception as e:
+            self.logger.error("MUSIC error: " + str(e))
+            return False
+
+        self.logger.info("MusicHandler.init_db: create table")
+
+        schema = {
+            'stack_id': 'text',
+            'request': 'text',
+            'PRIMARY KEY': '(stack_id)'
+        }
+        try:
+            self.music.create_table(self.config.db_keyspace, self.config.db_request_table, schema)
+        except Exception as e:
+            self.logger.error("MUSIC error: " + str(e))
+            return False
+
+        schema = {
+            'stack_id': 'text',
+            'placement': 'text',
+            'PRIMARY KEY': '(stack_id)'
+        }
+        try:
+            self.music.create_table(self.config.db_keyspace, self.config.db_response_table, schema)
+        except Exception as e:
+            self.logger.error("MUSIC error: " + str(e))
+            return False
+
+        schema = {
+            'timestamp': 'text',
+            'exchange': 'text',
+            'method': 'text',
+            'args': 'text',
+            'PRIMARY KEY': '(timestamp)'
+        }
+        try:
+            self.music.create_table(self.config.db_keyspace, self.config.db_event_table, schema)
+        except Exception as e:
+            self.logger.error("MUSIC error: " + str(e))
+            return False
+
+        schema = {
+            'site_name': 'text',
+            'resource': 'text',
+            'PRIMARY KEY': '(site_name)'
+        }
+        try:
+            self.music.create_table(self.config.db_keyspace, self.config.db_resource_table, schema)
+        except Exception as e:
+            self.logger.error("MUSIC error: " + str(e))
+            return False
+
+        schema = {
+            'stack_id':
'text', + 'app': 'text', + 'PRIMARY KEY': '(stack_id)' + } + try: + self.music.create_table(self.config.db_keyspace, self.config.db_app_table, schema) + except Exception as e: + self.logger.error("MUSIC error: " + str(e)) + return False + + schema = { + 'site_name': 'text', + 'app_log_index': 'text', + 'PRIMARY KEY': '(site_name)' + } + try: + self.music.create_table(self.config.db_keyspace, self.config.db_app_index_table, schema) + except Exception as e: + self.logger.error("MUSIC error: " + str(e)) + return False + + schema = { + 'site_name': 'text', + 'resource_log_index': 'text', + 'PRIMARY KEY': '(site_name)' + } + try: + self.music.create_table(self.config.db_keyspace, self.config.db_resource_index_table, schema) + except Exception as e: + self.logger.error("MUSIC error: " + str(e)) + return False + + schema = { + 'uuid': 'text', + 'h_uuid': 'text', + 's_uuid': 'text', + 'PRIMARY KEY': '(uuid)' + } + try: + self.music.create_table(self.config.db_keyspace, self.config.db_uuid_table, schema) + except Exception as e: + self.logger.error("MUSIC error: " + str(e)) + return False + + return True + + def get_events(self): + event_list = [] + + events = {} + try: + events = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) + except Exception as e: + self.logger.error("MUSIC error while reading events: " + str(e)) + return None + + if len(events) > 0: + for _, row in events.iteritems(): + event_id = row['timestamp'] + exchange = row['exchange'] + method = row['method'] + args_data = row['args'] + + self.logger.debug("MusicHandler.get_events: event (" + event_id + ") is entered") + + if exchange != "nova": + if self.delete_event(event_id) is False: + return None + self.logger.debug("MusicHandler.get_events: event exchange (" + exchange + ") is not supported") + continue + + if method != 'object_action' and method != 'build_and_run_instance': + if self.delete_event(event_id) is False: + return None + self.logger.debug("MusicHandler.get_events: event method (" + method + ") is not considered") + continue + + if len(args_data) == 0: + if self.delete_event(event_id) is False: + return None + self.logger.debug("MusicHandler.get_events: event does not have args") + continue + + try: + args = json.loads(args_data) + except (ValueError, KeyError, TypeError): + self.logger.warn("MusicHandler.get_events: error while decoding to JSON event = " + method + ":" + event_id) + continue + + if method == 'object_action': + if 'objinst' in args.keys(): + objinst = args['objinst'] + if 'nova_object.name' in objinst.keys(): + nova_object_name = objinst['nova_object.name'] + if nova_object_name == 'Instance': + if 'nova_object.changes' in objinst.keys() and \ + 'nova_object.data' in objinst.keys(): + change_list = objinst['nova_object.changes'] + change_data = objinst['nova_object.data'] + if 'vm_state' in change_list and \ + 'vm_state' in change_data.keys(): + if change_data['vm_state'] == 'deleted' or \ + change_data['vm_state'] == 'active': + e = Event(event_id) + e.exchange = exchange + e.method = method + e.args = args + event_list.append(e) + else: + if self.delete_event(event_id) is False: + return None + else: + if self.delete_event(event_id) is False: + return None + else: + if self.delete_event(event_id) is False: + return None + elif nova_object_name == 'ComputeNode': + if 'nova_object.changes' in objinst.keys() and \ + 'nova_object.data' in objinst.keys(): + e = Event(event_id) + e.exchange = exchange + e.method = method + e.args = args + event_list.append(e) + else: + if 
self.delete_event(event_id) is False: + return None + else: + if self.delete_event(event_id) is False: + return None + else: + if self.delete_event(event_id) is False: + return None + else: + if self.delete_event(event_id) is False: + return None + + elif method == 'build_and_run_instance': + if 'filter_properties' not in args.keys(): + if self.delete_event(event_id) is False: + return None + continue + ''' + else: + filter_properties = args['filter_properties'] + if 'scheduler_hints' not in filter_properties.keys(): + self.delete_event(event_id) + continue + ''' + + if 'instance' not in args.keys(): + if self.delete_event(event_id) is False: + return None + continue + else: + instance = args['instance'] + if 'nova_object.data' not in instance.keys(): + if self.delete_event(event_id) is False: + return None + continue + + e = Event(event_id) + e.exchange = exchange + e.method = method + e.args = args + event_list.append(e) + + error_event_list = [] + for e in event_list: + e.set_data() + + self.logger.debug("MusicHandler.get_events: event (" + e.event_id + ") is parsed") + + if e.method == "object_action": + if e.object_name == 'Instance': + if e.uuid is None or e.uuid == "none" or \ + e.host is None or e.host == "none" or \ + e.vcpus == -1 or e.mem == -1: + error_event_list.append(e) + self.logger.warn("MusicHandler.get_events: data missing in instance object event") + + elif e.object_name == 'ComputeNode': + if e.host is None or e.host == "none": + error_event_list.append(e) + self.logger.warn("MusicHandler.get_events: data missing in compute object event") + + elif e.method == "build_and_run_instance": + ''' + if e.heat_resource_name == None or e.heat_resource_name == "none" or \ + e.heat_resource_uuid == None or e.heat_resource_uuid == "none" or \ + e.heat_root_stack_id == None or e.heat_root_stack_id == "none" or \ + e.heat_stack_name == None or e.heat_stack_name == "none" or \ + e.uuid == None or e.uuid == "none": + ''' + if e.uuid is None or e.uuid == "none": + error_event_list.append(e) + self.logger.warn("MusicHandler.get_events: data missing in build event") + + if len(error_event_list) > 0: + event_list[:] = [e for e in event_list if e not in error_event_list] + + if len(event_list) > 0: + event_list.sort(key=operator.attrgetter('event_id')) + + return event_list + + def delete_event(self, _event_id): + try: + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_event_table, + 'timestamp', _event_id) + except Exception as e: + self.logger.error("MUSIC error while deleting event: " + str(e)) + return False + + return True + + def get_uuid(self, _uuid): + h_uuid = "none" + s_uuid = "none" + + row = {} + try: + row = self.music.read_row(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _uuid) + except Exception as e: + self.logger.error("MUSIC error while reading uuid: " + str(e)) + return None + + if len(row) > 0: + h_uuid = row[row.keys()[0]]['h_uuid'] + s_uuid = row[row.keys()[0]]['s_uuid'] + + self.logger.info("MusicHandler.get_uuid: get heat uuid (" + h_uuid + ") for uuid = " + _uuid) + else: + self.logger.debug("MusicHandler.get_uuid: heat uuid not found") + + return h_uuid, s_uuid + + def put_uuid(self, _e): + heat_resource_uuid = "none" + heat_root_stack_id = "none" + if _e.heat_resource_uuid is not None and _e.heat_resource_uuid != "none": + heat_resource_uuid = _e.heat_resource_uuid + if _e.heat_root_stack_id is not None and _e.heat_root_stack_id != "none": + heat_root_stack_id = _e.heat_root_stack_id + + data = { + 'uuid': _e.uuid, + 
'h_uuid': heat_resource_uuid, + 's_uuid': heat_root_stack_id + } + + try: + self.music.create_row(self.config.db_keyspace, self.config.db_uuid_table, data) + except Exception as e: + self.logger.error("MUSIC error while inserting uuid: " + str(e)) + return False + + self.logger.info("MusicHandler.put_uuid: uuid (" + _e.uuid + ") added") + + ''' + self.delete_event(_e.event_id) + + self.logger.info("db: build event (" + _e.event_id + ") deleted") + ''' + + return True + + def delete_uuid(self, _k): + try: + self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _k) + except Exception as e: + self.logger.error("MUSIC error while deleting uuid: " + str(e)) + return False + + return True + + def get_requests(self): + request_list = [] + + requests = {} + try: + requests = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) + except Exception as e: + self.logger.error("MUSIC error while reading requests: " + str(e)) + return None + + if len(requests) > 0: + self.logger.info("MusicHandler.get_requests: placement request arrived") + + for _, row in requests.iteritems(): + self.logger.info(" request_id = " + row['stack_id']) + + r_list = json.loads(row['request']) + for r in r_list: + request_list.append(r) + + return request_list + + def put_result(self, _result): + for appk, app_placement in _result.iteritems(): + data = { + 'stack_id': appk, + 'placement': json.dumps(app_placement) + } + + try: + self.music.create_row(self.config.db_keyspace, self.config.db_response_table, data) + except Exception as e: + self.logger.error("MUSIC error while putting placement result: " + str(e)) + return False + + self.logger.info("MusicHandler.put_result: " + appk + " placement result added") + + for appk in _result.keys(): + try: + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_request_table, + 'stack_id', appk) + except Exception as e: + self.logger.error("MUSIC error while deleting handled request: " + str(e)) + return False + + self.logger.info("MusicHandler.put_result: " + appk + " placement request deleted") + + return True + + def get_resource_status(self, _k): + json_resource = {} + + row = {} + try: + row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k, self.logger) + except Exception as e: + self.logger.error("MUSIC error while reading resource status: " + str(e)) + return None + + if len(row) > 0: + str_resource = row[row.keys()[0]]['resource'] + json_resource = json.loads(str_resource) + + self.logger.info("MusicHandler.get_resource_status: get resource status") + + return json_resource + + def update_resource_status(self, _k, _status): + row = {} + try: + row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k) + except Exception as e: + self.logger.error("MUSIC error while reading resource status: " + str(e)) + return False + + json_resource = {} + if len(row) > 0: + str_resource = row[row.keys()[0]]['resource'] + json_resource = json.loads(str_resource) + + if 'flavors' in _status.keys(): + flavors = _status['flavors'] + for fk, f in flavors.iteritems(): + if fk in json_resource['flavors'].keys(): + del json_resource['flavors'][fk] + json_resource['flavors'][fk] = f + + if 'logical_groups' in _status.keys(): + logical_groups = _status['logical_groups'] + for lgk, lg in logical_groups.iteritems(): + if lgk in json_resource['logical_groups'].keys(): + del json_resource['logical_groups'][lgk] + 
json_resource['logical_groups'][lgk] = lg
+
+            if 'storages' in _status.keys():
+                storages = _status['storages']
+                for stk, st in storages.iteritems():
+                    if stk in json_resource['storages'].keys():
+                        del json_resource['storages'][stk]
+                    json_resource['storages'][stk] = st
+
+            if 'switches' in _status.keys():
+                switches = _status['switches']
+                for sk, s in switches.iteritems():
+                    if sk in json_resource['switches'].keys():
+                        del json_resource['switches'][sk]
+                    json_resource['switches'][sk] = s
+
+            if 'hosts' in _status.keys():
+                hosts = _status['hosts']
+                for hk, h in hosts.iteritems():
+                    if hk in json_resource['hosts'].keys():
+                        del json_resource['hosts'][hk]
+                    json_resource['hosts'][hk] = h
+
+            if 'host_groups' in _status.keys():
+                host_groups = _status['host_groups']
+                for hgk, hg in host_groups.iteritems():
+                    if hgk in json_resource['host_groups'].keys():
+                        del json_resource['host_groups'][hgk]
+                    json_resource['host_groups'][hgk] = hg
+
+            if 'datacenter' in _status.keys():
+                datacenter = _status['datacenter']
+                del json_resource['datacenter']
+                json_resource['datacenter'] = datacenter
+
+            json_resource['timestamp'] = _status['timestamp']
+
+            try:
+                self.music.delete_row_eventually(self.config.db_keyspace,
+                                                 self.config.db_resource_table,
+                                                 'site_name', _k)
+            except Exception as e:
+                self.logger.error("MUSIC error while deleting resource status: " + str(e))
+                return False
+
+        else:
+            json_resource = _status
+
+        data = {
+            'site_name': _k,
+            'resource': json.dumps(json_resource)
+        }
+
+        try:
+            self.music.create_row(self.config.db_keyspace, self.config.db_resource_table, data)
+        except Exception as e:
+            self.logger.error("MUSIC error: " + str(e))
+            return False
+
+        self.logger.info("MusicHandler.update_resource_status: resource status updated")
+
+        return True
+
+    def update_resource_log_index(self, _k, _index):
+        data = {
+            'site_name': _k,
+            'resource_log_index': str(_index)
+        }
+
+        try:
+            self.music.update_row_eventually(self.config.db_keyspace,
+                                             self.config.db_resource_index_table,
+                                             'site_name', _k, data)
+        except Exception as e:
+            self.logger.error("MUSIC error while updating resource log index: " + str(e))
+            return False
+
+        self.logger.info("MusicHandler.update_resource_log_index: resource log index updated")
+
+        return True
+
+    def update_app_log_index(self, _k, _index):
+        data = {
+            'site_name': _k,
+            'app_log_index': str(_index)
+        }
+
+        try:
+            self.music.update_row_eventually(self.config.db_keyspace,
+                                             self.config.db_app_index_table,
+                                             'site_name', _k, data)
+        except Exception as e:
+            self.logger.error("MUSIC error while updating app log index: " + str(e))
+            return False
+
+        self.logger.info("MusicHandler.update_app_log_index: app log index updated")
+
+        return True
+
+    def add_app(self, _k, _app_data):
+        try:
+            self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _k)
+        except Exception as e:
+            self.logger.error("MUSIC error while deleting app: " + str(e))
+            return False
+
+        self.logger.info("MusicHandler.add_app: app deleted")
+
+        if _app_data is not None:
+            data = {
+                'stack_id': _k,
+                'app': json.dumps(_app_data)
+            }
+
+            try:
+                self.music.create_row(self.config.db_keyspace, self.config.db_app_table, data)
+            except Exception as e:
+                self.logger.error("MUSIC error while inserting app: " + str(e))
+                return False
+
+            self.logger.info("MusicHandler.add_app: app added")
+
+        return True
+
+    def get_app_info(self, _s_uuid):
+        json_app = {}
+
+        row = {}
+        try:
+            row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table,
'stack_id', _s_uuid) + except Exception as e: + self.logger.error("MUSIC error while reading app info: " + str(e)) + return None + + if len(row) > 0: + str_app = row[row.keys()[0]]['app'] + json_app = json.loads(str_app) + + return json_app + + # TODO(GY): get all other VMs related to this VM + def get_vm_info(self, _s_uuid, _h_uuid, _host): + updated = False + json_app = {} + + vm_info = {} + + row = {} + try: + row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) + except Exception as e: + self.logger.error("MUSIC error: " + str(e)) + return None + + if len(row) > 0: + str_app = row[row.keys()[0]]['app'] + json_app = json.loads(str_app) + + vms = json_app["VMs"] + for vmk, vm in vms.iteritems(): + if vmk == _h_uuid: + if vm["status"] != "deleted": + if vm["host"] != _host: + vm["planned_host"] = vm["host"] + vm["host"] = _host + self.logger.warn("db: conflicted placement decision from Ostro") + # TODO(GY): affinity, diversity, exclusivity validation check + updated = True + else: + self.logger.debug("db: placement as expected") + else: + vm["status"] = "scheduled" + self.logger.warn("db: vm was deleted") + updated = True + + vm_info = vm + break + else: + self.logger.error("MusicHandler.get_vm_info: vm is missing from stack") + + else: + self.logger.warn("MusicHandler.get_vm_info: not found stack for update = " + _s_uuid) + + if updated is True: + if self.add_app(_s_uuid, json_app) is False: + return None + + return vm_info + + def update_vm_info(self, _s_uuid, _h_uuid): + updated = False + json_app = {} + + row = {} + try: + row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) + except Exception as e: + self.logger.error("MUSIC error: " + str(e)) + return False + + if len(row) > 0: + str_app = row[row.keys()[0]]['app'] + json_app = json.loads(str_app) + + vms = json_app["VMs"] + for vmk, vm in vms.iteritems(): + if vmk == _h_uuid: + if vm["status"] != "deleted": + vm["status"] = "deleted" + self.logger.debug("db: deleted marked") + updated = True + else: + self.logger.warn("db: vm was already deleted") + + break + else: + self.logger.error("MusicHandler.update_vm_info: vm is missing from stack") + + else: + self.logger.warn("MusicHandler.update_vm_info: not found stack for update = " + _s_uuid) + + if updated is True: + if self.add_app(_s_uuid, json_app) is False: + return False + + return True + + +# Unit test +''' +if __name__ == '__main__': + config = Config() + config_status = config.configure() + if config_status != "success": + print "Error while configuring Client: " + config_status + sys.exit(2) + + mh = MusicHandler(config, None) + event_list = mh.get_events() + for e in event_list: + print "event id = ", e.event_id + print "host = ", e.host + print "least disk = ", e.disk_available_least + print "disk = ", e.local_disk + for nc in e.numa_cell_list: + print "numa cell = ", nc +''' diff --git a/valet/engine/optimizer/ostro/__init__.py b/valet/engine/optimizer/ostro/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/optimizer/ostro/constraint_solver.py b/valet/engine/optimizer/ostro/constraint_solver.py new file mode 100755 index 0000000..bbf99a6 --- /dev/null +++ b/valet/engine/optimizer/ostro/constraint_solver.py @@ -0,0 +1,554 @@ +#!/bin/python + +# Modified: Sep. 
27, 2016 + + +from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, LEVELS +from valet.engine.optimizer.ostro.openstack_filters import AggregateInstanceExtraSpecsFilter +from valet.engine.optimizer.ostro.openstack_filters import AvailabilityZoneFilter +from valet.engine.optimizer.ostro.openstack_filters import CoreFilter +from valet.engine.optimizer.ostro.openstack_filters import DiskFilter +from valet.engine.optimizer.ostro.openstack_filters import RamFilter + + +class ConstraintSolver(object): + + def __init__(self, _logger): + self.logger = _logger + + self.openstack_AZ = AvailabilityZoneFilter(self.logger) + self.openstack_AIES = AggregateInstanceExtraSpecsFilter(self.logger) + self.openstack_R = RamFilter(self.logger) + self.openstack_C = CoreFilter(self.logger) + self.openstack_D = DiskFilter(self.logger) + + self.status = "success" + + def compute_candidate_list(self, _level, _n, _node_placements, _avail_resources, _avail_logical_groups): + candidate_list = [] + + ''' when replanning ''' + if _n.node.host is not None and len(_n.node.host) > 0: + self.logger.debug("ConstraintSolver: reconsider with given candidates") + for hk in _n.node.host: + for ark, ar in _avail_resources.iteritems(): + if hk == ark: + candidate_list.append(ar) + else: + for _, r in _avail_resources.iteritems(): + candidate_list.append(r) + if len(candidate_list) == 0: + self.status = "no candidate for node = " + _n.node.name + self.logger.warn("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: num of candidates = " + str(len(candidate_list))) + + ''' availability zone constraint ''' + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + if (isinstance(_n.node, VM) and _n.node.availability_zone is not None) or \ + (isinstance(_n.node, VGroup) and len(_n.node.availability_zone_list) > 0): + self._constrain_availability_zone(_level, _n, candidate_list) + if len(candidate_list) == 0: + self.status = "violate availability zone constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done availability_zone constraint") + + ''' host aggregate constraint ''' + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + if len(_n.node.extra_specs_list) > 0: + self._constrain_host_aggregates(_level, _n, candidate_list) + if len(candidate_list) == 0: + self.status = "violate host aggregate constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done host_aggregate constraint") + + ''' cpu capacity constraint ''' + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + self._constrain_cpu_capacity(_level, _n, candidate_list) + if len(candidate_list) == 0: + self.status = "violate cpu capacity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done cpu capacity constraint") + + ''' memory capacity constraint ''' + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + self._constrain_mem_capacity(_level, _n, candidate_list) + if len(candidate_list) == 0: + self.status = "violate memory capacity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done memory capacity constraint") + + ''' 
local disk capacity constraint ''' + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + self._constrain_local_disk_capacity(_level, _n, candidate_list) + if len(candidate_list) == 0: + self.status = "violate local disk capacity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done local disk capacity constraint") + + ''' network bandwidth constraint ''' + self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements, candidate_list) + if len(candidate_list) == 0: + self.status = "violate nw bandwidth capacity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done bandwidth capacity constraint") + + ''' diversity constraint ''' + if len(_n.node.diversity_groups) > 0: + for _, diversity_id in _n.node.diversity_groups.iteritems(): + if diversity_id.split(":")[0] == _level: + if diversity_id in _avail_logical_groups.keys(): + self._constrain_diversity_with_others(_level, diversity_id, candidate_list) + if len(candidate_list) == 0: + break + if len(candidate_list) == 0: + self.status = "violate diversity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self._constrain_diversity(_level, _n, _node_placements, candidate_list) + if len(candidate_list) == 0: + self.status = "violate diversity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done diversity_group constraint") + + ''' exclusivity constraint ''' + exclusivities = self.get_exclusivities(_n.node.exclusivity_groups, _level) + if len(exclusivities) > 1: + self.status = "violate exclusivity constraint (more than one exclusivity) for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return [] + else: + if len(exclusivities) == 1: + exclusivity_id = exclusivities[exclusivities.keys()[0]] + if exclusivity_id.split(":")[0] == _level: + self._constrain_exclusivity(_level, exclusivity_id, candidate_list) + if len(candidate_list) == 0: + self.status = "violate exclusivity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done exclusivity_group constraint") + else: + self._constrain_non_exclusivity(_level, candidate_list) + if len(candidate_list) == 0: + self.status = "violate non-exclusivity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done non-exclusivity_group constraint") + + ''' affinity constraint ''' + affinity_id = _n.get_affinity_id() # level:name, except name == "any" + if affinity_id is not None: + if affinity_id.split(":")[0] == _level: + if affinity_id in _avail_logical_groups.keys(): + self._constrain_affinity(_level, affinity_id, candidate_list) + if len(candidate_list) == 0: + self.status = "violate affinity constraint for node = " + _n.node.name + self.logger.error("ConstraintSolver: " + self.status) + return candidate_list + else: + self.logger.debug("ConstraintSolver: done affinity_group constraint") + + return candidate_list + + ''' + constraint modules + ''' + + def _constrain_affinity(self, _level, _affinity_id, 
_candidate_list): + conflict_list = [] + + for r in _candidate_list: + if self.exist_group(_level, _affinity_id, "AFF", r) is False: + if r not in conflict_list: + conflict_list.append(r) + + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: not exist affinity in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def _constrain_diversity_with_others(self, _level, _diversity_id, _candidate_list): + conflict_list = [] + + for r in _candidate_list: + if self.exist_group(_level, _diversity_id, "DIV", r) is True: + if r not in conflict_list: + conflict_list.append(r) + + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: conflict diversity in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def exist_group(self, _level, _id, _group_type, _candidate): + match = False + + memberships = _candidate.get_memberships(_level) + for lgk, lgr in memberships.iteritems(): + if lgr.group_type == _group_type and lgk == _id: + match = True + break + + return match + + def _constrain_diversity(self, _level, _n, _node_placements, _candidate_list): + conflict_list = [] + + for r in _candidate_list: + if self.conflict_diversity(_level, _n, _node_placements, r) is True: + if r not in conflict_list: + conflict_list.append(r) + + resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: conflict the diversity in resource = " + resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def conflict_diversity(self, _level, _n, _node_placements, _candidate): + conflict = False + + for v in _node_placements.keys(): + diversity_level = _n.get_common_diversity(v.diversity_groups) + if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level): + if diversity_level == "host": + if _candidate.cluster_name == _node_placements[v].cluster_name and \ + _candidate.rack_name == _node_placements[v].rack_name and \ + _candidate.host_name == _node_placements[v].host_name: + conflict = True + break + elif diversity_level == "rack": + if _candidate.cluster_name == _node_placements[v].cluster_name and \ + _candidate.rack_name == _node_placements[v].rack_name: + conflict = True + break + elif diversity_level == "cluster": + if _candidate.cluster_name == _node_placements[v].cluster_name: + conflict = True + break + + return conflict + + def _constrain_non_exclusivity(self, _level, _candidate_list): + conflict_list = [] + + for r in _candidate_list: + if self.conflict_exclusivity(_level, r) is True: + if r not in conflict_list: + conflict_list.append(r) + + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: exclusivity defined in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def conflict_exclusivity(self, _level, _candidate): + conflict = False + + memberships = _candidate.get_memberships(_level) + for mk in memberships.keys(): + if memberships[mk].group_type == "EX" and mk.split(":")[0] == _level: + conflict = True + + return conflict + + def get_exclusivities(self, _exclusivity_groups, _level): + exclusivities = {} + + for exk, level in _exclusivity_groups.iteritems(): + if level.split(":")[0] == _level: + exclusivities[exk] = level + + return exclusivities + + def _constrain_exclusivity(self, _level, _exclusivity_id, 
_candidate_list): + candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id, _candidate_list) + + if len(candidate_list) == 0: + candidate_list = self._get_hibernated_candidates(_level, _candidate_list) + _candidate_list[:] = [x for x in _candidate_list if x in candidate_list] + else: + _candidate_list[:] = [x for x in _candidate_list if x in candidate_list] + + def _get_exclusive_candidates(self, _level, _exclusivity_id, _candidate_list): + candidate_list = [] + + for r in _candidate_list: + if self.exist_group(_level, _exclusivity_id, "EX", r) is True: + if r not in candidate_list: + candidate_list.append(r) + else: + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: exclusivity not exist in resource = " + debug_resource_name) + + return candidate_list + + def _get_hibernated_candidates(self, _level, _candidate_list): + candidate_list = [] + + for r in _candidate_list: + if self.check_hibernated(_level, r) is True: + if r not in candidate_list: + candidate_list.append(r) + else: + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: exclusivity not allowed in resource = " + debug_resource_name) + + return candidate_list + + def check_hibernated(self, _level, _candidate): + match = False + + num_of_placed_vms = _candidate.get_num_of_placed_vms(_level) + if num_of_placed_vms == 0: + match = True + + return match + + def _constrain_host_aggregates(self, _level, _n, _candidate_list): + conflict_list = [] + + for r in _candidate_list: + if self.check_host_aggregates(_level, r, _n.node) is False: + if r not in conflict_list: + conflict_list.append(r) + + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: not meet aggregate in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def check_host_aggregates(self, _level, _candidate, _v): + return self.openstack_AIES.host_passes(_level, _candidate, _v) + + def _constrain_availability_zone(self, _level, _n, _candidate_list): + conflict_list = [] + + for r in _candidate_list: + if self.check_availability_zone(_level, r, _n.node) is False: + if r not in conflict_list: + conflict_list.append(r) + + debug_resource_name = r.get_resource_name(_level) + self.logger.debug("ConstraintSolver: not meet az in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def check_availability_zone(self, _level, _candidate, _v): + return self.openstack_AZ.host_passes(_level, _candidate, _v) + + def _constrain_cpu_capacity(self, _level, _n, _candidate_list): + conflict_list = [] + + for ch in _candidate_list: + if self.check_cpu_capacity(_level, _n.node, ch) is False: + conflict_list.append(ch) + + debug_resource_name = ch.get_resource_name(_level) + self.logger.debug("ConstraintSolver: lack of cpu in " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def check_cpu_capacity(self, _level, _v, _candidate): + return self.openstack_C.host_passes(_level, _candidate, _v) + + def _constrain_mem_capacity(self, _level, _n, _candidate_list): + conflict_list = [] + + for ch in _candidate_list: + if self.check_mem_capacity(_level, _n.node, ch) is False: + conflict_list.append(ch) + + debug_resource_name = ch.get_resource_name(_level) + self.logger.debug("ConstraintSolver: lack of mem in " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c 
not in conflict_list] + + def check_mem_capacity(self, _level, _v, _candidate): + return self.openstack_R.host_passes(_level, _candidate, _v) + + def _constrain_local_disk_capacity(self, _level, _n, _candidate_list): + conflict_list = [] + + for ch in _candidate_list: + if self.check_local_disk_capacity(_level, _n.node, ch) is False: + conflict_list.append(ch) + + debug_resource_name = ch.get_resource_name(_level) + self.logger.debug("ConstraintSolver: lack of local disk in " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def check_local_disk_capacity(self, _level, _v, _candidate): + return self.openstack_D.host_passes(_level, _candidate, _v) + + def _constrain_storage_capacity(self, _level, _n, _candidate_list): + conflict_list = [] + + for ch in _candidate_list: + if self.check_storage_availability(_level, _n.node, ch) is False: + conflict_list.append(ch) + + debug_resource_name = ch.get_resource_name(_level) + avail_storages = ch.get_avail_storages(_level) + avail_disks = [] + volume_classes = [] + volume_sizes = [] + if isinstance(_n.node, VGroup): + for vck in _n.node.volume_sizes.keys(): + volume_classes.append(vck) + volume_sizes.append(_n.node.volume_sizes[vck]) + else: + volume_classes.append(_n.node.volume_class) + volume_sizes.append(_n.node.volume_size) + + for vc in volume_classes: + for _, s in avail_storages.iteritems(): + if vc == "any" or s.storage_class == vc: + avail_disks.append(s.storage_avail_disk) + + self.logger.debug("ConstraintSolver: storage constrained in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def check_storage_availability(self, _level, _v, _ch): + available = False + + volume_sizes = [] + if isinstance(_v, VGroup): + for vck in _v.volume_sizes.keys(): + volume_sizes.append((vck, _v.volume_sizes[vck])) + else: + volume_sizes.append((_v.volume_class, _v.volume_size)) + + avail_storages = _ch.get_avail_storages(_level) + for vc, vs in volume_sizes: + for _, s in avail_storages.iteritems(): + if vc == "any" or s.storage_class == vc: + if s.storage_avail_disk >= vs: + available = True + break + else: + available = False + if available is False: + break + + return available + + def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements, _candidate_list): + conflict_list = [] + + for cr in _candidate_list: + if self.check_nw_bandwidth_availability(_level, _n, _node_placements, cr) is False: + if cr not in conflict_list: + conflict_list.append(cr) + + debug_resource_name = cr.get_resource_name(_level) + self.logger.debug("ConstraintSolver: bw constrained in resource = " + debug_resource_name) + + _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + + def check_nw_bandwidth_availability(self, _level, _n, _node_placements, _cr): + # NOTE: 3rd entry for special node requiring bandwidth of out-going from spine switch + total_req_bandwidths = [0, 0, 0] + + link_list = _n.get_all_links() + + for vl in link_list: + bandwidth = _n.get_bandwidth_of_link(vl) + + placement_level = None + if vl.node in _node_placements.keys(): # vl.node is VM or Volume + placement_level = _node_placements[vl.node].get_common_placement(_cr) + else: # in the open list + placement_level = _n.get_common_diversity(vl.node.diversity_groups) + if placement_level == "ANY": + implicit_diversity = self.get_implicit_diversity(_n.node, link_list, vl.node, _level) + if implicit_diversity[0] is not None: + placement_level = 
implicit_diversity[1] + + self.get_req_bandwidths(_level, placement_level, bandwidth, total_req_bandwidths) + + return self._check_nw_bandwidth_availability(_level, total_req_bandwidths, _cr) + + # to find any implicit diversity relation caused by the other links of _v + # (i.e., intersection between _v and _target_v) + def get_implicit_diversity(self, _v, _link_list, _target_v, _level): + max_implicit_diversity = (None, 0) + + for vl in _link_list: + diversity_level = _v.get_common_diversity(vl.node.diversity_groups) + if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level): + for dk, dl in vl.node.diversity_groups.iteritems(): + if LEVELS.index(dl) > LEVELS.index(diversity_level): + if _target_v.uuid != vl.node.uuid: + if dk in _target_v.diversity_groups.keys(): + if LEVELS.index(dl) > max_implicit_diversity[1]: + max_implicit_diversity = (dk, dl) + + return max_implicit_diversity + + def get_req_bandwidths(self, _level, _placement_level, _bandwidth, _total_req_bandwidths): + if _level == "cluster" or _level == "rack": + if _placement_level == "cluster" or _placement_level == "rack": + _total_req_bandwidths[1] += _bandwidth + elif _level == "host": + if _placement_level == "cluster" or _placement_level == "rack": + _total_req_bandwidths[1] += _bandwidth + _total_req_bandwidths[0] += _bandwidth + elif _placement_level == "host": + _total_req_bandwidths[0] += _bandwidth + + def _check_nw_bandwidth_availability(self, _level, _req_bandwidths, _candidate_resource): + available = True + + if _level == "cluster": + cluster_avail_bandwidths = [] + for _, sr in _candidate_resource.cluster_avail_switches.iteritems(): + cluster_avail_bandwidths.append(max(sr.avail_bandwidths)) + + if max(cluster_avail_bandwidths) < _req_bandwidths[1]: + available = False + + elif _level == "rack": + rack_avail_bandwidths = [] + for _, sr in _candidate_resource.rack_avail_switches.iteritems(): + rack_avail_bandwidths.append(max(sr.avail_bandwidths)) + + if max(rack_avail_bandwidths) < _req_bandwidths[1]: + available = False + + elif _level == "host": + host_avail_bandwidths = [] + for _, sr in _candidate_resource.host_avail_switches.iteritems(): + host_avail_bandwidths.append(max(sr.avail_bandwidths)) + + if max(host_avail_bandwidths) < _req_bandwidths[0]: + available = False + + rack_avail_bandwidths = [] + for _, sr in _candidate_resource.rack_avail_switches.iteritems(): + rack_avail_bandwidths.append(max(sr.avail_bandwidths)) + + avail_bandwidth = min(max(host_avail_bandwidths), max(rack_avail_bandwidths)) + if avail_bandwidth < _req_bandwidths[1]: + available = False + + return available diff --git a/valet/engine/optimizer/ostro/openstack_filters.py b/valet/engine/optimizer/ostro/openstack_filters.py new file mode 100755 index 0000000..46e73ff --- /dev/null +++ b/valet/engine/optimizer/ostro/openstack_filters.py @@ -0,0 +1,246 @@ +#!/bin/python + +# Modified: Mar. 
15, 2016 + +import openstack_utils +import six + +from valet.engine.optimizer.app_manager.app_topology_base import VM + +_SCOPE = 'aggregate_instance_extra_specs' + + +class AggregateInstanceExtraSpecsFilter(object): + """AggregateInstanceExtraSpecsFilter works with InstanceType records.""" + + # Aggregate data and instance type does not change within a request + run_filter_once_per_request = True + + def __init__(self, _logger): + self.logger = _logger + + def host_passes(self, _level, _host, _v): + """Return a list of hosts that can create instance_type + + Check that the extra specs associated with the instance type match + the metadata provided by aggregates. If not present return False. + """ + + # If 'extra_specs' is not present or extra_specs are empty then we + # need not proceed further + extra_specs_list = [] + for extra_specs in _v.extra_specs_list: + if "host_aggregates" not in extra_specs.keys(): + extra_specs_list.append(extra_specs) + + if len(extra_specs_list) == 0: + return True + + metadatas = openstack_utils.aggregate_metadata_get_by_host(_level, _host) + + matched_logical_group_list = [] + for extra_specs in extra_specs_list: + for lgk, metadata in metadatas.iteritems(): + if self._match_metadata(_host.get_resource_name(_level), lgk, extra_specs, metadata) is True: + matched_logical_group_list.append(lgk) + break + else: + return False + + for extra_specs in _v.extra_specs_list: + if "host_aggregates" in extra_specs.keys(): + extra_specs["host_aggregates"] = matched_logical_group_list + break + else: + host_aggregate_extra_specs = {} + host_aggregate_extra_specs["host_aggregates"] = matched_logical_group_list + _v.extra_specs_list.append(host_aggregate_extra_specs) + + return True + + def _match_metadata(self, _h_name, _lg_name, _extra_specs, _metadata): + for key, req in six.iteritems(_extra_specs): + # Either not scope format, or aggregate_instance_extra_specs scope + scope = key.split(':', 1) + if len(scope) > 1: + if scope[0] != _SCOPE: + continue + else: + del scope[0] + key = scope[0] + + if key == "host_aggregates": + continue + + aggregate_vals = _metadata.get(key, None) + if not aggregate_vals: + self.logger.debug("key (" + key + ") not exists in logical_group (" + _lg_name + ") " + " of host (" + _h_name + ")") + return False + for aggregate_val in aggregate_vals: + if openstack_utils.match(aggregate_val, req): + break + else: + self.logger.debug("key (" + key + ")'s value (" + req + ") not exists in logical_group " + "(" + _lg_name + ") " + " of host (" + _h_name + ")") + return False + + return True + + +# NOTE: originally, OpenStack used the metadata of host_aggregate +class AvailabilityZoneFilter(object): + """ Filters Hosts by availability zone. 
+ + Works with aggregate metadata availability zones, using the key + 'availability_zone' + Note: in theory a compute node can be part of multiple availability_zones + """ + + # Availability zones do not change within a request + run_filter_once_per_request = True + + def __init__(self, _logger): + self.logger = _logger + + def host_passes(self, _level, _host, _v): + az_request_list = [] + if isinstance(_v, VM): + az_request_list.append(_v.availability_zone) + else: + for az in _v.availability_zone_list: + az_request_list.append(az) + + if len(az_request_list) == 0: + return True + + # metadatas = openstack_utils.aggregate_metadata_get_by_host(_level, _host, key='availability_zone') + availability_zone_list = openstack_utils.availability_zone_get_by_host(_level, _host) + + for azr in az_request_list: + if azr not in availability_zone_list: + self.logger.debug("AZ (" + azr + ") not exists in host " + "(" + _host.get_resource_name(_level) + ")") + return False + + return True + + ''' if 'availability_zone' in metadata: + + hosts_passes = availability_zone in metadata['availability_zone'] + host_az = metadata['availability_zone'] + else: + hosts_passes = availability_zone == CONF.default_availability_zone + host_az = CONF.default_availability_zone + + if not hosts_passes: + LOG.debug("Availability Zone '%(az)s' requested. " + "%(host_state)s has AZs: %(host_az)s", + {'host_state': host_state, + 'az': availability_zone, + 'host_az': host_az}) + + return hosts_passes + ''' + + +class RamFilter(object): + + def __init__(self, _logger): + self.logger = _logger + + def host_passes(self, _level, _host, _v): + """Only return hosts with sufficient available RAM.""" + requested_ram = _v.mem # MB + # free_ram_mb = host_state.free_ram_mb + # total_usable_ram_mb = host_state.total_usable_ram_mb + (total_ram, usable_ram) = _host.get_mem(_level) + + # Do not allow an instance to overcommit against itself, only against other instances. + if not total_ram >= requested_ram: + self.logger.debug("requested mem (" + str(requested_ram) + ") more than total mem (" + + str(total_ram) + ") in host (" + _host.get_resource_name(_level) + ")") + return False + + # ram_allocation_ratio = self._get_ram_allocation_ratio(host_state, spec_obj) + + # m emory_mb_limit = total_usable_ram_mb * ram_allocation_ratio + # used_ram_mb = total_usable_ram_mb - free_ram_mb + # usable_ram = memory_mb_limit - used_ram_mb + + if not usable_ram >= requested_ram: + self.logger.debug("requested mem (" + str(requested_ram) + ") more than avail mem (" + + str(usable_ram) + ") in host (" + _host.get_resource_name(_level) + ")") + return False + + # save oversubscription limit for compute node to test against: + # host_state.limits['memory_mb'] = memory_mb_limit + return True + + +class CoreFilter(object): + + def __init__(self, _logger): + self.logger = _logger + + def host_passes(self, _level, _host, _v): + """Return True if host has sufficient CPU cores.""" + (vCPUs, avail_vCPUs) = _host.get_vCPUs(_level) + ''' if avail_vcpus == 0: + + Fail safe + LOG.warning(_LW("VCPUs not set; assuming CPU collection broken")) + return True + ''' + + instance_vCPUs = _v.vCPUs + # cpu_allocation_ratio = self._get_cpu_allocation_ratio(host_state, spec_obj) + # vcpus_total = host_state.vcpus_total * cpu_allocation_ratio + + # Only provide a VCPU limit to compute if the virt driver is reporting + # an accurate count of installed VCPUs. 
(XenServer driver does not)
+        '''
+        if vcpus_total > 0:
+            host_state.limits['vcpu'] = vcpus_total
+        '''
+
+        # Do not allow an instance to overcommit against itself, only against other instances.
+        if instance_vCPUs > vCPUs:
+            self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than total vCPUs (" +
+                              str(vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")")
+            return False
+
+        # free_vcpus = vcpus_total - host_state.vcpus_used
+        if avail_vCPUs < instance_vCPUs:
+            self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than avail vCPUs (" +
+                              str(avail_vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")")
+            return False
+
+        return True
+
+
+class DiskFilter(object):
+
+    def __init__(self, _logger):
+        self.logger = _logger
+
+    def host_passes(self, _level, _host, _v):
+        """Filter based on disk usage."""
+        # requested_disk = (1024 * (spec_obj.root_gb + spec_obj.ephemeral_gb) + spec_obj.swap)
+        requested_disk = _v.local_volume_size
+        (_, usable_disk) = _host.get_local_disk(_level)
+
+        # free_disk_mb = host_state.free_disk_mb
+        # total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
+
+        # disk_allocation_ratio = self._get_disk_allocation_ratio(host_state, spec_obj)
+
+        # disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
+        # used_disk_mb = total_usable_disk_mb - free_disk_mb
+        # usable_disk_mb = disk_mb_limit - used_disk_mb
+
+        if not usable_disk >= requested_disk:
+            self.logger.debug("requested disk (" + str(requested_disk) + ") more than avail disk (" +
+                              str(usable_disk) + ") in host (" + _host.get_resource_name(_level) + ")")
+            return False
+
+        # disk_gb_limit = disk_mb_limit / 1024
+        # host_state.limits['disk_gb'] = disk_gb_limit
+        return True
diff --git a/valet/engine/optimizer/ostro/openstack_utils.py b/valet/engine/optimizer/ostro/openstack_utils.py new file mode 100755 index 0000000..1993b21 --- /dev/null +++ b/valet/engine/optimizer/ostro/openstack_utils.py @@ -0,0 +1,90 @@
+#!/bin/python
+
+# Modified: Mar. 15, 2016
+
+
+import collections
+import operator
+
+
+# 1. The following operations are supported:
+#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <all-in>, <or>, ==, !=, >=, <=
+# 2. Note that <or> is handled in a different way below.
+# 3. If the first word in the extra_specs is not one of the operators,
+#    it is ignored.
+op_methods = {'=': lambda x, y: float(x) >= float(y),
+              '<in>': lambda x, y: y in x,
+              '<all-in>': lambda x, y: all(val in x for val in y),
+              '==': lambda x, y: float(x) == float(y),
+              '!=': lambda x, y: float(x) != float(y),
+              '>=': lambda x, y: float(x) >= float(y),
+              '<=': lambda x, y: float(x) <= float(y),
+              's==': operator.eq,
+              's!=': operator.ne,
+              's<': operator.lt,
+              's<=': operator.le,
+              's>': operator.gt,
+              's>=': operator.ge}
+
+
+def match(value, req):
+    words = req.split()
+
+    op = method = None
+    if words:
+        op = words.pop(0)
+        method = op_methods.get(op)
+
+    if op != '<or>' and not method:
+        return value == req
+
+    if value is None:
+        return False
+
+    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
+        while True:
+            if words.pop(0) == value:
+                return True
+            if not words:
+                break
+            words.pop(0)  # remove a keyword <or>
+            if not words:
+                break
+        return False
+
+    if words:
+        if op == '<all-in>':  # requires a list not a string
+            return method(value, words)
+        return method(value, words[0])
+    return False
+
+
+def aggregate_metadata_get_by_host(_level, _host, _key=None):
+    """Returns a dict of all metadata based on a metadata key for a specific host.
+       If the key is not provided, returns a dict of all metadata."""
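+    # Illustrative result, assuming a hypothetical aggregate "ssd-agg" whose
+    # metadata values were stored as comma-separated strings:
+    #     {"ssd-agg": {"disk_type": set(["ssd"]),
+    #                  "cpu_arch": set(["x86_64", "aarch64"])}}
+    # Each value is a set because one metadata entry may list several
+    # acceptable values; match() above is then applied per member.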
+
+    metadatas = {}
+
+    logical_groups = _host.get_memberships(_level)
+
+    for lgk, lg in logical_groups.iteritems():
+        if lg.group_type == "AGGR":
+            if _key is None or _key in lg.metadata:
+                metadata = collections.defaultdict(set)
+                for k, v in lg.metadata.items():
+                    metadata[k].update(x.strip() for x in v.split(','))
+
+                metadatas[lgk] = metadata
+
+    return metadatas
+
+
+# NOTE: this function does not exist in OpenStack
+def availability_zone_get_by_host(_level, _host):
+    availability_zone_list = []
+
+    logical_groups = _host.get_memberships(_level)
+    for lgk, lg in logical_groups.iteritems():
+        if lg.group_type == "AZ":
+            availability_zone_list.append(lgk)
+
+    return availability_zone_list
diff --git a/valet/engine/optimizer/ostro/optimizer.py b/valet/engine/optimizer/ostro/optimizer.py new file mode 100755 index 0000000..e816974 --- /dev/null +++ b/valet/engine/optimizer/ostro/optimizer.py @@ -0,0 +1,196 @@
+#!/bin/python
+
+# Modified: Sep. 27, 2016
+
+import time
+
+from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume
+from valet.engine.optimizer.ostro.search import Search
+
+
+class Optimizer(object):
+
+    def __init__(self, _resource, _logger):
+        self.resource = _resource
+        self.logger = _logger
+
+        self.search = Search(self.logger)
+
+        self.status = "success"
+
+    def place(self, _app_topology):
+        success = False
+
+        uuid_map = None
+        place_type = None
+
+        start_ts = time.time()
+
+        if len(_app_topology.candidate_list_map) > 0:
+            place_type = "replan"
+        elif len(_app_topology.exclusion_list_map) > 0:
+            place_type = "migration"
+        else:
+            place_type = "create"
+
+        if place_type == "migration":
+            vm_id = _app_topology.exclusion_list_map.keys()[0]
+            candidate_host_list = []
+            for hk in self.resource.hosts.keys():
+                if hk not in _app_topology.exclusion_list_map[vm_id]:
+                    candidate_host_list.append(hk)
+            _app_topology.candidate_list_map[vm_id] = candidate_host_list
+
+        if place_type == "replan" or place_type == "migration":
+            success = self.search.re_place_nodes(_app_topology, self.resource)
+            if success is True:
+                if len(_app_topology.old_vm_map) > 0:
+                    uuid_map = self._delete_old_vms(_app_topology.old_vm_map)
+                    self.resource.update_topology(store=False)
+
+                    self.logger.debug("Optimizer: remove old placements for replan")
+        else:
+            success = self.search.place_nodes(_app_topology, self.resource)
+
+        end_ts = time.time()
+
+        if success is True:
+
+            self.logger.debug("Optimizer: search running time = " + str(end_ts - start_ts) + " sec")
+            self.logger.debug("Optimizer: total bandwidth = " + str(self.search.bandwidth_usage))
+            self.logger.debug("Optimizer: total number of hosts = " + str(self.search.num_of_hosts))
+
+            placement_map = {}
+            for v in self.search.node_placements.keys():
+                if isinstance(v, VM):
+                    placement_map[v] = self.search.node_placements[v].host_name
+                elif isinstance(v, Volume):
+                    placement_map[v] = self.search.node_placements[v].host_name + "@"
+                    placement_map[v] += self.search.node_placements[v].storage.storage_name
+                elif isinstance(v, VGroup):
+                    if v.level == "host":
+                        placement_map[v] = self.search.node_placements[v].host_name
+                    elif v.level == "rack":
+                        placement_map[v] = self.search.node_placements[v].rack_name
+                    elif v.level == "cluster":
+                        placement_map[v] = self.search.node_placements[v].cluster_name
+
+                self.logger.debug(" " + v.name + " placed in " + placement_map[v])
+
+            self._update_resource_status(uuid_map)
+
+            return placement_map
+
+        else:
+            self.status =
self.search.status + return None + + def _delete_old_vms(self, _old_vm_map): + uuid_map = {} + + for h_uuid, info in _old_vm_map.iteritems(): + uuid = self.resource.get_uuid(h_uuid, info[0]) + if uuid is not None: + uuid_map[h_uuid] = uuid + + self.resource.remove_vm_by_h_uuid_from_host(info[0], h_uuid, info[1], info[2], info[3]) + self.resource.update_host_time(info[0]) + + host = self.resource.hosts[info[0]] + self.resource.remove_vm_by_h_uuid_from_logical_groups(host, h_uuid) + + return uuid_map + + def _update_resource_status(self, _uuid_map): + for v, np in self.search.node_placements.iteritems(): + + if isinstance(v, VM): + uuid = "none" + if _uuid_map is not None: + if v.uuid in _uuid_map.keys(): + uuid = _uuid_map[v.uuid] + + self.resource.add_vm_to_host(np.host_name, + (v.uuid, v.name, uuid), + v.vCPUs, v.mem, v.local_volume_size) + + for vl in v.vm_list: + tnp = self.search.node_placements[vl.node] + placement_level = np.get_common_placement(tnp) + self.resource.deduct_bandwidth(np.host_name, placement_level, vl.nw_bandwidth) + + for voll in v.volume_list: + tnp = self.search.node_placements[voll.node] + placement_level = np.get_common_placement(tnp) + self.resource.deduct_bandwidth(np.host_name, placement_level, voll.io_bandwidth) + + self._update_logical_grouping(v, self.search.avail_hosts[np.host_name], uuid) + + self.resource.update_host_time(np.host_name) + + elif isinstance(v, Volume): + self.resource.add_vol_to_host(np.host_name, np.storage.storage_name, v.name, v.volume_size) + + for vl in v.vm_list: + tnp = self.search.node_placements[vl.node] + placement_level = np.get_common_placement(tnp) + self.resource.deduct_bandwidth(np.host_name, placement_level, vl.io_bandwidth) + + self.resource.update_storage_time(np.storage.storage_name) + + def _update_logical_grouping(self, _v, _avail_host, _uuid): + for lgk, lg in _avail_host.host_memberships.iteritems(): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + lg_name = lgk.split(":") + if lg_name[0] == "host" and lg_name[1] != "any": + self.resource.add_logical_group(_avail_host.host_name, lgk, lg.group_type) + + if _avail_host.rack_name != "any": + for lgk, lg in _avail_host.rack_memberships.iteritems(): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + lg_name = lgk.split(":") + if lg_name[0] == "rack" and lg_name[1] != "any": + self.resource.add_logical_group(_avail_host.rack_name, lgk, lg.group_type) + + if _avail_host.cluster_name != "any": + for lgk, lg in _avail_host.cluster_memberships.iteritems(): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + lg_name = lgk.split(":") + if lg_name[0] == "cluster" and lg_name[1] != "any": + self.resource.add_logical_group(_avail_host.cluster_name, lgk, lg.group_type) + + vm_logical_groups = [] + self._collect_logical_groups_of_vm(_v, vm_logical_groups) + + host = self.resource.hosts[_avail_host.host_name] + self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid), vm_logical_groups) + + def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups): + if isinstance(_v, VM): + for es in _v.extra_specs_list: + if "host_aggregates" in es.keys(): + lg_list = es["host_aggregates"] + for lgk in lg_list: + if lgk not in _vm_logical_groups: + _vm_logical_groups.append(lgk) + + if _v.availability_zone is not None: + az = _v.availability_zone.split(":")[0] + if az not in _vm_logical_groups: + _vm_logical_groups.append(az) + + for _, level in _v.exclusivity_groups.iteritems(): + if 
level not in _vm_logical_groups: + _vm_logical_groups.append(level) + + for _, level in _v.diversity_groups.iteritems(): + if level not in _vm_logical_groups: + _vm_logical_groups.append(level) + + if isinstance(_v, VGroup): + name = _v.level + ":" + _v.name + if name not in _vm_logical_groups: + _vm_logical_groups.append(name) + + if _v.survgroup is not None: + self._collect_logical_groups_of_vm(_v.survgroup, _vm_logical_groups) diff --git a/valet/engine/optimizer/ostro/ostro.py b/valet/engine/optimizer/ostro/ostro.py new file mode 100755 index 0000000..631a1da --- /dev/null +++ b/valet/engine/optimizer/ostro/ostro.py @@ -0,0 +1,633 @@ +#!/bin/python + +# Modified: Oct. 1, 2016 + + +from oslo_config import cfg +import threading +import time +import traceback +from valet.engine.listener.listener_manager import ListenerManager +from valet.engine.optimizer.app_manager.app_handler import AppHandler +from valet.engine.optimizer.app_manager.app_topology_base import VM, Volume +from valet.engine.optimizer.db_connect.music_handler import MusicHandler +from valet.engine.optimizer.ostro.optimizer import Optimizer +from valet.engine.resource_manager.compute_manager import ComputeManager +from valet.engine.resource_manager.resource import Resource +from valet.engine.resource_manager.topology_manager import TopologyManager + +CONF = cfg.CONF + + +class Ostro(object): + + def __init__(self, _config, _logger): + self.config = _config + self.logger = _logger + + self.db = MusicHandler(self.config, self.logger) + if self.db.init_db() is False: + self.logger.error("Ostro.__init__: error while initializing MUSIC database") + else: + self.logger.debug("Ostro.__init__: done init music") + + self.resource = Resource(self.db, self.config, self.logger) + self.logger.debug("done init resource") + + self.app_handler = AppHandler(self.resource, self.db, self.config, self.logger) + self.logger.debug("done init apphandler") + + self.optimizer = Optimizer(self.resource, self.logger) + self.logger.debug("done init optimizer") + + self.data_lock = threading.Lock() + self.thread_list = [] + + self.topology = TopologyManager(1, "Topology", self.resource, self.data_lock, self.config, self.logger) + self.logger.debug("done init topology") + + self.compute = ComputeManager(2, "Compute", self.resource, self.data_lock, self.config, self.logger) + self.logger.debug("done init compute") + + self.listener = ListenerManager(3, "Listener", CONF) + self.logger.debug("done init listener") + + self.status = "success" + self.end_of_process = False + + def run_ostro(self): + self.logger.info("Ostro.run_ostro: start Ostro ......") + + self.topology.start() + self.compute.start() + self.listener.start() + + self.thread_list.append(self.topology) + self.thread_list.append(self.compute) + self.thread_list.append(self.listener) + + ''' for monitoring test ''' + # duration = 30.0 + # expired = time.time() + duration + + while self.end_of_process is False: + time.sleep(1) + + event_list = self.db.get_events() + if event_list is None: + break + if len(event_list) > 0: + if self.handle_events(event_list) is False: + break + + request_list = self.db.get_requests() + if request_list is None: + break + if len(request_list) > 0: + if self.place_app(request_list) is False: + break + + ''' for monitoring test ''' + # current = time.time() + # if current > expired: + # self.logger.debug("test: ostro running ......") + # expired = current + duration + + self.topology.end_of_process = True + self.compute.end_of_process = True + + for t in 
self.thread_list: + t.join() + + self.logger.info("Ostro.run_ostro: exit Ostro") + + def stop_ostro(self): + self.end_of_process = True + + while len(self.thread_list) > 0: + time.sleep(1) + for t in self.thread_list: + if not t.is_alive(): + self.thread_list.remove(t) + + def bootstrap(self): + self.logger.info("Ostro.bootstrap: start bootstrap") + + try: + resource_status = self.db.get_resource_status(self.resource.datacenter.name) + if resource_status is None: + return False + + if len(resource_status) > 0: + self.logger.info("Ostro.bootstrap: bootstrap from db") + if self.resource.bootstrap_from_db(resource_status) is False: + return False + else: + self.logger.info("bootstrap from OpenStack") + + if self._set_hosts() is False: + self.logger.error('_set_hosts is false') + return False + + if self._set_flavors() is False: + self.logger.info("_set_flavors is false") + return False + + if self._set_topology() is False: + self.logger.error("_set_topology is false") + return False + + self.resource.update_topology() + + except Exception: + self.logger.critical("Ostro.bootstrap failed: " + traceback.format_exc()) + + self.logger.info("Ostro.bootstrap: done bootstrap") + + return True + + def _set_topology(self): + if self.topology.set_topology() is False: + self.status = "datacenter configuration error" + return False + + self.logger.debug("done topology bootstrap") + + return True + + def _set_hosts(self): + if self.compute.set_hosts() is False: + self.status = "OpenStack (Nova) internal error" + return False + self.logger.debug("done hosts & groups bootstrap") + return True + + def _set_flavors(self): + self.logger.debug("start flavors bootstrap") + if self.compute.set_flavors() is False: + self.status = "OpenStack (Nova) internal error" + return False + + self.logger.debug("done flavors bootstrap") + + return True + + def place_app(self, _app_data): + self.data_lock.acquire() + + start_time = time.time() + + query_request_list = [] + placement_request_list = [] + for req in _app_data: + if req["action"] == "query": + query_request_list.append(req) + else: + placement_request_list.append(req) + + if len(query_request_list) > 0: + self.logger.info("Ostro.place_app: start query") + + query_results = self._query(query_request_list) + + result = self._get_json_results("query", "ok", self.status, query_results) + + if self.db.put_result(result) is False: + self.data_lock.release() + return False + + self.logger.info("Ostro.place_app: done query") + + if len(placement_request_list) > 0: + + self.logger.info("Ostro.place_app: start app placement") + + result = None + + placement_map = self._place_app(placement_request_list) + + if placement_map is None: + result = self._get_json_results("placement", "error", self.status, placement_map) + else: + result = self._get_json_results("placement", "ok", "success", placement_map) + + if self.db.put_result(result) is False: + self.data_lock.release() + return False + + self.logger.info("Ostro.place_app: done app placement") + + end_time = time.time() + + self.logger.info("Ostro.place_app: total decision delay of request = " + str(end_time - start_time) + " sec") + + self.data_lock.release() + return True + + def _query(self, _query_list): + query_results = {} + + for q in _query_list: + if "type" in q.keys(): + if q["type"] == "group_vms": + if "parameters" in q.keys(): + params = q["parameters"] + if "group_name" in params.keys(): + vm_list = self._get_vms_from_logical_group(params["group_name"]) + query_results[q["stack_id"]] = vm_list + else: + 
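+                            # For reference, a well-formed "group_vms" query is
+                            # expected to look like (illustrative values only):
+                            #   {"stack_id": "s-123", "action": "query",
+                            #    "type": "group_vms",
+                            #    "parameters": {"group_name": "db-group"}}
+                            # Reaching this branch means "group_name" was absent.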
self.status = "unknown paramenter in query" + self.logger.warn("Ostro._query: " + self.status) + query_results[q["stack_id"]] = None + else: + self.status = "no parameters in query" + self.logger.warn("Ostro._query: " + self.status) + query_results[q["stack_id"]] = None + elif q["type"] == "all_groups": + query_results[q["stack_id"]] = self._get_logical_groups() + else: + self.status = "unknown query type" + self.logger.warn("Ostro._query: " + self.status) + query_results[q["stack_id"]] = None + else: + self.status = "no type in query" + self.logger.warn("Ostro._query: " + self.status) + query_results[q["stack_id"]] = None + + return query_results + + def _get_vms_from_logical_group(self, _group_name): + vm_list = [] + + vm_id_list = [] + for lgk, lg in self.resource.logical_groups.iteritems(): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + lg_id = lgk.split(":") + if lg_id[1] == _group_name: + vm_id_list = lg.vm_list + break + + for vm_id in vm_id_list: + if vm_id[2] != "none": # if physical_uuid != 'none' + vm_list.append(vm_id[2]) + + return vm_list + + def _get_logical_groups(self): + logical_groups = {} + + for lgk, lg in self.resource.logical_groups.iteritems(): + logical_groups[lgk] = lg.get_json_info() + + return logical_groups + + def _place_app(self, _app_data): + ''' set application topology ''' + app_topology = self.app_handler.add_app(_app_data) + if app_topology is None: + self.status = self.app_handler.status + self.logger.debug("Ostro._place_app: error while register requested apps: " + self.status) + return None + + ''' check and set vm flavor information ''' + for _, vm in app_topology.vms.iteritems(): + if self._set_vm_flavor_information(vm) is False: + self.status = "fail to set flavor information" + self.logger.error("Ostro._place_app: " + self.status) + return None + for _, vg in app_topology.vgroups.iteritems(): + if self._set_vm_flavor_information(vg) is False: + self.status = "fail to set flavor information in a group" + self.logger.error("Ostro._place_app: " + self.status) + return None + + ''' set weights for optimization ''' + app_topology.set_weight() + app_topology.set_optimization_priority() + + ''' perform search for optimal placement of app topology ''' + placement_map = self.optimizer.place(app_topology) + if placement_map is None: + self.status = self.optimizer.status + self.logger.debug("Ostro._place_app: error while optimizing app placement: " + self.status) + return None + + ''' update resource and app information ''' + if len(placement_map) > 0: + self.resource.update_topology() + self.app_handler.add_placement(placement_map, self.resource.current_timestamp) + if len(app_topology.exclusion_list_map) > 0 and len(app_topology.planned_vm_map) > 0: + for vk in app_topology.planned_vm_map.keys(): + if vk in placement_map.keys(): + del placement_map[vk] + + return placement_map + + def _set_vm_flavor_information(self, _v): + if isinstance(_v, VM): + if self._set_vm_flavor_properties(_v) is False: + return False + else: # affinity group + for _, sg in _v.subvgroups.iteritems(): + if self._set_vm_flavor_information(sg) is False: + return False + + def _set_vm_flavor_properties(self, _vm): + flavor = self.resource.get_flavor(_vm.flavor) + + if flavor is None: + self.logger.warn("Ostro._set_vm_flavor_properties: does not exist flavor (" + _vm.flavor + ") and try to refetch") + + ''' reset flavor resource and try again ''' + if self._set_flavors() is False: + return False + self.resource.update_topology() + flavor = 
+            if flavor is None:
+                return False
+
+        _vm.vCPUs = flavor.vCPUs
+        _vm.mem = flavor.mem_cap
+        _vm.local_volume_size = flavor.disk_cap
+
+        if len(flavor.extra_specs) > 0:
+            extra_specs = {}
+            for mk, mv in flavor.extra_specs.iteritems():
+                extra_specs[mk] = mv
+            _vm.extra_specs_list.append(extra_specs)
+
+        return True
+
+    def handle_events(self, _event_list):
+        self.data_lock.acquire()
+
+        resource_updated = False
+
+        for e in _event_list:
+            if e.host is not None and e.host != "none":
+                if self._check_host(e.host) is False:
+                    self.logger.warn("Ostro.handle_events: host (" + e.host + ") related to this event does not exist")
+                    continue
+
+            if e.method == "build_and_run_instance":  # VM is created (from stack)
+                self.logger.debug("Ostro.handle_events: got build_and_run event")
+                if self.db.put_uuid(e) is False:
+                    self.data_lock.release()
+                    return False
+
+            elif e.method == "object_action":
+                if e.object_name == 'Instance':  # VM became active or deleted
+                    orch_id = self.db.get_uuid(e.uuid)
+                    if orch_id is None:
+                        self.data_lock.release()
+                        return False
+
+                    if e.vm_state == "active":
+                        self.logger.debug("Ostro.handle_events: got instance_active event")
+                        vm_info = self.app_handler.get_vm_info(orch_id[1], orch_id[0], e.host)
+                        if vm_info is None:
+                            self.logger.error("Ostro.handle_events: error while getting app info from MUSIC")
+                            self.data_lock.release()
+                            return False
+
+                        if len(vm_info) == 0:
+                            '''
+                            h_uuid is None or "none" because the vm was not created by a stack,
+                            or the stack was not found because the vm was created by another stack
+                            '''
+                            self.logger.warn("Ostro.handle_events: no vm_info found in app placement record")
+                            self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk)
+                        else:
+                            if "planned_host" in vm_info.keys() and vm_info["planned_host"] != e.host:
+                                '''
+                                vm was activated on a different host than planned
+                                '''
+                                self.logger.warn("Ostro.handle_events: vm activated on a different host")
+                                self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk)
+
+                                self._remove_vm_from_host(e.uuid, orch_id[0],
+                                                          vm_info["planned_host"],
+                                                          float(vm_info["cpus"]),
+                                                          float(vm_info["mem"]),
+                                                          float(vm_info["local_volume"]))
+
+                                self._remove_vm_from_logical_groups(e.uuid, orch_id[0], vm_info["planned_host"])
+                            else:
+                                '''
+                                found the vm on the planned host;
+                                it may have been deleted from the host during batch cleanup
+                                '''
+                                if self._check_h_uuid(orch_id[0], e.host) is False:
+                                    self.logger.debug("Ostro.handle_events: planned vm was deleted")
+                                    if self._check_uuid(e.uuid, e.host) is True:
+                                        self._update_h_uuid_in_host(orch_id[0], e.uuid, e.host)
+                                        self._update_h_uuid_in_logical_groups(orch_id[0], e.uuid, e.host)
+                                else:
+                                    self.logger.debug("Ostro.handle_events: vm activated as planned")
+                                    self._update_uuid_in_host(orch_id[0], e.uuid, e.host)
+                                    self._update_uuid_in_logical_groups(orch_id[0], e.uuid, e.host)
+
+                        resource_updated = True
+
+                    elif e.vm_state == "deleted":
+                        self.logger.debug("Ostro.handle_events: got instance_delete event")
+
+                        self._remove_vm_from_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk)
+                        self._remove_vm_from_logical_groups(e.uuid, orch_id[0], e.host)
+
+                        if self.app_handler.update_vm_info(orch_id[1], orch_id[0]) is False:
+                            self.logger.error("Ostro.handle_events: error while updating app in MUSIC")
+                            self.data_lock.release()
+                            return False
+
+                        resource_updated = True
+
+                    else:
+                        self.logger.warn("Ostro.handle_events: unknown vm_state = " + e.vm_state)
+
+                elif e.object_name == 'ComputeNode':  # Host resource is updated
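+                    # A compute-node event carries the host's capacity snapshot
+                    # (vcpus, vcpus_used, mem, free_mem, local_disk,
+                    # free_local_disk, disk_available_least); it is handed to
+                    # the resource tracker below, and the host timestamp is
+                    # refreshed only when the update reports a change.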
self.logger.debug("Ostro.handle_events: got compute event") + # NOTE: what if host is disabled? + if self.resource.update_host_resources(e.host, e.status, + e.vcpus, e.vcpus_used, + e.mem, e.free_mem, + e.local_disk, e.free_local_disk, + e.disk_available_least) is True: + self.resource.update_host_time(e.host) + + resource_updated = True + + else: + self.logger.warn("Ostro.handle_events: unknown object_name = " + e.object_name) + else: + self.logger.warn("Ostro.handle_events: unknown event method = " + e.method) + + if resource_updated is True: + self.resource.update_topology() + + for e in _event_list: + if self.db.delete_event(e.event_id) is False: + self.data_lock.release() + return False + if e.method == "object_action": + if e.object_name == 'Instance': + if e.vm_state == "deleted": + if self.db.delete_uuid(e.uuid) is False: + self.data_lock.release() + return False + + self.data_lock.release() + + return True + + def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk): + vm_id = None + if _h_uuid is None: + vm_id = ("none", "none", _uuid) + else: + vm_id = (_h_uuid, "none", _uuid) + + self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem, _local_disk) + self.resource.update_host_time(_host_name) + + def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk): + if self._check_h_uuid(_h_uuid, _host_name) is True: + self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid, _vcpus, _mem, _local_disk) + self.resource.update_host_time(_host_name) + else: + if self._check_uuid(_uuid, _host_name) is True: + self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid, _vcpus, _mem, _local_disk) + self.resource.update_host_time(_host_name) + + def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name): + host = self.resource.hosts[_host_name] + if _h_uuid is not None and _h_uuid != "none": + self.resource.remove_vm_by_h_uuid_from_logical_groups(host, _h_uuid) + else: + self.resource.remove_vm_by_uuid_from_logical_groups(host, _uuid) + + def _check_host(self, _host_name): + exist = False + + for hk in self.resource.hosts.keys(): + if hk == _host_name: + exist = True + break + + return exist + + def _check_h_uuid(self, _h_uuid, _host_name): + if _h_uuid is None or _h_uuid == "none": + return False + + host = self.resource.hosts[_host_name] + + return host.exist_vm_by_h_uuid(_h_uuid) + + def _check_uuid(self, _uuid, _host_name): + if _uuid is None or _uuid == "none": + return False + + host = self.resource.hosts[_host_name] + + return host.exist_vm_by_uuid(_uuid) + + def _update_uuid_in_host(self, _h_uuid, _uuid, _host_name): + host = self.resource.hosts[_host_name] + if host.update_uuid(_h_uuid, _uuid) is True: + self.resource.update_host_time(_host_name) + else: + self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid in host = " + host.name) + + def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name): + host = self.resource.hosts[_host_name] + if host.update_h_uuid(_h_uuid, _uuid) is True: + self.resource.update_host_time(_host_name) + + def _update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host_name): + host = self.resource.hosts[_host_name] + + self.resource.update_uuid_in_logical_groups(_h_uuid, _uuid, host) + + def _update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host_name): + host = self.resource.hosts[_host_name] + + self.resource.update_h_uuid_in_logical_groups(_h_uuid, _uuid, host) + + def _get_json_results(self, _request_type, _status_type, _status_message, _map): + result = {} + + 
if _request_type == "query": + for qk, qr in _map.iteritems(): + query_result = {} + + query_status = {} + if qr is None: + query_status['type'] = "error" + query_status['message'] = _status_message + else: + query_status['type'] = "ok" + query_status['message'] = "success" + + query_result['status'] = query_status + if qr is not None: + query_result['resources'] = qr + + result[qk] = query_result + + else: + if _status_type != "error": + applications = {} + for v in _map.keys(): + if isinstance(v, VM) or isinstance(v, Volume): + resources = None + if v.app_uuid in applications.keys(): + resources = applications[v.app_uuid] + else: + resources = {} + applications[v.app_uuid] = resources + + host = _map[v] + resource_property = {"host": host} + properties = {"properties": resource_property} + resources[v.uuid] = properties + + for appk, app_resources in applications.iteritems(): + app_result = {} + app_status = {} + + app_status['type'] = _status_type + app_status['message'] = _status_message + + app_result['status'] = app_status + app_result['resources'] = app_resources + + result[appk] = app_result + + for appk, app in self.app_handler.apps.iteritems(): + if app.request_type == "ping": + app_result = {} + app_status = {} + + app_status['type'] = _status_type + app_status['message'] = "ping" + + app_result['status'] = app_status + app_result['resources'] = {"ip": self.config.ip} + + result[appk] = app_result + + else: + for appk in self.app_handler.apps.keys(): + app_result = {} + app_status = {} + + app_status['type'] = _status_type + app_status['message'] = _status_message + + app_result['status'] = app_status + app_result['resources'] = {} + + result[appk] = app_result + + return result diff --git a/valet/engine/optimizer/ostro/search.py b/valet/engine/optimizer/ostro/search.py new file mode 100755 index 0000000..997aa81 --- /dev/null +++ b/valet/engine/optimizer/ostro/search.py @@ -0,0 +1,1959 @@ +#!/bin/python + +# Modified: Sep. 
27, 2016 + +import copy +import operator + +from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume, LEVELS +from valet.engine.optimizer.ostro.constraint_solver import ConstraintSolver +from valet.engine.optimizer.ostro.search_base import compute_reservation +from valet.engine.optimizer.ostro.search_base import Node, Resource, LogicalGroupResource +from valet.engine.optimizer.ostro.search_base import SwitchResource, StorageResource +from valet.engine.resource_manager.resource_base import Datacenter + + +class Search(object): + + def __init__(self, _logger): + self.logger = _logger + + ''' search inputs ''' + self.resource = None + self.app_topology = None + + ''' snapshot of current resource status ''' + self.avail_hosts = {} + self.avail_logical_groups = {} + self.avail_storage_hosts = {} + self.avail_switches = {} + + ''' search results ''' + self.node_placements = {} + self.bandwidth_usage = 0 + self.num_of_hosts = 0 + + ''' for replan ''' + self.planned_placements = {} + + ''' optimization criteria ''' + self.nw_bandwidth_weight = -1 + self.CPU_weight = -1 + self.mem_weight = -1 + self.local_disk_weight = -1 + self.disk_weight = -1 + + self.constraint_solver = None + + self.status = "success" + + def _init_placements(self): + self.avail_hosts.clear() + self.avail_logical_groups.clear() + self.avail_storage_hosts.clear() + self.avail_switches.clear() + + self.node_placements.clear() + self.bandwidth_usage = 0 + self.num_of_hosts = 0 + + self.planned_placements.clear() + + self.nw_bandwidth_weight = -1 + self.CPU_weight = -1 + self.mem_weight = -1 + self.local_disk_weight = -1 + self.disk_weight = -1 + + def copy_resource_status(self, _resource): + self._init_placements() + + self.resource = _resource + + self._create_avail_logical_groups() + self._create_avail_storage_hosts() + self._create_avail_switches() + self._create_avail_hosts() + + def place_nodes(self, _app_topology, _resource): + self._init_placements() + + self.app_topology = _app_topology + + ''' ping request ''' + if self.app_topology.optimization_priority is None: + return True + + self.resource = _resource + + self.constraint_solver = ConstraintSolver(self.logger) + + self.logger.info("Search: start search") + + self._create_avail_logical_groups() + self._create_avail_storage_hosts() + self._create_avail_switches() + self._create_avail_hosts() + + self._compute_resource_weights() + + init_level = LEVELS[len(LEVELS) - 1] + (open_node_list, level) = self._open_list(self.app_topology.vms, + self.app_topology.volumes, + self.app_topology.vgroups, + init_level) + + ''' start from 'rack' level ''' + + return self._run_greedy(open_node_list, level, self.avail_hosts) + + def re_place_nodes(self, _app_topology, _resource): + self._init_placements() + + self.app_topology = _app_topology + self.resource = _resource + + self.constraint_solver = ConstraintSolver(self.logger) + + self.logger.info("Search: start search for replan") + + self._create_avail_logical_groups() + self._create_avail_storage_hosts() + self._create_avail_switches() + self._create_avail_hosts() + + if len(self.app_topology.old_vm_map) > 0: + self._adjust_resources() + self.logger.debug("Search: adjust resources by deducting prior placements") + + self._compute_resource_weights() + + self.logger.debug("Search: first, place already-planned nodes") + + ''' reconsider all vms to be migrated together ''' + if len(_app_topology.exclusion_list_map) > 0: + self._set_no_migrated_list() + + if self._place_planned_nodes() is False: + self.status = 
"cannot replan VMs that was planned" + self.logger.error("Search: " + self.status) + return False + + self.logger.debug("Search: second, re-place not-planned nodes") + + init_level = LEVELS[len(LEVELS) - 1] + (open_node_list, level) = self._open_list(self.app_topology.vms, + self.app_topology.volumes, + self.app_topology.vgroups, + init_level) + if open_node_list is None: + self.logger.error("Search: fail to replan") + return False + + for v, ah in self.planned_placements.iteritems(): + self.node_placements[v] = ah + + return self._run_greedy(open_node_list, level, self.avail_hosts) + + def _set_no_migrated_list(self): + migrated_vm_id = self.app_topology.candidate_list_map.keys()[0] + + if migrated_vm_id not in self.app_topology.vms.keys(): + vgroup = self._get_vgroup_of_vm(migrated_vm_id, self.app_topology.vgroups) + if vgroup is not None: + vm_list = [] + self._get_child_vms(vgroup, vm_list, migrated_vm_id) + for vk in vm_list: + if vk in self.app_topology.planned_vm_map.keys(): + del self.app_topology.planned_vm_map[vk] + else: + self.logger.error("Search: migrated " + migrated_vm_id + " is missing while replan") + + def _get_child_vms(self, _g, _vm_list, _e_vmk): + for sgk, sg in _g.subvgroups.iteritems(): + if isinstance(sg, VM): + if sgk != _e_vmk: + _vm_list.append(sgk) + else: + self._get_child_vms(sg, _vm_list, _e_vmk) + + def _place_planned_nodes(self): + init_level = LEVELS[len(LEVELS) - 1] + (planned_node_list, level) = self._open_planned_list(self.app_topology.vms, + self.app_topology.volumes, + self.app_topology.vgroups, + init_level) + if len(planned_node_list) == 0: + return True + + return self._run_greedy_as_planned(planned_node_list, level, self.avail_hosts) + + def _open_planned_list(self, _vms, _volumes, _vgroups, _current_level): + planned_node_list = [] + next_level = None + + for vmk, hk in self.app_topology.planned_vm_map.iteritems(): + if vmk in _vms.keys(): + vm = _vms[vmk] + if vm.host is None: + vm.host = [] + if hk not in vm.host: + vm.host.append(hk) + n = Node() + n.node = vm + n.sort_base = self._set_virtual_capacity_based_sort(vm) + planned_node_list.append(n) + + # NOTE: volumes skip + + else: + vgroup = self._get_vgroup_of_vm(vmk, _vgroups) + if vgroup is not None: + if vgroup.host is None: + vgroup.host = [] + host_name = self._get_host_of_vgroup(hk, vgroup.level) + if host_name is None: + self.logger.error("Search: host does not exist while replan with vgroup") + else: + # if len(vgroup.host) == 0: + if host_name not in vgroup.host: + vgroup.host.append(host_name) + node = None + for n in planned_node_list: + if n.node.uuid == vgroup.uuid: + node = n + break + if node is None: + n = Node() + n.node = vgroup + n.sort_base = self._set_virtual_capacity_based_sort(vgroup) + planned_node_list.append(n) + # else: + # self.logger.warn("Search: " + vmk + " is missing while replan") + + current_level_index = LEVELS.index(_current_level) + next_level_index = current_level_index - 1 + if next_level_index < 0: + next_level = LEVELS[0] + else: + next_level = LEVELS[next_level_index] + + return (planned_node_list, next_level) + + def _get_vgroup_of_vm(self, _vmk, _vgroups): + vgroup = None + + for _, g in _vgroups.iteritems(): + if self._check_vm_grouping(g, _vmk) is True: + vgroup = g + break + + return vgroup + + def _check_vm_grouping(self, _g, _vmk): + exist = False + + for sgk, sg in _g.subvgroups.iteritems(): + if isinstance(sg, VM): + if sgk == _vmk: + exist = True + break + elif isinstance(sg, VGroup): + if self._check_vm_grouping(sg, _vmk) is True: + exist 
= True + break + + return exist + + def _get_host_of_vgroup(self, _host, _level): + host = None + + if _level == "host": + host = _host + elif _level == "rack": + if _host in self.avail_hosts.keys(): + host = self.avail_hosts[_host].rack_name + elif _level == "cluster": + if _host in self.avail_hosts.keys(): + host = self.avail_hosts[_host].cluster_name + + return host + + def _run_greedy_as_planned(self, _node_list, _level, _avail_hosts): + avail_resources = {} + if _level == "cluster": + for _, h in _avail_hosts.iteritems(): + if h.cluster_name not in avail_resources.keys(): + avail_resources[h.cluster_name] = h + elif _level == "rack": + for _, h in _avail_hosts.iteritems(): + if h.rack_name not in avail_resources.keys(): + avail_resources[h.rack_name] = h + elif _level == "host": + avail_resources = _avail_hosts + + _node_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + self.logger.debug("Search: level = " + _level) + for on in _node_list: + self.logger.debug(" node = {}, value = {}".format(on.node.name, on.sort_base)) + + while len(_node_list) > 0: + n = _node_list.pop(0) + self.logger.debug("Search: level = " + _level + ", placing node = " + n.node.name) + + best_resource = self._get_best_resource_for_planned(n, _level, avail_resources) + if best_resource is not None: + debug_best_resource = best_resource.get_resource_name(_level) + # elif isinstance(n.node, Volume): + # debug_best_resource = best_resource.host_name + "@" + best_resource.storage.storage_name + self.logger.debug("Search: best resource = " + debug_best_resource + " for node = " + n.node.name) + + self._deduct_reservation(_level, best_resource, n) + self._close_planned_placement(_level, best_resource, n.node) + else: + self.logger.error("Search: fail to place already-planned VMs") + return False + + return True + + def _get_best_resource_for_planned(self, _n, _level, _avail_resources): + best_resource = None + + if _level == "host" and (isinstance(_n.node, VM) or isinstance(_n.node, Volume)): + best_resource = copy.deepcopy(_avail_resources[_n.node.host[0]]) + best_resource.level = "host" + # storage set + else: + vms = {} + volumes = {} + vgroups = {} + if isinstance(_n.node, VGroup): + if LEVELS.index(_n.node.level) < LEVELS.index(_level): + vgroups[_n.node.uuid] = _n.node + else: + for _, sg in _n.node.subvgroups.iteritems(): + if isinstance(sg, VM): + vms[sg.uuid] = sg + elif isinstance(sg, Volume): + volumes[sg.uuid] = sg + elif isinstance(sg, VGroup): + vgroups[sg.uuid] = sg + else: + if isinstance(_n.node, VM): + vms[_n.node.uuid] = _n.node + elif isinstance(_n.node, Volume): + volumes[_n.node.uuid] = _n.node + + (planned_node_list, level) = self._open_planned_list(vms, volumes, vgroups, _level) + + host_name = self._get_host_of_level(_n, _level) + if host_name is None: + self.logger.warn("Search: cannot find host while replanning") + return None + + avail_hosts = {} + for hk, h in self.avail_hosts.iteritems(): + if _level == "cluster": + if h.cluster_name == host_name: + avail_hosts[hk] = h + elif _level == "rack": + if h.rack_name == host_name: + avail_hosts[hk] = h + elif _level == "host": + if h.host_name == host_name: + avail_hosts[hk] = h + + if self._run_greedy_as_planned(planned_node_list, level, avail_hosts) is True: + best_resource = copy.deepcopy(_avail_resources[host_name]) + best_resource.level = _level + + return best_resource + + def _get_host_of_level(self, _n, _level): + host_name = None + + if isinstance(_n.node, VM): + host_name = self._get_host_of_vgroup(_n.node.host[0], _level) 
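+            # e.g., a VM planned on host "compute-7" with _level == "rack"
+            # resolves to that host's rack_name in the availability map
+            # (names here are hypothetical).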
+ elif isinstance(_n.node, VGroup): + if _n.node.level == "host": + host_name = self._get_host_of_vgroup(_n.node.host[0], _level) + elif _n.node.level == "rack": + if _level == "rack": + host_name = _n.node.host[0] + elif _level == "cluster": + for _, ah in self.avail_hosts.iteritems(): + if ah.rack_name == _n.node.host[0]: + host_name = ah.cluster_name + break + elif _n.node.level == "cluster": + if _level == "cluster": + host_name = _n.node.host[0] + + return host_name + + def _close_planned_placement(self, _level, _best, _v): + if _v not in self.planned_placements.keys(): + if _level == "host" or isinstance(_v, VGroup): + self.planned_placements[_v] = _best + + def _create_avail_hosts(self): + for hk, host in self.resource.hosts.iteritems(): + + if host.check_availability() is False: + self.logger.debug("Search: host (" + host.name + ") not available at this time") + continue + + r = Resource() + r.host_name = hk + + for mk in host.memberships.keys(): + if mk in self.avail_logical_groups.keys(): + r.host_memberships[mk] = self.avail_logical_groups[mk] + + for sk in host.storages.keys(): + if sk in self.avail_storage_hosts.keys(): + r.host_avail_storages[sk] = self.avail_storage_hosts[sk] + + for sk in host.switches.keys(): + if sk in self.avail_switches.keys(): + r.host_avail_switches[sk] = self.avail_switches[sk] + + r.host_vCPUs = host.original_vCPUs + r.host_avail_vCPUs = host.avail_vCPUs + r.host_mem = host.original_mem_cap + r.host_avail_mem = host.avail_mem_cap + r.host_local_disk = host.original_local_disk_cap + r.host_avail_local_disk = host.avail_local_disk_cap + + r.host_num_of_placed_vms = len(host.vm_list) + + rack = host.host_group + if isinstance(rack, Datacenter): + r.rack_name = "any" + r.cluster_name = "any" + else: + if rack.status != "enabled": + continue + + r.rack_name = rack.name + + for mk in rack.memberships.keys(): + if mk in self.avail_logical_groups.keys(): + r.rack_memberships[mk] = self.avail_logical_groups[mk] + + for rsk in rack.storages.keys(): + if rsk in self.avail_storage_hosts.keys(): + r.rack_avail_storages[rsk] = self.avail_storage_hosts[rsk] + + for rsk in rack.switches.keys(): + if rsk in self.avail_switches.keys(): + r.rack_avail_switches[rsk] = self.avail_switches[rsk] + + r.rack_vCPUs = rack.original_vCPUs + r.rack_avail_vCPUs = rack.avail_vCPUs + r.rack_mem = rack.original_mem_cap + r.rack_avail_mem = rack.avail_mem_cap + r.rack_local_disk = rack.original_local_disk_cap + r.rack_avail_local_disk = rack.avail_local_disk_cap + + r.rack_num_of_placed_vms = len(rack.vm_list) + + cluster = rack.parent_resource + if isinstance(cluster, Datacenter): + r.cluster_name = "any" + else: + if cluster.status != "enabled": + continue + + r.cluster_name = cluster.name + + for mk in cluster.memberships.keys(): + if mk in self.avail_logical_groups.keys(): + r.cluster_memberships[mk] = self.avail_logical_groups[mk] + + for csk in cluster.storages.keys(): + if csk in self.avail_storage_hosts.keys(): + r.cluster_avail_storages[csk] = self.avail_storage_hosts[csk] + + for csk in cluster.switches.keys(): + if csk in self.avail_switches.keys(): + r.cluster_avail_switches[csk] = self.avail_switches[csk] + + r.cluster_vCPUs = cluster.original_vCPUs + r.cluster_avail_vCPUs = cluster.avail_vCPUs + r.cluster_mem = cluster.original_mem_cap + r.cluster_avail_mem = cluster.avail_mem_cap + r.cluster_local_disk = cluster.original_local_disk_cap + r.cluster_avail_local_disk = cluster.avail_local_disk_cap + + r.cluster_num_of_placed_vms = len(cluster.vm_list) + + if 
r.host_num_of_placed_vms > 0: + self.num_of_hosts += 1 + + self.avail_hosts[hk] = r + + def _create_avail_logical_groups(self): + for lgk, lg in self.resource.logical_groups.iteritems(): + + if lg.status != "enabled": + self.logger.debug("Search: group (" + lg.name + ") disabled") + continue + + lgr = LogicalGroupResource() + lgr.name = lgk + lgr.group_type = lg.group_type + + for mk, mv in lg.metadata.iteritems(): + lgr.metadata[mk] = mv + + lgr.num_of_placed_vms = len(lg.vm_list) + for hk in lg.vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[hk] = len(lg.vms_per_host[hk]) + + for hk in lg.vms_per_host.keys(): + if hk in self.resource.hosts.keys(): + host = self.resource.hosts[hk] + if host.check_availability() is False: + for vm_id in host.vm_list: + if lg.exist_vm_by_uuid(vm_id[2]) is True: + lgr.num_of_placed_vms -= 1 + if hk in lgr.num_of_placed_vms_per_host.keys(): + del lgr.num_of_placed_vms_per_host[hk] + elif hk in self.resource.host_groups.keys(): + host_group = self.resource.host_groups[hk] + if host_group.check_availability() is False: + for vm_id in host_group.vm_list: + if lg.exist_vm_by_uuid(vm_id[2]) is True: + lgr.num_of_placed_vms -= 1 + if hk in lgr.num_of_placed_vms_per_host.keys(): + del lgr.num_of_placed_vms_per_host[hk] + + self.avail_logical_groups[lgk] = lgr + + def _adjust_resources(self): + for h_uuid, info in self.app_topology.old_vm_map.iteritems(): # info = (host, cpu, mem, disk) + if info[0] not in self.avail_hosts.keys(): + continue + + r = self.avail_hosts[info[0]] + + r.host_num_of_placed_vms -= 1 + r.host_avail_vCPUs += info[1] + r.host_avail_mem += info[2] + r.host_avail_local_disk += info[3] + + if r.host_num_of_placed_vms == 0: + self.num_of_hosts -= 1 + + for _, rr in self.avail_hosts.iteritems(): + if rr.rack_name != "any" and rr.rack_name == r.rack_name: + rr.rack_num_of_placed_vms -= 1 + rr.rack_avail_vCPUs += info[1] + rr.rack_avail_mem += info[2] + rr.rack_avail_local_disk += info[3] + + for _, cr in self.avail_hosts.iteritems(): + if cr.cluster_name != "any" and cr.cluster_name == r.cluster_name: + cr.cluster_num_of_placed_vms -= 1 + cr.cluster_avail_vCPUs += info[1] + cr.cluster_avail_mem += info[2] + cr.cluster_avail_local_disk += info[3] + + for lgk in r.host_memberships.keys(): + if lgk not in self.avail_logical_groups.keys(): + continue + if lgk not in self.resource.logical_groups.keys(): + continue + lg = self.resource.logical_groups[lgk] + if lg.exist_vm_by_h_uuid(h_uuid) is True: + lgr = r.host_memberships[lgk] + lgr.num_of_placed_vms -= 1 + if r.host_name in lgr.num_of_placed_vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[r.host_name] -= 1 + if lgr.group_type == "EX" or lgr.group_type == "AFF" or lgr.group_type == "DIV": + if lgr.num_of_placed_vms_per_host[r.host_name] == 0: + del lgr.num_of_placed_vms_per_host[r.host_name] + del r.host_memberships[lgk] + if lgr.group_type == "EX" or lgr.group_type == "AFF" or lgr.group_type == "DIV": + if lgr.num_of_placed_vms == 0: + del self.avail_logical_groups[lgk] + + for lgk in r.rack_memberships.keys(): + if lgk not in self.avail_logical_groups.keys(): + continue + if lgk not in self.resource.logical_groups.keys(): + continue + lg = self.resource.logical_groups[lgk] + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == "rack": + if lg.exist_vm_by_h_uuid(h_uuid) is True: + lgr = r.rack_memberships[lgk] + lgr.num_of_placed_vms -= 1 + if r.rack_name in lgr.num_of_placed_vms_per_host.keys(): + 
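+                            # Roll back this VM's contribution to the rack-level
+                            # group counter; once it drops to zero, the membership
+                            # entry itself is removed (below).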
lgr.num_of_placed_vms_per_host[r.rack_name] -= 1 + if lgr.num_of_placed_vms_per_host[r.rack_name] == 0: + del lgr.num_of_placed_vms_per_host[r.rack_name] + for _, rr in self.avail_hosts.iteritems(): + if rr.rack_name != "any" and rr.rack_name == r.rack_name: + del rr.rack_memberships[lgk] + if lgr.num_of_placed_vms == 0: + del self.avail_logical_groups[lgk] + + for lgk in r.cluster_memberships.keys(): + if lgk not in self.avail_logical_groups.keys(): + continue + if lgk not in self.resource.logical_groups.keys(): + continue + lg = self.resource.logical_groups[lgk] + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == "cluster": + if lg.exist_vm_by_h_uuid(h_uuid) is True: + lgr = r.cluster_memberships[lgk] + lgr.num_of_placed_vms -= 1 + if r.cluster_name in lgr.num_of_placed_vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[r.cluster_name] -= 1 + if lgr.num_of_placed_vms_per_host[r.cluster_name] == 0: + del lgr.num_of_placed_vms_per_host[r.cluster_name] + for _, cr in self.avail_hosts.iteritems(): + if cr.cluster_name != "any" and cr.cluster_name == r.cluster_name: + del cr.cluster_memberships[lgk] + if lgr.num_of_placed_vms == 0: + del self.avail_logical_groups[lgk] + + def _create_avail_storage_hosts(self): + for _, sh in self.resource.storage_hosts.iteritems(): + + if sh.status != "enabled": + continue + + sr = StorageResource() + sr.storage_name = sh.name + sr.storage_class = sh.storage_class + sr.storage_avail_disk = sh.avail_disk_cap + + self.avail_storage_hosts[sr.storage_name] = sr + + def _create_avail_switches(self): + for sk, s in self.resource.switches.iteritems(): + + if s.status != "enabled": + continue + + sr = SwitchResource() + sr.switch_name = s.name + sr.switch_type = s.switch_type + + for _, ul in s.up_links.iteritems(): + sr.avail_bandwidths.append(ul.avail_nw_bandwidth) + + # NOTE: peer_links? 
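+            # At this point sr.avail_bandwidths holds one entry per up-link,
+            # e.g. [10000, 10000] for a switch with two 10G uplinks
+            # (illustrative figures; units follow avail_nw_bandwidth).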
+ + self.avail_switches[sk] = sr + + def _compute_resource_weights(self): + denominator = 0.0 + for (t, w) in self.app_topology.optimization_priority: + denominator += w + + for (t, w) in self.app_topology.optimization_priority: + if t == "bw": + self.nw_bandwidth_weight = float(w / denominator) + elif t == "cpu": + self.CPU_weight = float(w / denominator) + elif t == "mem": + self.mem_weight = float(w / denominator) + elif t == "lvol": + self.local_disk_weight = float(w / denominator) + elif t == "vol": + self.disk_weight = float(w / denominator) + + self.logger.debug("Search: placement priority weights") + for (r, w) in self.app_topology.optimization_priority: + if r == "bw": + self.logger.debug(" nw weight = " + str(self.nw_bandwidth_weight)) + elif r == "cpu": + self.logger.debug(" cpu weight = " + str(self.CPU_weight)) + elif r == "mem": + self.logger.debug(" mem weight = " + str(self.mem_weight)) + elif r == "lvol": + self.logger.debug(" local disk weight = " + str(self.local_disk_weight)) + elif r == "vol": + self.logger.debug(" disk weight = " + str(self.disk_weight)) + + def _open_list(self, _vms, _volumes, _vgroups, _current_level): + open_node_list = [] + next_level = None + + for _, vm in _vms.iteritems(): + n = Node() + n.node = vm + n.sort_base = self._set_virtual_capacity_based_sort(vm) + open_node_list.append(n) + + # volume handling + + for _, g in _vgroups.iteritems(): + n = Node() + n.node = g + n.sort_base = self._set_virtual_capacity_based_sort(g) + open_node_list.append(n) + + current_level_index = LEVELS.index(_current_level) + next_level_index = current_level_index - 1 + if next_level_index < 0: + next_level = LEVELS[0] + else: + next_level = LEVELS[next_level_index] + + return (open_node_list, next_level) + + def _set_virtual_capacity_based_sort(self, _v): + sort_base = -1 + + if isinstance(_v, Volume): + sort_base = self.disk_weight * _v.volume_weight + sort_base += self.nw_bandwidth_weight * _v.bandwidth_weight + elif isinstance(_v, VM): + sort_base = self.nw_bandwidth_weight * _v.bandwidth_weight + sort_base += self.CPU_weight * _v.vCPU_weight + sort_base += self.mem_weight * _v.mem_weight + sort_base += self.local_disk_weight * _v.local_volume_weight + elif isinstance(_v, VGroup): + sort_base = self.nw_bandwidth_weight * _v.bandwidth_weight + sort_base += self.CPU_weight * _v.vCPU_weight + sort_base += self.mem_weight * _v.mem_weight + sort_base += self.local_disk_weight * _v.local_volume_weight + sort_base += self.disk_weight * _v.volume_weight + + return sort_base + + def _run_greedy(self, _open_node_list, _level, _avail_hosts): + success = True + + avail_resources = {} + if _level == "cluster": + for _, h in _avail_hosts.iteritems(): + if h.cluster_name not in avail_resources.keys(): + avail_resources[h.cluster_name] = h + elif _level == "rack": + for _, h in _avail_hosts.iteritems(): + if h.rack_name not in avail_resources.keys(): + avail_resources[h.rack_name] = h + elif _level == "host": + avail_resources = _avail_hosts + + _open_node_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + + self.logger.debug("Search: the order of open node list in level = " + _level) + for on in _open_node_list: + self.logger.debug(" node = {}, value = {}".format(on.node.name, on.sort_base)) + + while len(_open_node_list) > 0: + n = _open_node_list.pop(0) + self.logger.debug("Search: level = " + _level + ", node = " + n.node.name) + + best_resource = self._get_best_resource(n, _level, avail_resources) + if best_resource is None: + success = False + break + + 
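+            # Greedy order: the open list was sorted by sort_base (weighted
+            # virtual capacity) descending, so the heaviest nodes claim
+            # resources first; a single failure aborts this level.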
debug_best_resource = best_resource.get_resource_name(_level) + # if isinstance(n.node, Volume): + # debug_best_resource = debug_best_resource + "@" + best_resource.storage.storage_name + self.logger.debug("Search: best resource = " + debug_best_resource + " for node = " + n.node.name) + + if n.node not in self.planned_placements.keys(): + ''' for VM or Volume under host level only ''' + self._deduct_reservation(_level, best_resource, n) + ''' close all types of nodes under any level, but VM or Volume with above host level ''' + self._close_node_placement(_level, best_resource, n.node) + else: + self.logger.debug("Search: node (" + n.node.name + ") is already deducted") + + return success + + def _get_best_resource(self, _n, _level, _avail_resources): + ''' already planned vgroup ''' + planned_host = None + if _n.node in self.planned_placements.keys(): + self.logger.debug("Search: already determined node = " + _n.node.name) + copied_host = self.planned_placements[_n.node] + if _level == "host": + planned_host = _avail_resources[copied_host.host_name] + elif _level == "rack": + planned_host = _avail_resources[copied_host.rack_name] + elif _level == "cluster": + planned_host = _avail_resources[copied_host.cluster_name] + + else: + if len(self.app_topology.candidate_list_map) > 0: + conflicted_vm_uuid = self.app_topology.candidate_list_map.keys()[0] + candidate_name_list = self.app_topology.candidate_list_map[conflicted_vm_uuid] + if (isinstance(_n.node, VM) and conflicted_vm_uuid == _n.node.uuid) or \ + (isinstance(_n.node, VGroup) and self._check_vm_grouping(_n.node, conflicted_vm_uuid) is True): + host_list = [] + for hk in candidate_name_list: + host_name = self._get_host_of_vgroup(hk, _level) + if host_name is not None: + if host_name not in host_list: + host_list.append(host_name) + else: + self.logger.warn("Search: cannot find candidate host while replanning") + _n.node.host = host_list + + candidate_list = [] + if planned_host is not None: + candidate_list.append(planned_host) + else: + candidate_list = self.constraint_solver.compute_candidate_list(_level, _n, + self.node_placements, + _avail_resources, + self.avail_logical_groups) + if len(candidate_list) == 0: + self.status = self.constraint_solver.status + return None + + self.logger.debug("Search: candidate list") + for c in candidate_list: + self.logger.debug(" candidate = " + c.get_resource_name(_level)) + + (target, _) = self.app_topology.optimization_priority[0] + top_candidate_list = None + + if target == "bw": + constrained_list = [] + for cr in candidate_list: + cr.sort_base = self._estimate_max_bandwidth(_level, _n, cr) + if cr.sort_base == -1: + constrained_list.append(cr) + + for c in constrained_list: + if c in candidate_list: + candidate_list.remove(c) + + if len(candidate_list) == 0: + self.status = "no available network bandwidth left, for node = " + _n.node.name + self.logger.error("Search: " + self.status) + return None + + candidate_list.sort(key=operator.attrgetter("sort_base")) + top_candidate_list = self._sort_highest_consolidation(_n, _level, candidate_list) + else: + if target == "vol": + if isinstance(_n.node, VGroup) or isinstance(_n.node, Volume): + volume_class = None + if isinstance(_n.node, Volume): + volume_class = _n.node.volume_class + else: + max_size = 0 + for vck in _n.node.volume_sizes.keys(): + if _n.node.volume_sizes[vck] > max_size: + max_size = _n.node.volume_sizes[vck] + volume_class = vck + + self._set_disk_sort_base(_level, candidate_list, volume_class) + 
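+                    # sort_base now holds each candidate's largest matching
+                    # storage pool, so the reverse sort below prefers the
+                    # candidate with the most free disk.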
candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + else: + self._set_compute_sort_base(_level, candidate_list) + candidate_list.sort(key=operator.attrgetter("sort_base")) + else: + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + self._set_compute_sort_base(_level, candidate_list) + candidate_list.sort(key=operator.attrgetter("sort_base")) + else: + self._set_disk_sort_base(_level, candidate_list, _n.node.volume_class) + candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + + top_candidate_list = self._sort_lowest_bandwidth_usage(_n, _level, candidate_list) + + if len(top_candidate_list) == 0: + self.status = "no available network bandwidth left" + self.logger.error("Search: " + self.status) + return None + + best_resource = None + if _level == "host" and (isinstance(_n.node, VM) or isinstance(_n.node, Volume)): + best_resource = copy.deepcopy(top_candidate_list[0]) + best_resource.level = "host" + if isinstance(_n.node, Volume): + self._set_best_storage(_n, best_resource) + else: + while True: + + while len(top_candidate_list) > 0: + cr = top_candidate_list.pop(0) + + self.logger.debug("Search: try candidate = " + cr.get_resource_name(_level)) + + vms = {} + volumes = {} + vgroups = {} + if isinstance(_n.node, VGroup): + if LEVELS.index(_n.node.level) < LEVELS.index(_level): + vgroups[_n.node.uuid] = _n.node + else: + for _, sg in _n.node.subvgroups.iteritems(): + if isinstance(sg, VM): + vms[sg.uuid] = sg + elif isinstance(sg, Volume): + volumes[sg.uuid] = sg + elif isinstance(sg, VGroup): + vgroups[sg.uuid] = sg + else: + if isinstance(_n.node, VM): + vms[_n.node.uuid] = _n.node + elif isinstance(_n.node, Volume): + volumes[_n.node.uuid] = _n.node + + (open_node_list, level) = self._open_list(vms, volumes, vgroups, _level) + if open_node_list is None: + break + + avail_hosts = {} + for hk, h in self.avail_hosts.iteritems(): + if _level == "cluster": + if h.cluster_name == cr.cluster_name: + avail_hosts[hk] = h + elif _level == "rack": + if h.rack_name == cr.rack_name: + avail_hosts[hk] = h + elif _level == "host": + if h.host_name == cr.host_name: + avail_hosts[hk] = h + + ''' recursive call ''' + if self._run_greedy(open_node_list, level, avail_hosts) is True: + best_resource = copy.deepcopy(cr) + best_resource.level = _level + break + else: + debug_candidate_name = cr.get_resource_name(_level) + self.logger.debug("Search: rollback of candidate resource = " + debug_candidate_name) + + if planned_host is None: + ''' recursively rollback deductions of all child VMs and Volumes of _n ''' + self._rollback_reservation(_n.node) + ''' recursively rollback closing ''' + self._rollback_node_placement(_n.node) + else: + break + + ''' after explore top candidate list for _n ''' + if best_resource is not None: + break + else: + if len(candidate_list) == 0: + self.status = "no available hosts" + self.logger.warn("Search: " + self.status) + break + else: + if target == "bw": + top_candidate_list = self._sort_highest_consolidation(_n, _level, candidate_list) + else: + top_candidate_list = self._sort_lowest_bandwidth_usage(_n, _level, candidate_list) + if len(top_candidate_list) == 0: + self.status = "no available network bandwidth left" + self.logger.warn("Search: " + self.status) + break + + return best_resource + + def _set_best_storage(self, _n, _resource): + max_storage_size = 0 + for sk in _resource.host_avail_storages.keys(): + s = self.avail_storage_hosts[sk] + if _n.node.volume_class == "any" or s.storage_class == _n.node.volume_class: + 
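+                # Keep the class-matching pool with the largest free capacity;
+                # ties keep the first pool encountered.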
if s.storage_avail_disk > max_storage_size: + max_storage_size = s.storage_avail_disk + _resource.storage = s + + def _sort_lowest_bandwidth_usage(self, _n, _level, _candidate_list): + while True: + top_candidate_list = [] + best_usage = _candidate_list[0].sort_base + + rm_list = [] + for ch in _candidate_list: + if ch.sort_base == best_usage: + top_candidate_list.append(ch) + rm_list.append(ch) + else: + break + _candidate_list[:] = [c for c in _candidate_list if c not in rm_list] + + constrained_list = [] + for c in top_candidate_list: + c.sort_base = self._estimate_max_bandwidth(_level, _n, c) + if c.sort_base == -1: + constrained_list.append(c) + + for c in constrained_list: + if c in top_candidate_list: + top_candidate_list.remove(c) + + if len(top_candidate_list) > 0: + top_candidate_list.sort(key=operator.attrgetter("sort_base")) + break + + if len(_candidate_list) == 0: + break + + return top_candidate_list + + def _sort_highest_consolidation(self, _n, _level, _candidate_list): + top_candidate_list = [] + best_bandwidth_usage = _candidate_list[0].sort_base + + rm_list = [] + for ch in _candidate_list: + if ch.sort_base == best_bandwidth_usage: + top_candidate_list.append(ch) + rm_list.append(ch) + else: + break + _candidate_list[:] = [c for c in _candidate_list if c not in rm_list] + + target = None + for (t, _) in self.app_topology.optimization_priority: + if t != "bw": + target = t + break + + if target == "vol": + if isinstance(_n.node, VGroup) or isinstance(_n.node, Volume): + volume_class = None + if isinstance(_n.node, Volume): + volume_class = _n.node.volume_class + else: + max_size = 0 + for vck in _n.node.volume_sizes.keys(): + if _n.node.volume_sizes[vck] > max_size: + max_size = _n.node.volume_sizes[vck] + volume_class = vck + self._set_disk_sort_base(_level, top_candidate_list, volume_class) + top_candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + else: + self._set_compute_sort_base(_level, top_candidate_list) + top_candidate_list.sort(key=operator.attrgetter("sort_base")) + else: + if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): + self._set_compute_sort_base(_level, top_candidate_list) + top_candidate_list.sort(key=operator.attrgetter("sort_base")) + else: + self._set_disk_sort_base(_level, top_candidate_list, _n.node.volume_class) + top_candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + + return top_candidate_list + + def _set_disk_sort_base(self, _level, _candidate_list, _class): + for c in _candidate_list: + avail_storages = {} + if _level == "cluster": + for sk in c.cluster_avail_storages.keys(): + s = c.cluster_avail_storages[sk] + if _class == "any" or s.storage_class == _class: + avail_storages[sk] = s + elif _level == "rack": + for sk in c.rack_avail_storages.keys(): + s = c.rack_avail_storages[sk] + if _class == "any" or s.storage_class == _class: + avail_storages[sk] = s + elif _level == "host": + for sk in c.host_avail_storages.keys(): + s = c.host_avail_storages[sk] + if _class == "any" or s.storage_class == _class: + avail_storages[sk] = s + + current_max = 0 + for sk in avail_storages.keys(): + s = avail_storages[sk] + if s.storage_avail_disk > current_max: + current_max = s.storage_avail_disk + + c.sort_base = current_max + + def _set_compute_sort_base(self, _level, _candidate_list): + for c in _candidate_list: + CPU_ratio = -1 + mem_ratio = -1 + local_disk_ratio = -1 + if _level == "cluster": + CPU_ratio = float(c.cluster_avail_vCPUs) / float(self.resource.CPU_avail) + mem_ratio = 
float(c.cluster_avail_mem) / float(self.resource.mem_avail) + local_disk_ratio = float(c.cluster_avail_local_disk) / float(self.resource.local_disk_avail) + elif _level == "rack": + CPU_ratio = float(c.rack_avail_vCPUs) / float(self.resource.CPU_avail) + mem_ratio = float(c.rack_avail_mem) / float(self.resource.mem_avail) + local_disk_ratio = float(c.rack_avail_local_disk) / float(self.resource.local_disk_avail) + elif _level == "host": + CPU_ratio = float(c.host_avail_vCPUs) / float(self.resource.CPU_avail) + mem_ratio = float(c.host_avail_mem) / float(self.resource.mem_avail) + local_disk_ratio = float(c.host_avail_local_disk) / float(self.resource.local_disk_avail) + c.sort_base = (1.0 - self.CPU_weight) * CPU_ratio + \ + (1.0 - self.mem_weight) * mem_ratio + \ + (1.0 - self.local_disk_weight) * local_disk_ratio + + def _estimate_max_bandwidth(self, _level, _n, _candidate): + nw_bandwidth_penalty = self._estimate_nw_bandwidth_penalty(_level, _n, _candidate) + + if nw_bandwidth_penalty >= 0: + return nw_bandwidth_penalty + else: + return -1 + + def _estimate_nw_bandwidth_penalty(self, _level, _n, _candidate): + sort_base = 0 # Set bandwidth usage penalty by placement + + # To check the bandwidth constraint at the last moment + # 3rd entry to be used for special node communicating beyond datacenter or zone + req_bandwidths = [0, 0, 0] + + link_list = _n.get_all_links() + + placed_link_list = [] + for vl in link_list: + for v in self.node_placements.keys(): + if v.uuid == vl.node.uuid: + placed_link_list.append(vl) + + bandwidth = _n.get_bandwidth_of_link(vl) + placement_level = _candidate.get_common_placement(self.node_placements[v]) + if placement_level != "ANY" and LEVELS.index(placement_level) >= LEVELS.index(_level): + sort_base += compute_reservation(_level, placement_level, bandwidth) + self.constraint_solver.get_req_bandwidths(_level, + placement_level, + bandwidth, + req_bandwidths) + + candidate = copy.deepcopy(_candidate) + + exclusivity_ids = self.constraint_solver.get_exclusivities(_n.node.exclusivity_groups, _level) + exclusivity_id = None + if len(exclusivity_ids) > 0: + exclusivity_id = exclusivity_ids[exclusivity_ids.keys()[0]] + temp_exclusivity_insert = False + if exclusivity_id is not None: + if exclusivity_id not in self.avail_logical_groups.keys(): + temp_lgr = LogicalGroupResource() + temp_lgr.name = exclusivity_id + temp_lgr.group_type = "EX" + + self.avail_logical_groups[exclusivity_id] = temp_lgr + temp_exclusivity_insert = True + + self._add_exclusivity_to_candidate(_level, candidate, exclusivity_id) + + affinity_id = _n.get_affinity_id() + temp_affinity_insert = False + if affinity_id is not None: + if affinity_id not in self.avail_logical_groups.keys(): + temp_lgr = LogicalGroupResource() + temp_lgr.name = affinity_id + temp_lgr.group_type = "AFF" + + self.avail_logical_groups[affinity_id] = temp_lgr + temp_affinity_insert = True + + self._add_affinity_to_candidate(_level, candidate, affinity_id) + + self._deduct_reservation_from_candidate(candidate, _n, req_bandwidths, _level) + + handled_vgroups = {} + for vl in link_list: + if vl in placed_link_list: + continue + + bandwidth = _n.get_bandwidth_of_link(vl) + + diversity_level = _n.get_common_diversity(vl.node.diversity_groups) + if diversity_level == "ANY": + implicit_diversity = self.constraint_solver.get_implicit_diversity(_n.node, + link_list, + vl.node, + _level) + if implicit_diversity[0] is not None: + diversity_level = implicit_diversity[1] + if diversity_level == "ANY" or LEVELS.index(diversity_level) 
< LEVELS.index(_level): + vg = self._get_top_vgroup(vl.node, _level) + if vg.uuid not in handled_vgroups.keys(): + handled_vgroups[vg.uuid] = vg + + temp_n = Node() + temp_n.node = vg + temp_req_bandwidths = [0, 0, 0] + self.constraint_solver.get_req_bandwidths(_level, _level, bandwidth, temp_req_bandwidths) + + if self._check_availability(_level, temp_n, candidate) is True: + self._deduct_reservation_from_candidate(candidate, temp_n, temp_req_bandwidths, _level) + else: + sort_base += compute_reservation(_level, _level, bandwidth) + req_bandwidths[0] += temp_req_bandwidths[0] + req_bandwidths[1] += temp_req_bandwidths[1] + req_bandwidths[2] += temp_req_bandwidths[2] + else: + self.constraint_solver.get_req_bandwidths(_level, diversity_level, bandwidth, req_bandwidths) + sort_base += compute_reservation(_level, diversity_level, bandwidth) + + if self.constraint_solver._check_nw_bandwidth_availability(_level, req_bandwidths, _candidate) is False: + sort_base = -1 + + if temp_exclusivity_insert is True: + del self.avail_logical_groups[exclusivity_id] + + if temp_affinity_insert is True: + del self.avail_logical_groups[affinity_id] + + return sort_base + + def _add_exclusivity_to_candidate(self, _level, _candidate, _exclusivity_id): + lgr = self.avail_logical_groups[_exclusivity_id] + + if _level == "host": + if _exclusivity_id not in _candidate.host_memberships.keys(): + _candidate.host_memberships[_exclusivity_id] = lgr + if _exclusivity_id not in _candidate.rack_memberships.keys(): + _candidate.rack_memberships[_exclusivity_id] = lgr + if _exclusivity_id not in _candidate.cluster_memberships.keys(): + _candidate.cluster_memberships[_exclusivity_id] = lgr + elif _level == "rack": + if _exclusivity_id not in _candidate.rack_memberships.keys(): + _candidate.rack_memberships[_exclusivity_id] = lgr + if _exclusivity_id not in _candidate.cluster_memberships.keys(): + _candidate.cluster_memberships[_exclusivity_id] = lgr + elif _level == "cluster": + if _exclusivity_id not in _candidate.cluster_memberships.keys(): + _candidate.cluster_memberships[_exclusivity_id] = lgr + + def _add_affinity_to_candidate(self, _level, _candidate, _affinity_id): + lgr = self.avail_logical_groups[_affinity_id] + + if _level == "host": + if _affinity_id not in _candidate.host_memberships.keys(): + _candidate.host_memberships[_affinity_id] = lgr + if _affinity_id not in _candidate.rack_memberships.keys(): + _candidate.rack_memberships[_affinity_id] = lgr + if _affinity_id not in _candidate.cluster_memberships.keys(): + _candidate.cluster_memberships[_affinity_id] = lgr + elif _level == "rack": + if _affinity_id not in _candidate.rack_memberships.keys(): + _candidate.rack_memberships[_affinity_id] = lgr + if _affinity_id not in _candidate.cluster_memberships.keys(): + _candidate.cluster_memberships[_affinity_id] = lgr + elif _level == "cluster": + if _affinity_id not in _candidate.cluster_memberships.keys(): + _candidate.cluster_memberships[_affinity_id] = lgr + + def _get_top_vgroup(self, _v, _level): + vg = _v.survgroup + + if vg is None: + return _v + + if LEVELS.index(vg.level) > LEVELS.index(_level): + return _v + + return self._get_top_vgroup(vg, _level) + + def _check_availability(self, _level, _n, _candidate): + if isinstance(_n.node, VM): + if self.constraint_solver.check_cpu_capacity(_level, _n.node, _candidate) is False: + return False + if self.constraint_solver.check_mem_capacity(_level, _n.node, _candidate) is False: + return False + if self.constraint_solver.check_local_disk_capacity(_level, _n.node, 
_candidate) is False: + return False + elif isinstance(_n.node, Volume): + if self.constraint_solver.check_storage_availability(_level, _n.node, _candidate) is False: + return False + else: + if self.constraint_solver.check_cpu_capacity(_level, _n.node, _candidate) is False or \ + self.constraint_solver.check_mem_capacity(_level, _n.node, _candidate) is False or \ + self.constraint_solver.check_local_disk_capacity(_level, _n.node, _candidate) is False or \ + self.constraint_solver.check_storage_availability(_level, _n.node, _candidate) is False: + return False + + if self.constraint_solver.check_nw_bandwidth_availability(_level, _n, + self.node_placements, + _candidate) is False: + return False + + if isinstance(_n.node, VM): + if len(_n.node.extra_specs_list) > 0: + if self.constraint_solver.check_host_aggregates(_level, _candidate, _n.node) is False: + return False + + if isinstance(_n.node, VM): + if _n.node.availability_zone is not None: + if self.constraint_solver.check_availability_zone(_level, _candidate, _n.node) is False: + return False + + if self.constraint_solver.conflict_diversity(_level, _n, self.node_placements, _candidate) is True: + return False + + exclusivities = self.constraint_solver.get_exclusivities(_n.node.exclusivity_groups, _level) + if len(exclusivities) == 1: + exc_id = exclusivities[exclusivities.keys()[0]] + if self.constraint_solver.exist_group(_level, exc_id, "EX", _candidate) is False: + return False + elif len(exclusivities) < 1: + if self.constraint_solver.conflict_exclusivity(_level, _candidate): + return False + + aff_id = _n.get_affinity_id() + if aff_id is not None: + if aff_id in self.avail_logical_groups.keys(): + if self.constraint_solver.exist_group(_level, aff_id, "AFF", _candidate) is False: + return False + + return True + + def _deduct_reservation_from_candidate(self, _candidate, _n, _rsrv, _level): + if isinstance(_n.node, VM) or isinstance(_n.node, VGroup): + self._deduct_candidate_vm_reservation(_level, _n.node, _candidate) + + if isinstance(_n.node, Volume) or isinstance(_n.node, VGroup): + self._deduct_candidate_volume_reservation(_level, _n.node, _candidate) + + self._deduct_candidate_nw_reservation(_candidate, _rsrv) + + def _deduct_candidate_vm_reservation(self, _level, _v, _candidate): + is_vm_included = False + if isinstance(_v, VM): + is_vm_included = True + elif isinstance(_v, VGroup): + is_vm_included = self._check_vm_included(_v) + + if _level == "cluster": + _candidate.cluster_avail_vCPUs -= _v.vCPUs + _candidate.cluster_avail_mem -= _v.mem + _candidate.cluster_avail_local_disk -= _v.local_volume_size + if is_vm_included is True: + _candidate.cluster_num_of_placed_vms += 1 + elif _level == "rack": + _candidate.cluster_avail_vCPUs -= _v.vCPUs + _candidate.cluster_avail_mem -= _v.mem + _candidate.cluster_avail_local_disk -= _v.local_volume_size + if is_vm_included is True: + _candidate.cluster_num_of_placed_vms += 1 + _candidate.rack_avail_vCPUs -= _v.vCPUs + _candidate.rack_avail_mem -= _v.mem + _candidate.rack_avail_local_disk -= _v.local_volume_size + if is_vm_included is True: + _candidate.rack_num_of_placed_vms += 1 + elif _level == "host": + _candidate.cluster_avail_vCPUs -= _v.vCPUs + _candidate.cluster_avail_mem -= _v.mem + _candidate.cluster_avail_local_disk -= _v.local_volume_size + if is_vm_included is True: + _candidate.cluster_num_of_placed_vms += 1 + _candidate.rack_avail_vCPUs -= _v.vCPUs + _candidate.rack_avail_mem -= _v.mem + _candidate.rack_avail_local_disk -= _v.local_volume_size + if is_vm_included is True: + 
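+                # added note: is_vm_included gates only the placed-VM counters,
+                # so volume-only subtrees never inflate VM counts, while
+                # vCPU/mem/disk capacity is deducted at every enclosing scope.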
_candidate.rack_num_of_placed_vms += 1 + _candidate.host_avail_vCPUs -= _v.vCPUs + _candidate.host_avail_mem -= _v.mem + _candidate.host_avail_local_disk -= _v.local_volume_size + if is_vm_included is True: + _candidate.host_num_of_placed_vms += 1 + + def _check_vm_included(self, _v): + is_vm_included = False + + for _, sv in _v.subvgroups.iteritems(): + if isinstance(sv, VM): + is_vm_included = True + break + elif isinstance(sv, VGroup): + is_vm_included = self._check_vm_included(sv) + if is_vm_included is True: + break + + return is_vm_included + + def _deduct_candidate_volume_reservation(self, _level, _v, _candidate): + volume_sizes = [] + if isinstance(_v, VGroup): + for vck in _v.volume_sizes.keys(): + volume_sizes.append((vck, _v.volume_sizes[vck])) + else: + volume_sizes.append((_v.volume_class, _v.volume_size)) + + for (vc, vs) in volume_sizes: + max_size = 0 + selected_storage = None + if _level == "cluster": + for sk in _candidate.cluster_avail_storages.keys(): + s = _candidate.cluster_avail_storages[sk] + if vc == "any" or s.storage_class == vc: + if s.storage_avail_disk > max_size: + max_size = s.storage_avail_disk + selected_storage = s + selected_storage.storage_avail_disk -= vs + elif _level == "rack": + for sk in _candidate.rack_avail_storages.keys(): + s = _candidate.rack_avail_storages[sk] + if vc == "any" or s.storage_class == vc: + if s.storage_avail_disk > max_size: + max_size = s.storage_avail_disk + selected_storage = s + selected_storage.storage_avail_disk -= vs + elif _level == "host": + for sk in _candidate.host_avail_storages.keys(): + s = _candidate.host_avail_storages[sk] + if vc == "any" or s.storage_class == vc: + if s.storage_avail_disk > max_size: + max_size = s.storage_avail_disk + selected_storage = s + selected_storage.storage_avail_disk -= vs + + def _deduct_candidate_nw_reservation(self, _candidate, _rsrv): + for srk in _candidate.host_avail_switches.keys(): + sr = _candidate.host_avail_switches[srk] + sr.avail_bandwidths = [bw - _rsrv[0] for bw in sr.avail_bandwidths] + + for srk in _candidate.rack_avail_switches.keys(): + sr = _candidate.rack_avail_switches[srk] + sr.avail_bandwidths = [bw - _rsrv[1] for bw in sr.avail_bandwidths] + + for srk in _candidate.cluster_avail_switches.keys(): + sr = _candidate.cluster_avail_switches[srk] + if sr.switch_type == "spine": + sr.avail_bandwidths = [bw - _rsrv[2] for bw in sr.avail_bandwidths] + + ''' + deduction modules + ''' + + def _deduct_reservation(self, _level, _best, _n): + exclusivities = self.constraint_solver.get_exclusivities(_n.node.exclusivity_groups, _level) + exclusivity_id = None + if len(exclusivities) == 1: + exclusivity_id = exclusivities[exclusivities.keys()[0]] + if exclusivity_id is not None: + self._add_exclusivity(_level, _best, exclusivity_id) + + affinity_id = _n.get_affinity_id() + if affinity_id is not None and affinity_id.split(":")[1] != "any": + self._add_affinity(_level, _best, affinity_id) + + if len(_n.node.diversity_groups) > 0: + for _, diversity_id in _n.node.diversity_groups.iteritems(): + if diversity_id.split(":")[1] != "any": + self._add_diversities(_level, _best, diversity_id) + + if isinstance(_n.node, VM) and _level == "host": + self._deduct_vm_resources(_best, _n) + elif isinstance(_n.node, Volume) and _level == "host": + self._deduct_volume_resources(_best, _n) + + def _add_exclusivity(self, _level, _best, _exclusivity_id): + lgr = None + if _exclusivity_id not in self.avail_logical_groups.keys(): + lgr = LogicalGroupResource() + lgr.name = _exclusivity_id + 
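+            # added note: exclusivity ids appear to follow the
+            # "<level>:<group_name>" convention; the level prefix is compared
+            # against _level via split(":")[0] further down.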
lgr.group_type = "EX" + self.avail_logical_groups[lgr.name] = lgr + + self.logger.debug("Search: add new exclusivity (" + _exclusivity_id + ")") + else: + lgr = self.avail_logical_groups[_exclusivity_id] + + if _exclusivity_id.split(":")[0] == _level: + lgr.num_of_placed_vms += 1 + + host_name = _best.get_resource_name(_level) + if host_name not in lgr.num_of_placed_vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[host_name] = 0 + lgr.num_of_placed_vms_per_host[host_name] += 1 + + chosen_host = self.avail_hosts[_best.host_name] + if _level == "host": + if _exclusivity_id not in chosen_host.host_memberships.keys(): + chosen_host.host_memberships[_exclusivity_id] = lgr + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if _exclusivity_id not in np.rack_memberships.keys(): + np.rack_memberships[_exclusivity_id] = lgr + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _exclusivity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_exclusivity_id] = lgr + elif _level == "rack": + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if _exclusivity_id not in np.rack_memberships.keys(): + np.rack_memberships[_exclusivity_id] = lgr + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _exclusivity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_exclusivity_id] = lgr + elif _level == "cluster": + for _, np in self.avail_hosts.iteritems(): + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _exclusivity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_exclusivity_id] = lgr + + def _add_affinity(self, _level, _best, _affinity_id): + lgr = None + if _affinity_id not in self.avail_logical_groups.keys(): + lgr = LogicalGroupResource() + lgr.name = _affinity_id + lgr.group_type = "AFF" + self.avail_logical_groups[lgr.name] = lgr + + self.logger.debug("Search: add new affinity (" + _affinity_id + ")") + else: + lgr = self.avail_logical_groups[_affinity_id] + + if _affinity_id.split(":")[0] == _level: + lgr.num_of_placed_vms += 1 + + host_name = _best.get_resource_name(_level) + if host_name not in lgr.num_of_placed_vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[host_name] = 0 + lgr.num_of_placed_vms_per_host[host_name] += 1 + + chosen_host = self.avail_hosts[_best.host_name] + if _level == "host": + if _affinity_id not in chosen_host.host_memberships.keys(): + chosen_host.host_memberships[_affinity_id] = lgr + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if _affinity_id not in np.rack_memberships.keys(): + np.rack_memberships[_affinity_id] = lgr + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _affinity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_affinity_id] = lgr + elif _level == "rack": + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if _affinity_id not in np.rack_memberships.keys(): + np.rack_memberships[_affinity_id] = lgr + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _affinity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_affinity_id] = lgr + elif _level == "cluster": + for _, np in 
self.avail_hosts.iteritems(): + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _affinity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_affinity_id] = lgr + + def _add_diversities(self, _level, _best, _diversity_id): + lgr = None + if _diversity_id not in self.avail_logical_groups.keys(): + lgr = LogicalGroupResource() + lgr.name = _diversity_id + lgr.group_type = "DIV" + self.avail_logical_groups[lgr.name] = lgr + + self.logger.debug("Search: add new diversity (" + _diversity_id + ")") + else: + lgr = self.avail_logical_groups[_diversity_id] + + if _diversity_id.split(":")[0] == _level: + lgr.num_of_placed_vms += 1 + + host_name = _best.get_resource_name(_level) + if host_name not in lgr.num_of_placed_vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[host_name] = 0 + lgr.num_of_placed_vms_per_host[host_name] += 1 + + chosen_host = self.avail_hosts[_best.host_name] + if _level == "host": + if _diversity_id not in chosen_host.host_memberships.keys(): + chosen_host.host_memberships[_diversity_id] = lgr + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if _diversity_id not in np.rack_memberships.keys(): + np.rack_memberships[_diversity_id] = lgr + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _diversity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_diversity_id] = lgr + elif _level == "rack": + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if _diversity_id not in np.rack_memberships.keys(): + np.rack_memberships[_diversity_id] = lgr + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _diversity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_diversity_id] = lgr + elif _level == "cluster": + for _, np in self.avail_hosts.iteritems(): + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if _diversity_id not in np.cluster_memberships.keys(): + np.cluster_memberships[_diversity_id] = lgr + + def _deduct_vm_resources(self, _best, _n): + chosen_host = self.avail_hosts[_best.host_name] + chosen_host.host_avail_vCPUs -= _n.node.vCPUs + chosen_host.host_avail_mem -= _n.node.mem + chosen_host.host_avail_local_disk -= _n.node.local_volume_size + + if chosen_host.host_num_of_placed_vms == 0: + self.num_of_hosts += 1 + chosen_host.host_num_of_placed_vms += 1 + + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + np.rack_avail_vCPUs -= _n.node.vCPUs + np.rack_avail_mem -= _n.node.mem + np.rack_avail_local_disk -= _n.node.local_volume_size + np.rack_num_of_placed_vms += 1 + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + np.cluster_avail_vCPUs -= _n.node.vCPUs + np.cluster_avail_mem -= _n.node.mem + np.cluster_avail_local_disk -= _n.node.local_volume_size + np.cluster_num_of_placed_vms += 1 + + for vml in _n.node.vm_list: + if vml.node in self.node_placements.keys(): + cn = self.avail_hosts[self.node_placements[vml.node].host_name] + placement_level = cn.get_common_placement(chosen_host) + bandwidth = vml.nw_bandwidth + self.bandwidth_usage += self._deduct_nw_reservation(placement_level, chosen_host, cn, bandwidth) + + for voll in _n.node.volume_list: + if voll.node in self.node_placements.keys(): + cn = 
self.avail_hosts[self.node_placements[voll.node].host_name] + placement_level = cn.get_common_placement(chosen_host) + bandwidth = voll.io_bandwidth + self.bandwidth_usage += self._deduct_nw_reservation(placement_level, chosen_host, cn, bandwidth) + + def _deduct_volume_resources(self, _best, _n): + storage_host = self.avail_storage_hosts[_best.storage.storage_name] + storage_host.storage_avail_disk -= _n.node.volume_size + + chosen_host = self.avail_hosts[_best.host_name] + + for vml in _n.node.vm_list: + if vml.node in self.node_placements.keys(): + cn = self.avail_hosts[self.node_placements[vml.node].host_name] + placement_level = cn.get_common_placement(chosen_host) + bandwidth = vml.io_bandwidth + self.bandwidth_usage += self._deduct_nw_reservation(placement_level, chosen_host, cn, bandwidth) + + def _deduct_nw_reservation(self, _placement_level, _host1, _host2, _rsrv): + nw_reservation = compute_reservation("host", _placement_level, _rsrv) + + if _placement_level == "host": + for _, sr in _host1.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + elif _placement_level == "rack": + for _, sr in _host1.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + + for _, sr in _host1.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + elif _placement_level == "cluster": + for _, sr in _host1.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + + for _, sr in _host1.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + + for _, sr in _host1.cluster_avail_switches.iteritems(): + if sr.switch_type == "spine": + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.cluster_avail_switches.iteritems(): + if sr.switch_type == "spine": + sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + + return nw_reservation + + def _close_node_placement(self, _level, _best, _v): + if _v not in self.node_placements.keys(): + if _level == "host" or isinstance(_v, VGroup): + self.node_placements[_v] = _best + + ''' + rollback modules + ''' + + def _rollback_reservation(self, _v): + if isinstance(_v, VM): + self._rollback_vm_reservation(_v) + + elif isinstance(_v, Volume): + self._rollback_volume_reservation(_v) + + elif isinstance(_v, VGroup): + for _, v in _v.subvgroups.iteritems(): + self._rollback_reservation(v) + + if _v in self.node_placements.keys(): + self.logger.debug("Search: node (" + _v.name + ") rollbacked") + + chosen_host = self.avail_hosts[self.node_placements[_v].host_name] + level = self.node_placements[_v].level + + if isinstance(_v, VGroup): + affinity_id = _v.level + ":" + _v.name + if _v.name != "any": + self._remove_affinity(chosen_host, affinity_id, level) + + exclusivities = 
self.constraint_solver.get_exclusivities(_v.exclusivity_groups, level) + if len(exclusivities) == 1: + exclusivity_id = exclusivities[exclusivities.keys()[0]] + self._remove_exclusivity(chosen_host, exclusivity_id, level) + + if len(_v.diversity_groups) > 0: + for _, diversity_id in _v.diversity_groups.iteritems(): + if diversity_id.split(":")[1] != "any": + self._remove_diversities(chosen_host, diversity_id, level) + + def _remove_exclusivity(self, _chosen_host, _exclusivity_id, _level): + if _exclusivity_id.split(":")[0] == _level: + lgr = self.avail_logical_groups[_exclusivity_id] + + host_name = _chosen_host.get_resource_name(_level) + lgr.num_of_placed_vms -= 1 + lgr.num_of_placed_vms_per_host[host_name] -= 1 + + if lgr.num_of_placed_vms_per_host[host_name] == 0: + del lgr.num_of_placed_vms_per_host[host_name] + + if lgr.num_of_placed_vms == 0: + del self.avail_logical_groups[_exclusivity_id] + + if _level == "host": + if _chosen_host.host_num_of_placed_vms == 0 and \ + _exclusivity_id in _chosen_host.host_memberships.keys(): + del _chosen_host.host_memberships[_exclusivity_id] + + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _exclusivity_id in np.rack_memberships.keys(): + del np.rack_memberships[_exclusivity_id] + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _exclusivity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_exclusivity_id] + + elif _level == "rack": + if _chosen_host.rack_num_of_placed_vms == 0: + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _exclusivity_id in np.rack_memberships.keys(): + del np.rack_memberships[_exclusivity_id] + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _exclusivity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_exclusivity_id] + + elif _level == "cluster": + if _chosen_host.cluster_num_of_placed_vms == 0: + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _exclusivity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_exclusivity_id] + + def _remove_affinity(self, _chosen_host, _affinity_id, _level): + if _affinity_id.split(":")[0] == _level: + lgr = self.avail_logical_groups[_affinity_id] + + host_name = _chosen_host.get_resource_name(_level) + lgr.num_of_placed_vms -= 1 + lgr.num_of_placed_vms_per_host[host_name] -= 1 + + if lgr.num_of_placed_vms_per_host[host_name] == 0: + del lgr.num_of_placed_vms_per_host[host_name] + + if lgr.num_of_placed_vms == 0: + del self.avail_logical_groups[_affinity_id] + + exist_affinity = True + if _affinity_id not in self.avail_logical_groups.keys(): + exist_affinity = False + else: + lgr = self.avail_logical_groups[_affinity_id] + host_name = _chosen_host.get_resource_name(_level) + if host_name not in lgr.num_of_placed_vms_per_host.keys(): + exist_affinity = False + + if _level == "host": + if exist_affinity is False and _affinity_id in _chosen_host.host_memberships.keys(): + del _chosen_host.host_memberships[_affinity_id] + + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _affinity_id in np.rack_memberships.keys(): + del np.rack_memberships[_affinity_id] + if _chosen_host.cluster_name != "any" and np.cluster_name == 
_chosen_host.cluster_name: + if _affinity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_affinity_id] + + elif _level == "rack": + if exist_affinity is False: + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _affinity_id in np.rack_memberships.keys(): + del np.rack_memberships[_affinity_id] + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _affinity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_affinity_id] + + elif _level == "cluster": + if exist_affinity is False: + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _affinity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_affinity_id] + + def _remove_diversities(self, _chosen_host, _diversity_id, _level): + if _diversity_id.split(":")[0] == _level: + lgr = self.avail_logical_groups[_diversity_id] + + host_name = _chosen_host.get_resource_name(_level) + lgr.num_of_placed_vms -= 1 + lgr.num_of_placed_vms_per_host[host_name] -= 1 + + if lgr.num_of_placed_vms_per_host[host_name] == 0: + del lgr.num_of_placed_vms_per_host[host_name] + + if lgr.num_of_placed_vms == 0: + del self.avail_logical_groups[_diversity_id] + + exist_diversity = True + if _diversity_id not in self.avail_logical_groups.keys(): + exist_diversity = False + else: + lgr = self.avail_logical_groups[_diversity_id] + host_name = _chosen_host.get_resource_name(_level) + if host_name not in lgr.num_of_placed_vms_per_host.keys(): + exist_diversity = False + + if _level == "host": + if exist_diversity is False and _diversity_id in _chosen_host.host_memberships.keys(): + del _chosen_host.host_memberships[_diversity_id] + + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _diversity_id in np.rack_memberships.keys(): + del np.rack_memberships[_diversity_id] + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _diversity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_diversity_id] + + elif _level == "rack": + if exist_diversity is False: + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _diversity_id in np.rack_memberships.keys(): + del np.rack_memberships[_diversity_id] + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _diversity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_diversity_id] + + elif _level == "cluster": + if exist_diversity is False: + for _, np in self.avail_hosts.iteritems(): + if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _diversity_id in np.cluster_memberships.keys(): + del np.cluster_memberships[_diversity_id] + + def _rollback_vm_reservation(self, _v): + if _v in self.node_placements.keys(): + chosen_host = self.avail_hosts[self.node_placements[_v].host_name] + chosen_host.host_avail_vCPUs += _v.vCPUs + chosen_host.host_avail_mem += _v.mem + chosen_host.host_avail_local_disk += _v.local_volume_size + + chosen_host.host_num_of_placed_vms -= 1 + if chosen_host.host_num_of_placed_vms == 0: + self.num_of_hosts -= 1 + + for _, np in self.avail_hosts.iteritems(): + if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + np.rack_avail_vCPUs += 
_v.vCPUs + np.rack_avail_mem += _v.mem + np.rack_avail_local_disk += _v.local_volume_size + np.rack_num_of_placed_vms -= 1 + if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + np.cluster_avail_vCPUs += _v.vCPUs + np.cluster_avail_mem += _v.mem + np.cluster_avail_local_disk += _v.local_volume_size + np.cluster_num_of_placed_vms -= 1 + + for vml in _v.vm_list: + if vml.node in self.node_placements.keys(): + cn = self.avail_hosts[self.node_placements[vml.node].host_name] + level = cn.get_common_placement(chosen_host) + bandwidth = vml.nw_bandwidth + self.bandwidth_usage -= self._rollback_nw_reservation(level, chosen_host, cn, bandwidth) + + for voll in _v.volume_list: + if voll.node in self.node_placements.keys(): + cn = self.avail_hosts[self.node_placements[voll.node].host_name] + level = cn.get_common_placement(chosen_host) + bandwidth = voll.io_bandwidth + self.bandwidth_usage -= self._rollback_nw_reservation(level, chosen_host, cn, bandwidth) + + def _rollback_volume_reservation(self, _v): + if _v in self.node_placements.keys(): + cs = self.node_placements[_v] + storage_host = self.avail_storage_hosts[cs.storage.storage_name] + storage_host.storage_avail_disk += _v.volume_size + + chosen_host = self.avail_hosts[self.node_placements[_v].host_name] + + for vml in _v.vm_list: + if vml.node in self.node_placements.keys(): + cn = self.avail_hosts[self.node_placements[vml.node].host_name] + level = cn.get_common_placement(chosen_host) + bandwidth = vml.io_bandwidth + self.bandwidth_usage -= self._rollback_nw_reservation(level, chosen_host, cn, bandwidth) + + def _rollback_nw_reservation(self, _level, _host1, _host2, _rsrv): + nw_reservation = compute_reservation("host", _level, _rsrv) + + if _level == "host": + for _, sr in _host1.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + elif _level == "rack": + for _, sr in _host1.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + + for _, sr in _host1.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + elif _level == "cluster": + for _, sr in _host1.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.host_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + + for _, sr in _host1.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.rack_avail_switches.iteritems(): + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + + for _, sr in _host1.cluster_avail_switches.iteritems(): + if sr.switch_type == "spine": + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + for _, sr in _host2.cluster_avail_switches.iteritems(): + if sr.switch_type == "spine": + sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + + return nw_reservation + + def _rollback_node_placement(self, _v): + if _v in self.node_placements.keys(): + del self.node_placements[_v] + self.logger.debug("Search: node (" + _v.name + 
") removed from placement") + + if isinstance(_v, VGroup): + for _, sg in _v.subvgroups.iteritems(): + self._rollback_node_placement(sg) diff --git a/valet/engine/optimizer/ostro/search_base.py b/valet/engine/optimizer/ostro/search_base.py new file mode 100755 index 0000000..c2efefd --- /dev/null +++ b/valet/engine/optimizer/ostro/search_base.py @@ -0,0 +1,300 @@ +#!/bin/python + +# Modified: Sep. 22, 2016 + + +from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume, LEVELS + + +class Resource(object): + + def __init__(self): + self.level = None # level of placement + + self.host_name = None + self.host_memberships = {} # all mapped logical groups to host + self.host_vCPUs = 0 # original total vCPUs before overcommit + self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit + self.host_mem = 0 # original total mem cap before overcommit + self.host_avail_mem = 0 # remaining mem cap after + self.host_local_disk = 0 # original total local disk cap before overcommit + self.host_avail_local_disk = 0 # remaining local disk cap after overcommit + self.host_avail_switches = {} # all mapped switches to host + self.host_avail_storages = {} # all mapped storage_resources to host + self.host_num_of_placed_vms = 0 # the number of vms currently placed in this host + + self.rack_name = None # where this host is located + self.rack_memberships = {} + self.rack_vCPUs = 0 + self.rack_avail_vCPUs = 0 + self.rack_mem = 0 + self.rack_avail_mem = 0 + self.rack_local_disk = 0 + self.rack_avail_local_disk = 0 + self.rack_avail_switches = {} # all mapped switches to rack + self.rack_avail_storages = {} # all mapped storage_resources to rack + self.rack_num_of_placed_vms = 0 + + self.cluster_name = None # where this host and rack are located + self.cluster_memberships = {} + self.cluster_vCPUs = 0 + self.cluster_avail_vCPUs = 0 + self.cluster_mem = 0 + self.cluster_avail_mem = 0 + self.cluster_local_disk = 0 + self.cluster_avail_local_disk = 0 + self.cluster_avail_switches = {} # all mapped switches to cluster + self.cluster_avail_storages = {} # all mapped storage_resources to cluster + self.cluster_num_of_placed_vms = 0 + + self.storage = None # selected best storage for volume among host_avail_storages + + self.sort_base = 0 # order to place + + def get_common_placement(self, _resource): + level = None + + if self.cluster_name != _resource.cluster_name: + level = "cluster" + else: + if self.rack_name != _resource.rack_name: + level = "rack" + else: + if self.host_name != _resource.host_name: + level = "host" + else: + level = "ANY" + + return level + + def get_resource_name(self, _level): + name = "unknown" + + if _level == "cluster": + name = self.cluster_name + elif _level == "rack": + name = self.rack_name + elif _level == "host": + name = self.host_name + + return name + + def get_memberships(self, _level): + memberships = None + + if _level == "cluster": + memberships = self.cluster_memberships + elif _level == "rack": + memberships = self.rack_memberships + elif _level == "host": + memberships = self.host_memberships + + return memberships + + def get_num_of_placed_vms(self, _level): + num_of_vms = 0 + + if _level == "cluster": + num_of_vms = self.cluster_num_of_placed_vms + elif _level == "rack": + num_of_vms = self.rack_num_of_placed_vms + elif _level == "host": + num_of_vms = self.host_num_of_placed_vms + + return num_of_vms + + def get_avail_resources(self, _level): + avail_vCPUs = 0 + avail_mem = 0 + avail_local_disk = 0 + + if _level == "cluster": + avail_vCPUs = 
self.cluster_avail_vCPUs + avail_mem = self.cluster_avail_mem + avail_local_disk = self.cluster_avail_local_disk + elif _level == "rack": + avail_vCPUs = self.rack_avail_vCPUs + avail_mem = self.rack_avail_mem + avail_local_disk = self.rack_avail_local_disk + elif _level == "host": + avail_vCPUs = self.host_avail_vCPUs + avail_mem = self.host_avail_mem + avail_local_disk = self.host_avail_local_disk + + return (avail_vCPUs, avail_mem, avail_local_disk) + + def get_local_disk(self, _level): + local_disk = 0 + avail_local_disk = 0 + + if _level == "cluster": + local_disk = self.cluster_local_disk + avail_local_disk = self.cluster_avail_local_disk + elif _level == "rack": + local_disk = self.rack_local_disk + avail_local_disk = self.rack_avail_local_disk + elif _level == "host": + local_disk = self.host_local_disk + avail_local_disk = self.host_avail_local_disk + + return (local_disk, avail_local_disk) + + def get_vCPUs(self, _level): + vCPUs = 0 + avail_vCPUs = 0 + + if _level == "cluster": + vCPUs = self.cluster_vCPUs + avail_vCPUs = self.cluster_avail_vCPUs + elif _level == "rack": + vCPUs = self.rack_vCPUs + avail_vCPUs = self.rack_avail_vCPUs + elif _level == "host": + vCPUs = self.host_vCPUs + avail_vCPUs = self.host_avail_vCPUs + + return (vCPUs, avail_vCPUs) + + def get_mem(self, _level): + mem = 0 + avail_mem = 0 + + if _level == "cluster": + mem = self.cluster_mem + avail_mem = self.cluster_avail_mem + elif _level == "rack": + mem = self.rack_mem + avail_mem = self.rack_avail_mem + elif _level == "host": + mem = self.host_mem + avail_mem = self.host_avail_mem + + return (mem, avail_mem) + + def get_avail_storages(self, _level): + avail_storages = None + + if _level == "cluster": + avail_storages = self.cluster_avail_storages + elif _level == "rack": + avail_storages = self.rack_avail_storages + elif _level == "host": + avail_storages = self.host_avail_storages + + return avail_storages + + def get_avail_switches(self, _level): + avail_switches = None + + if _level == "cluster": + avail_switches = self.cluster_avail_switches + elif _level == "rack": + avail_switches = self.rack_avail_switches + elif _level == "host": + avail_switches = self.host_avail_switches + + return avail_switches + + +class LogicalGroupResource(object): + + def __init__(self): + self.name = None + self.group_type = "AGGR" + + self.metadata = {} + + self.num_of_placed_vms = 0 + self.num_of_placed_vms_per_host = {} # key = host (i.e., id of host or rack), value = num_of_placed_vms + + +class StorageResource(object): + + def __init__(self): + self.storage_name = None + self.storage_class = None + self.storage_avail_disk = 0 + + self.sort_base = 0 + + +class SwitchResource(object): + + def __init__(self): + self.switch_name = None + self.switch_type = None + self.avail_bandwidths = [] # out-bound bandwidths + + self.sort_base = 0 + + +class Node(object): + + def __init__(self): + self.node = None # VM, Volume, or VGroup + + self.sort_base = -1 + + def get_all_links(self): + link_list = [] + + if isinstance(self.node, VM): + for vml in self.node.vm_list: + link_list.append(vml) + for voll in self.node.volume_list: + link_list.append(voll) + elif isinstance(self.node, Volume): + for vml in self.node.vm_list: # vml is VolumeLink + link_list.append(vml) + elif isinstance(self.node, VGroup): + for vgl in self.node.vgroup_list: + link_list.append(vgl) + + return link_list + + def get_bandwidth_of_link(self, _link): + bandwidth = 0 + + if isinstance(self.node, VGroup) or isinstance(self.node, VM): + if 
isinstance(_link.node, VM): + bandwidth = _link.nw_bandwidth + elif isinstance(_link.node, Volume): + bandwidth = _link.io_bandwidth + else: + bandwidth = _link.io_bandwidth + + return bandwidth + + def get_common_diversity(self, _diversity_groups): + common_level = "ANY" + + for dk in self.node.diversity_groups.keys(): + if dk in _diversity_groups.keys(): + level = self.node.diversity_groups[dk].split(":")[0] + if common_level != "ANY": + if LEVELS.index(level) > LEVELS.index(common_level): + common_level = level + else: + common_level = level + + return common_level + + def get_affinity_id(self): + aff_id = None + + if isinstance(self.node, VGroup) and self.node.vgroup_type == "AFF" and \ + self.node.name != "any": + aff_id = self.node.level + ":" + self.node.name + + return aff_id + + +def compute_reservation(_level, _placement_level, _bandwidth): + reservation = 0 + + if _placement_level != "ANY": + diff = LEVELS.index(_placement_level) - LEVELS.index(_level) + 1 + if diff > 0: + reservation = _bandwidth * diff * 2 + + return reservation diff --git a/valet/engine/optimizer/ostro_server/__init__.py b/valet/engine/optimizer/ostro_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/optimizer/ostro_server/configuration.py b/valet/engine/optimizer/ostro_server/configuration.py new file mode 100755 index 0000000..7cfe8ef --- /dev/null +++ b/valet/engine/optimizer/ostro_server/configuration.py @@ -0,0 +1,269 @@ +#!/bin/python +################################################################################################################# +# Author: Gueyoung Jung +# Contact: gjung@research.att.com +# Version 2.0.2: Feb. 9, 2016 +# Modified: Sep. 16, 2016 +# +# Functions +# - Set all configurations to run Ostro +# +################################################################################################################# + +import os +from oslo_config import cfg +from valet.engine.conf import register_conf + + +CONF = cfg.CONF + + +class Config(object): + + def __init__(self, *default_config_files): + + register_conf() + if default_config_files: + CONF(default_config_files=default_config_files) + else: + CONF(project='valet') + + # System parameters + self.root_loc = os.path.dirname(CONF.default_config_files[0]) + + self.mode = None + + self.command = 'status' + + self.process = None + + self.control_loc = None + + self.api_protocol = 'http://' + + self.network_control = False + self.network_control_api = None + + self.db_keyspace = None + self.db_request_table = None + self.db_response_table = None + self.db_event_table = None + self.db_resource_table = None + self.db_app_table = None + self.db_resource_index_table = None + self.db_app_index_table = None + self.db_uuid_table = None + self.replication_factor = 3 + self.db_hosts = [] + + self.ip = None + + self.priority = 0 + + self.rpc_server_ip = None + self.rpc_server_port = 0 + + # Logging parameters + self.logger_name = None + self.logging_level = None + self.logging_loc = None + + self.resource_log_loc = None + self.app_log_loc = None + self.max_main_log_size = 0 + self.max_log_size = 0 + self.max_num_of_logs = 0 + + # Management parameters + self.datacenter_name = None + + self.num_of_region_chars = 0 + self.rack_code_list = [] + self.node_code_list = [] + + self.topology_trigger_time = None + self.topology_trigger_freq = 0 + self.compute_trigger_time = None + self.compute_trigger_freq = 0 + + self.default_cpu_allocation_ratio = 1 + self.default_ram_allocation_ratio = 1 + 
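+        # added note: an allocation ratio of 1 means no overcommit; for
+        # illustration only, a CPU ratio of 16.0 on 8 physical cores would
+        # expose 8 * 16.0 = 128 schedulable vCPUs.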
self.default_disk_allocation_ratio = 1 + + self.static_cpu_standby_ratio = 0 + self.static_mem_standby_ratio = 0 + self.static_local_disk_standby_ratio = 0 + + # Authentication parameters + self.project_name = None + self.user_name = None + self.pw = None + + # Simulation parameters + self.sim_cfg_loc = None + + self.num_of_hosts_per_rack = 0 + self.num_of_racks = 0 + self.num_of_spine_switches = 0 + self.num_of_aggregates = 0 + self.aggregated_ratio = 0 + + self.cpus_per_host = 0 + self.mem_per_host = 0 + self.disk_per_host = 0 + self.bandwidth_of_spine = 0 + self.bandwidth_of_rack = 0 + self.bandwidth_of_host = 0 + + self.num_of_basic_flavors = 0 + self.base_flavor_cpus = 0 + self.base_flavor_mem = 0 + self.base_flavor_disk = 0 + + def configure(self): + + status = self._init_system() + if status != "success": + return status + + self.sim_cfg_loc = self.root_loc + self.sim_cfg_loc + self.process = self.process + self.logging_loc = self.logging_loc + self.resource_log_loc = self.logging_loc + self.app_log_loc = self.logging_loc + self.eval_log_loc = self.logging_loc + + if self.mode.startswith("live") is False: + status = self._set_simulation() + if status != "success": + return status + + return "success" + + def _init_system(self): + + self.command = CONF.command + + self.mode = CONF.engine.mode + + self.priority = CONF.engine.priority + + self.logger_name = CONF.engine.logger_name + + self.logging_level = CONF.engine.logging_level + + self.logging_loc = CONF.engine.logging_dir + + self.resource_log_loc = CONF.engine.logging_dir + 'resources' + + self.app_log_loc = CONF.engine.logging_dir + 'app' + + self.eval_log_loc = CONF.engine.logging_dir + + self.max_log_size = CONF.engine.max_log_size + + self.max_num_of_logs = CONF.engine.max_num_of_logs + + self.process = CONF.engine.pid + + self.rpc_server_ip = CONF.engine.rpc_server_ip + + self.rpc_server_port = CONF.engine.rpc_server_port + + self.datacenter_name = CONF.engine.datacenter_name + + self.network_control = CONF.engine.network_control + + self.network_control_url = CONF.engine.network_control_url + + self.default_cpu_allocation_ratio = CONF.engine.default_cpu_allocation_ratio + + self.default_ram_allocation_ratio = CONF.engine.default_ram_allocation_ratio + + self.default_disk_allocation_ratio = CONF.engine.default_disk_allocation_ratio + + self.static_cpu_standby_ratio = CONF.engine.static_cpu_standby_ratio + + self.static_mem_standby_ratio = CONF.engine.static_mem_standby_ratio + + self.static_local_disk_standby_ratio = CONF.engine.static_local_disk_standby_ratio + + self.topology_trigger_time = CONF.engine.topology_trigger_time + + self.topology_trigger_freq = CONF.engine.topology_trigger_frequency + + self.compute_trigger_time = CONF.engine.compute_trigger_time + + self.compute_trigger_freq = CONF.engine.compute_trigger_frequency + + self.db_keyspace = CONF.music.keyspace + + self.db_request_table = CONF.music.request_table + + self.db_response_table = CONF.music.response_table + + self.db_event_table = CONF.music.event_table + + self.db_resource_table = CONF.music.resource_table + + self.db_app_table = CONF.music.app_table + + self.db_resource_index_table = CONF.music.resource_index_table + + self.db_app_index_table = CONF.music.app_index_table + + self.db_uuid_table = CONF.music.uuid_table + + self.replication_factor = CONF.music.replication_factor + + self.db_host = CONF.music.host + + self.ip = CONF.engine.ip + + self.num_of_region_chars = CONF.engine.num_of_region_chars + + self.rack_code_list = 
CONF.engine.rack_code_list + + self.node_code_list = CONF.engine.node_code_list + + self.sim_cfg_loc = CONF.engine.sim_cfg_loc + + self.project_name = CONF.identity.project_name + + self.user_name = CONF.identity.username + + self.pw = CONF.identity.password + + return "success" + + def _set_simulation(self): + + self.num_of_spine_switches = CONF.engine.num_of_spine_switches + + self.num_of_hosts_per_rack = CONF.engine.num_of_hosts_per_rack + + self.num_of_racks = CONF.engine.num_of_racks + + self.num_of_aggregates = CONF.engine.num_of_aggregates + + self.aggregated_ratio = CONF.engine.aggregated_ratio + + self.cpus_per_host = CONF.engine.cpus_per_host + + self.mem_per_host = CONF.engine.mem_per_host + + self.disk_per_host = CONF.engine.disk_per_host + + self.bandwidth_of_spine = CONF.engine.bandwidth_of_spine + + self.bandwidth_of_rack = CONF.engine.bandwidth_of_rack + + self.bandwidth_of_host = CONF.engine.bandwidth_of_host + + self.num_of_basic_flavors = CONF.engine.num_of_basic_flavors + + self.base_flavor_cpus = CONF.engine.base_flavor_cpus + + self.base_flavor_mem = CONF.engine.base_flavor_mem + + self.base_flavor_disk = CONF.engine.base_flavor_disk diff --git a/valet/engine/optimizer/ostro_server/daemon.py b/valet/engine/optimizer/ostro_server/daemon.py new file mode 100644 index 0000000..9a98c49 --- /dev/null +++ b/valet/engine/optimizer/ostro_server/daemon.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python + +# Modified: Mar. 1, 2016 + + +import atexit +import os +from signal import SIGTERM +import sys +import time + + +class Daemon(object): + """ A generic daemon class. + + Usage: subclass the Daemon class and override the run() method + """ + + def __init__(self, priority, pidfile, logger, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): + self.stdin = stdin + self.stdout = stdout + self.stderr = stderr + self.pidfile = pidfile + self.priority = priority + self.logger = logger + + def daemonize(self): + """ Do the UNIX double-fork magic, see Stevens' "Advanced + + Programming in the UNIX Environment" for details (ISBN 0201563177) + http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 + """ + try: + pid = os.fork() + if pid > 0: + # exit first parent + sys.exit(0) + except OSError as e: + self.logger.error("Daemon error at step1: " + e.strerror) + sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + + # decouple from parent environment + os.chdir("/") + os.setsid() + os.umask(0) + + # do second fork + try: + pid = os.fork() + if pid > 0: + # exit from second parent + sys.exit(0) + except OSError as e: + self.logger.error("Daemon error at step2: " + e.strerror) + sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + + # redirect standard file descriptors + sys.stdout.flush() + sys.stderr.flush() + si = file(self.stdin, 'r') + so = file(self.stdout, 'a+') + se = file(self.stderr, 'a+', 0) + os.dup2(si.fileno(), sys.stdin.fileno()) + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + + # write pidfile + atexit.register(self.delpid) + pid = str(os.getpid()) + file(self.pidfile, 'w+').write("%s\n" % pid) + + def delpid(self): + os.remove(self.pidfile) + + def getpid(self): + """returns the content of pidfile or None.""" + try: + pf = file(self.pidfile, 'r') + pid = int(pf.read().strip()) + pf.close() + except IOError: + pid = None + return pid + + def checkpid(self, pid): + """ Check For the existence of a unix pid. 
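+        Probes the process with signal 0; on OSError the pidfile is treated
+        as stale, removed via delpid(), and False is returned. (Doc added
+        here; behavior inferred from the method body.)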
""" + if pid is None: + return False + + try: + os.kill(pid, 0) + except OSError: + self.delpid() + return False + else: + return True + + def start(self): + """Start the daemon""" + # Check for a pidfile to see if the daemon already runs + pid = self.getpid() + + if pid: + message = "pidfile %s already exist. Daemon already running?\n" + sys.stderr.write(message % self.pidfile) + sys.exit(1) + + # Start the daemon + self.daemonize() + self.run() + + def stop(self): + """Stop the daemon""" + # Get the pid from the pidfile + pid = self.getpid() + + if not pid: + message = "pidfile %s does not exist. Daemon not running?\n" + sys.stderr.write(message % self.pidfile) + return # not an error in a restart + + # Try killing the daemon process + try: + while 1: + os.kill(pid, SIGTERM) + time.sleep(0.1) + except OSError as err: + err = str(err) + if err.find("No such process") > 0: + if os.path.exists(self.pidfile): + os.remove(self.pidfile) + else: + # print str(err) + sys.exit(1) + + def restart(self): + """Restart the daemon""" + self.stop() + self.start() + + def status(self): + """ returns instance's priority """ + # Check for a pidfile to see if the daemon already runs + pid = self.getpid() + + status = 0 + + if self.checkpid(pid): + message = "status: pidfile %s exist. Daemon is running\n" + status = self.priority + else: + message = "status: pidfile %s does not exist. Daemon is not running\n" + + sys.stderr.write(message % self.pidfile) + return status + + def run(self): + """ You should override this method when you subclass Daemon. + + It will be called after the process has been daemonized by start() or restart(). + """ diff --git a/valet/engine/optimizer/ostro_server/db_cleaner.py b/valet/engine/optimizer/ostro_server/db_cleaner.py new file mode 100644 index 0000000..4efe157 --- /dev/null +++ b/valet/engine/optimizer/ostro_server/db_cleaner.py @@ -0,0 +1,151 @@ +#!/bin/python + + +################################################################################################################# +# Author: Gueyoung Jung +# Contact: gjung@research.att.com +# Version 2.0.2: Feb. 
9, 2016 +# +# Functions +# - Handle user requests +# +################################################################################################################# + + +import sys + +from configuration import Config + +from valet.api.db.models.music import Music + + +class DBCleaner(object): + + def __init__(self, _config): + self.config = _config + + self.music = Music() + + def clean_db_tables(self): + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table) + if len(results) > 0: + print("resource table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_resource_table, 'site_name', row['site_name']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) + if len(results) > 0: + print("request table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_request_table, + 'stack_id', row['stack_id']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table) + if len(results) > 0: + print("response table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_response_table, + 'stack_id', row['stack_id']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) + if len(results) > 0: + print("event table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_event_table, + 'timestamp', row['timestamp']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table) + if len(results) > 0: + print("resource_index table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_resource_index_table, + 'site_name', row['site_name']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table) + if len(results) > 0: + print("app_index table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_app_index_table, + 'site_name', row['site_name']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table) + if len(results) > 0: + print("app table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_app_table, + 'stack_id', row['stack_id']) + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table) + if len(results) > 0: + print("uuid table result = ", len(results)) + for _, row in results.iteritems(): + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_uuid_table, + 'uuid', row['uuid']) + + def check_db_tables(self): + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table) + if len(results) > 0: + print("resource table not cleaned ") + else: + print("resource table cleaned") + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) + if len(results) > 0: + print("request table not cleaned ") + else: + print("request table cleaned") + + results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table) + if len(results) 
> 0:
+            print("response table not cleaned ")
+        else:
+            print("response table cleaned")
+
+        results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table)
+        if len(results) > 0:
+            print("event table not cleaned ")
+        else:
+            print("event table cleaned")
+
+        results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table)
+        if len(results) > 0:
+            print("resource log index table not cleaned ")
+        else:
+            print("resource log index table cleaned")
+
+        results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table)
+        if len(results) > 0:
+            print("app log index table not cleaned ")
+        else:
+            print("app log index table cleaned")
+
+        results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table)
+        if len(results) > 0:
+            print("app log table not cleaned ")
+        else:
+            print("app log table cleaned")
+
+        results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table)
+        if len(results) > 0:
+            print("uuid table not cleaned ")
+        else:
+            print("uuid table cleaned")
+
+
+if __name__ == '__main__':
+    config = Config()
+    config_status = config.configure()
+    if config_status != "success":
+        print("Error while configuring Ostro: " + config_status)
+        sys.exit(2)
+
+    c = DBCleaner(config)
+    c.clean_db_tables()
+    c.check_db_tables()
diff --git a/valet/engine/optimizer/ostro_server/ostro_daemon.py b/valet/engine/optimizer/ostro_server/ostro_daemon.py
new file mode 100755
index 0000000..037a2e9
--- /dev/null
+++ b/valet/engine/optimizer/ostro_server/ostro_daemon.py
@@ -0,0 +1,75 @@
+#!/bin/python
+
+# Modified: Sep. 22, 2016
+
+import os
+import sys
+import traceback
+from valet.engine.optimizer.ostro.ostro import Ostro
+from valet.engine.optimizer.ostro_server.configuration import Config
+from valet.engine.optimizer.ostro_server.daemon import Daemon  # implemented for Python v2.7
+from valet.engine.optimizer.util.util import init_logger
+
+
+class OstroDaemon(Daemon):
+
+    def run(self):
+        # relies on the module-level `config` set in the __main__ block below
+        self.logger.info("##### Valet Engine is launched #####")
+        try:
+            ostro = Ostro(config, self.logger)
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            # bail out here: `ostro` would be unbound below if construction failed
+            sys.exit(2)
+
+        if ostro.bootstrap() is False:
+            self.logger.error("ostro bootstrap failed")
+            sys.exit(2)
+
+        ostro.run_ostro()
+
+
+def verify_dirs(list_of_dirs):
+    for d in list_of_dirs:
+        try:
+            if not os.path.exists(d):
+                os.makedirs(d)
+        except OSError:
+            print("Error while verifying: " + d)
+            sys.exit(2)
+
+
+if __name__ == "__main__":
+    # configuration
+    try:
+        config = Config()
+        config_status = config.configure()
+        if config_status != "success":
+            print(config_status)
+            sys.exit(2)
+
+        # verify directories
+        dirs_list = [config.logging_loc, config.resource_log_loc, config.app_log_loc, os.path.dirname(config.process)]
+        verify_dirs(dirs_list)
+
+        # logger
+        logger = init_logger(config)
+
+        # start daemon process
+        daemon = OstroDaemon(config.priority, config.process, logger)
+
+        logger.info("%s ostro ..." % config.command)
+ logger.info("%s ostro ..." % config.command) + + # dispatch on the requested daemon command + exit_code = { + 'start': daemon.start, + 'stop': daemon.stop, + 'restart': daemon.restart, + 'status': daemon.status, + }[config.command]() + exit_code = exit_code or 0 + + except Exception: + if logger is not None: + logger.error(traceback.format_exc()) + else: + traceback.print_exc() + exit_code = 2 + + sys.exit(int(exit_code)) diff --git a/valet/engine/optimizer/ostro_server/ostro_sim.cfg b/valet/engine/optimizer/ostro_server/ostro_sim.cfg new file mode 100644 index 0000000..ef3d380 --- /dev/null +++ b/valet/engine/optimizer/ostro_server/ostro_sim.cfg @@ -0,0 +1,25 @@ +# Version 2.0.2: Feb. 9, 2016 + +# Set simulation parameters +num_of_spine_switches=0 +#num_of_racks=1 +num_of_racks=2 +#num_of_hosts_per_rack=8 +num_of_hosts_per_rack=2 + +bandwidth_of_spine=40000 +bandwidth_of_rack=40000 +bandwidth_of_host=10000 + +num_of_aggregates=1 +aggregated_ratio=5 + +cpus_per_host=16 +mem_per_host=16000 +disk_per_host=1000 + +num_of_basic_flavors=3 +base_flavor_cpus=1 +base_flavor_mem=2000 +base_flavor_disk=40 + diff --git a/valet/engine/optimizer/util/__init__.py b/valet/engine/optimizer/util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/optimizer/util/util.py b/valet/engine/optimizer/util/util.py new file mode 100755 index 0000000..f69e6bf --- /dev/null +++ b/valet/engine/optimizer/util/util.py @@ -0,0 +1,89 @@ +#!/bin/python + +# Modified: Feb. 9, 2016 + +from os import listdir, stat +from os.path import isfile, join +import logging +from logging.handlers import RotatingFileHandler + + +def get_logfile(_loc, _max_log_size, _name): + files = [f for f in listdir(_loc) if isfile(join(_loc, f))] + + # find the highest index among existing "<name>_<index>.log" files + logfile_index = 0 + for f in files: + if f.split(".")[-1] == "log": + f_id = f.split("_")[-1].split(".")[0] + if f_id.isdigit() and int(f_id) > logfile_index: + logfile_index = int(f_id) + + last_logfile = _name + "_" + str(logfile_index) + ".log" + + # roll over to a fresh file once the current one exceeds the size limit + mode = None + if isfile(_loc + last_logfile) is True: + statinfo = stat(_loc + last_logfile) + if statinfo.st_size > _max_log_size: + last_logfile = _name + "_" + str(logfile_index + 1) + ".log" + mode = 'w' + else: + mode = 'a' + else: + mode = 'w' + + return (last_logfile, mode) + + +def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index): + last_logfile = _name + "_" + str(_last_index) + ".log" + mode = None + + if isfile(_loc + last_logfile) is True: + statinfo = stat(_loc + last_logfile) + if statinfo.st_size > _max_log_size: + # wrap around to index 0 once _max_num_of_logs is reached + if (_last_index + 1) < _max_num_of_logs: + _last_index = _last_index + 1 + else: + _last_index = 0 + + last_logfile = _name + "_" + str(_last_index) + ".log" + + mode = 'w' + else: + mode = 'a' + else: + mode = 'w' + + return (last_logfile, _last_index, mode) + + +def adjust_json_string(_data): + # turn Python-literal payloads into parseable JSON ("None" -> '"none"'), + # then undo the blanket replace for identifiers such as "_none" + _data = _data.replace("None", '"none"') + _data = _data.replace("False", '"false"') + _data = _data.replace("True", '"true"') + _data = _data.replace('_"none"', "_none") + _data = _data.replace('_"false"', "_false") + _data = _data.replace('_"true"', "_true") + + return _data + + +def init_logger(config): + log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") + log_handler = RotatingFileHandler(config.logging_loc + config.logger_name, + mode='a', + maxBytes=config.max_main_log_size, + backupCount=2, + encoding=None, + delay=0) + log_handler.setFormatter(log_formatter) + logger = logging.getLogger(config.logger_name) +
logger.setLevel(logging.DEBUG if config.logging_level == "debug" else logging.INFO) + logger.addHandler(log_handler) + + return logger diff --git a/valet/engine/resource_manager/__init__.py b/valet/engine/resource_manager/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/resource_manager/compute.py b/valet/engine/resource_manager/compute.py new file mode 100755 index 0000000..3832554 --- /dev/null +++ b/valet/engine/resource_manager/compute.py @@ -0,0 +1,335 @@ +#!/bin/python + +# Modified: Sep. 27, 2016 + +from novaclient import client as nova_client +from oslo_config import cfg +from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor +import traceback + +# Nova API v2 +VERSION = 2 + +CONF = cfg.CONF + + +class Compute(object): + def __init__(self, _logger): + self.logger = _logger + self.nova = None + + def set_hosts(self, _hosts, _logical_groups): + + self._get_nova_client() + + status = self._set_availability_zones(_hosts, _logical_groups) + if status != "success": + self.logger.error('_set_availability_zones failed') + return status + + status = self._set_aggregates(_hosts, _logical_groups) + if status != "success": + self.logger.error('_set_aggregates failed') + return status + + status = self._set_placed_vms(_hosts, _logical_groups) + if status != "success": + self.logger.error('_set_placed_vms failed') + return status + + status = self._set_resources(_hosts) + if status != "success": + self.logger.error('_set_resources failed') + return status + + return "success" + + def _get_nova_client(self): + '''Returns a nova client''' + self.nova = nova_client.Client(VERSION, + CONF.identity.username, + CONF.identity.password, + CONF.identity.project_name, + CONF.identity.auth_url) + + def _set_availability_zones(self, _hosts, _logical_groups): + try: + hosts_list = self.nova.hosts.list() + + try: + for h in hosts_list: + if h.service == "compute": + host = Host(h.host_name) + host.tag.append("nova") + + logical_group = None + if h.zone not in _logical_groups.keys(): + logical_group = LogicalGroup(h.zone) + logical_group.group_type = "AZ" + _logical_groups[logical_group.name] = logical_group + else: + logical_group = _logical_groups[h.zone] + + host.memberships[logical_group.name] = logical_group + + if host.name not in logical_group.vms_per_host.keys(): + logical_group.vms_per_host[host.name] = [] + + self.logger.info("adding Host LogicalGroup: " + str(host.__dict__)) + + _hosts[host.name] = host + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while setting host zones from Nova" + + except Exception: + self.logger.critical(traceback.format_exc()) + return "Error while getting hosts from Nova" + + return "success" + + def _set_aggregates(self, _hosts, _logical_groups): + aggregate_list = self.nova.aggregates.list() + + try: + for a in aggregate_list: + aggregate = LogicalGroup(a.name) + aggregate.group_type = "AGGR" + if a.deleted is not False: + aggregate.status = "disabled" + + metadata = {} + for mk in a.metadata.keys(): + metadata[mk] = a.metadata.get(mk) + aggregate.metadata = metadata + + self.logger.info("adding aggregate LogicalGroup: " + str(aggregate.__dict__)) + + _logical_groups[aggregate.name] = aggregate + + for hn in a.hosts: + host = _hosts[hn] + host.memberships[aggregate.name] = aggregate + + aggregate.vms_per_host[host.name] = [] + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while setting host aggregates from Nova" + + return "success"
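+ + # Illustrative sketch (not from the original patch): a Nova aggregate named + # "gold" with metadata {"cpu_allocation_ratio": "16"} becomes a LogicalGroup + # of group_type "AGGR" whose metadata is copied verbatim, and each member + # host gains a membership plus an empty vms_per_host slot: + # + # lg = LogicalGroup("gold") # lg.group_type == "AGGR" + # host.memberships["gold"] = lg + # lg.vms_per_host[host.name] = [] + # + # The aggregate name and metadata above are made-up example values.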
+ + # NOTE: do not set any info in _logical_groups + def _set_placed_vms(self, _hosts, _logical_groups): + error_status = None + + for hk in _hosts.keys(): + vm_uuid_list = [] + result_status = self._get_vms_of_host(hk, vm_uuid_list) + + if result_status == "success": + for vm_uuid in vm_uuid_list: + vm_detail = [] # (vm_name, az, metadata, status) + result_status_detail = self._get_vm_detail(vm_uuid, vm_detail) + + if result_status_detail == "success": + # if vm_detail[3] != "SHUTOFF": # status == "ACTIVE" or "SUSPENDED" + vm_id = ("none", vm_detail[0], vm_uuid) + _hosts[hk].vm_list.append(vm_id) + + # _logical_groups[vm_detail[1]].vm_list.append(vm_id) + # _logical_groups[vm_detail[1]].vms_per_host[hk].append(vm_id) + else: + error_status = result_status_detail + break + else: + error_status = result_status + + if error_status is not None: + break + + if error_status is None: + return "success" + else: + return error_status + + def _get_vms_of_host(self, _hk, _vm_list): + hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk, servers=True) + + try: + for hv in hypervisor_list: + if hasattr(hv, 'servers'): + server_list = getattr(hv, 'servers') + for s in server_list: + _vm_list.append(s.uuid) + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while getting existing vms" + + return "success" + + def _get_vm_detail(self, _vm_uuid, _vm_detail): + server = self.nova.servers.get(_vm_uuid) + + try: + vm_name = server.name + _vm_detail.append(vm_name) + az = getattr(server, "OS-EXT-AZ:availability_zone") + _vm_detail.append(az) + metadata = server.metadata + _vm_detail.append(metadata) + status = server.status + _vm_detail.append(status) + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while getting vm detail" + + return "success" + + def _set_resources(self, _hosts): + # Returns Hypervisor list + host_list = self.nova.hypervisors.list() + + try: + for hv in host_list: + if hv.service['host'] in _hosts.keys(): + host = _hosts[hv.service['host']] + host.status = hv.status + host.state = hv.state + host.original_vCPUs = float(hv.vcpus) + host.vCPUs_used = float(hv.vcpus_used) + host.original_mem_cap = float(hv.memory_mb) + host.free_mem_mb = float(hv.free_ram_mb) + host.original_local_disk_cap = float(hv.local_gb) + host.free_disk_gb = float(hv.free_disk_gb) + host.disk_available_least = float(hv.disk_available_least) + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while setting host resources from Nova" + + return "success" + + def set_flavors(self, _flavors): + error_status = None + + self._get_nova_client() + + result_status = self._set_flavors(_flavors) + + if result_status == "success": + for _, f in _flavors.iteritems(): + result_status_detail = self._set_extra_specs(f) + if result_status_detail != "success": + error_status = result_status_detail + break + else: + error_status = result_status + + if error_status is None: + return "success" + else: + return error_status + + def _set_flavors(self, _flavors): + # Get a list of all flavors + flavor_list = self.nova.flavors.list() + + try: + for f in flavor_list: + flavor = Flavor(f.name) + flavor.flavor_id = f.id + if hasattr(f, "OS-FLV-DISABLED:disabled"): + if getattr(f, "OS-FLV-DISABLED:disabled"): + flavor.status = "disabled" + + flavor.vCPUs = float(f.vcpus) + flavor.mem_cap = float(f.ram) + + root_gb = float(f.disk) + + ephemeral_gb = 0.0 + if hasattr(f, "OS-FLV-EXT-DATA:ephemeral"): +
ephemeral_gb = float(getattr(f, "OS-FLV-EXT-DATA:ephemeral")) + + swap_mb = 0.0 + if hasattr(f, "swap"): + sw = getattr(f, "swap") + if sw != '': + swap_mb = float(sw) + + flavor.disk_cap = root_gb + ephemeral_gb + swap_mb / float(1024) + + self.logger.info("adding flavor " + str(flavor.__dict__)) + + _flavors[flavor.name] = flavor + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while getting flavors" + + return "success" + + def _set_extra_specs(self, _flavor): + try: + # Get a list of all flavors + flavors_list = self.nova.flavors.list() + # Get flavor from flavor_list + for flavor in flavors_list: + if flavor.id == _flavor.flavor_id: + + extra_specs = flavor.get_keys() + + for sk, sv in extra_specs.iteritems(): + _flavor.extra_specs[sk] = sv + + break + + except (ValueError, KeyError, TypeError): + self.logger.error(traceback.format_exc()) + return "Error while getting flavor extra spec" + + return "success" + + +# Unit test +''' +if __name__ == '__main__': + config = Config() + config_status = config.configure() + if config_status != "success": + print "Error while configuring Ostro: " + config_status + sys.exit(2) + + auth = Authentication() + + admin_token = auth.get_tenant_token(config) + if admin_token is None: + print "Error while getting admin_token" + sys.exit(2) + else: + print "admin_token=",admin_token + + project_token = auth.get_project_token(config, admin_token) + if project_token is None: + print "Error while getting project_token" + sys.exit(2) + else: + print "project_token=",project_token + + c = Compute(config, admin_token, project_token) + + hosts = {} + logical_groups = {} + flavors = {} + + #c._set_availability_zones(hosts, logical_groups) + #c._set_aggregates(None, logical_groups) + #c._set_placed_vms(hosts, logical_groups) + #c._get_vms_of_host("qos101", None) + #c._get_vm_detail("20b2890b-81bb-4942-94bf-c6bee29630bb", None) + c._set_resources(hosts) + #c._set_flavors(flavors) +''' diff --git a/valet/engine/resource_manager/compute_manager.py b/valet/engine/resource_manager/compute_manager.py new file mode 100755 index 0000000..d34801d --- /dev/null +++ b/valet/engine/resource_manager/compute_manager.py @@ -0,0 +1,406 @@ +#!/bin/python + +# Modified: Sep. 
22, 2016 + + +import threading +import time + +from copy import deepcopy +from valet.engine.resource_manager.compute import Compute +from valet.engine.resource_manager.compute_simulator import SimCompute +from valet.engine.resource_manager.resource_base import Host + + +class ComputeManager(threading.Thread): + + def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger): + threading.Thread.__init__(self) + + self.thread_id = _t_id + self.thread_name = _t_name + self.data_lock = _data_lock + self.end_of_process = False + + self.resource = _rsc + + self.config = _config + + self.logger = _logger + + # self.auth = Authentication(_logger) + self.admin_token = None + self.project_token = None + + def run(self): + self.logger.info("ComputeManager: start " + self.thread_name + " ......") + + if self.config.compute_trigger_freq > 0: + period_end = time.time() + self.config.compute_trigger_freq + + while self.end_of_process is False: + time.sleep(60) + + if time.time() > period_end: + self._run() + period_end = time.time() + self.config.compute_trigger_freq + + else: + (alarm_HH, alarm_MM) = self.config.compute_trigger_time.split(':') + + now = time.localtime() + timeout = True + last_trigger_year = now.tm_year + last_trigger_mon = now.tm_mon + last_trigger_mday = now.tm_mday + + while self.end_of_process is False: + time.sleep(60) + + now = time.localtime() + if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday: + timeout = False + + if timeout is False and \ + now.tm_hour >= int(alarm_HH) and now.tm_min >= int(alarm_MM): + self._run() + + timeout = True + last_trigger_year = now.tm_year + last_trigger_mon = now.tm_mon + last_trigger_mday = now.tm_mday + + self.logger.info("ComputeManager: exit " + self.thread_name) + + def _run(self): + self.logger.info("ComputeManager: --- start compute_nodes status update ---") + + self.data_lock.acquire() + try: + triggered_host_updates = self.set_hosts() + triggered_flavor_updates = self.set_flavors() + + if triggered_host_updates is True and triggered_flavor_updates is True: + if self.resource.update_topology() is False: + # TODO(GY): error in MUSIC. ignore? + pass + else: + # TODO(GY): error handling, e.g., 3 times failure then stop Ostro? 
+ pass + finally: + self.data_lock.release() + + self.logger.info("ComputeManager: --- done compute_nodes status update ---") + + return True + + # def _set_admin_token(self): + # self.admin_token = self.auth.get_tenant_token(self.config) + # if self.admin_token is None: + # self.logger.error("ComputeManager: " + self.auth.status) + # return False + # + # return True + + # def _set_project_token(self): + # self.project_token = self.auth.get_project_token(self.config, self.admin_token) + # if self.project_token is None: + # self.logger.error("ComputeManager: " + self.auth.status) + # return False + # + # return True + + def set_hosts(self): + hosts = {} + logical_groups = {} + + compute = None + if self.config.mode.startswith("sim") is True or \ + self.config.mode.startswith("test") is True: + compute = SimCompute(self.config) + else: + compute = Compute(self.logger) + + status = compute.set_hosts(hosts, logical_groups) + if status != "success": + self.logger.error("ComputeManager: " + status) + return False + + self._compute_avail_host_resources(hosts) + + self._check_logical_group_update(logical_groups) + self._check_host_update(hosts) + + return True + + def _compute_avail_host_resources(self, _hosts): + for hk, host in _hosts.iteritems(): + self.resource.compute_avail_resources(hk, host) + + def _check_logical_group_update(self, _logical_groups): + for lk in _logical_groups.keys(): + if lk not in self.resource.logical_groups.keys(): + self.resource.logical_groups[lk] = deepcopy(_logical_groups[lk]) + + self.resource.logical_groups[lk].last_update = time.time() + self.logger.warn("ComputeManager: new logical group (" + lk + ") added") + + for rlk in self.resource.logical_groups.keys(): + rl = self.resource.logical_groups[rlk] + if rl.group_type != "EX" and rl.group_type != "AFF" and rl.group_type != "DIV": + if rlk not in _logical_groups.keys(): + self.resource.logical_groups[rlk].status = "disabled" + + self.resource.logical_groups[rlk].last_update = time.time() + self.logger.warn("ComputeManager: logical group (" + rlk + ") removed") + + for lk in _logical_groups.keys(): + lg = _logical_groups[lk] + rlg = self.resource.logical_groups[lk] + if lg.group_type != "EX" and lg.group_type != "AFF" and lg.group_type != "DIV": + if self._check_logical_group_metadata_update(lg, rlg) is True: + + rlg.last_update = time.time() + self.logger.warn("ComputeManager: logical group (" + lk + ") updated") + + def _check_logical_group_metadata_update(self, _lg, _rlg): + if _lg.status != _rlg.status: + _rlg.status = _lg.status + + for mdk in _lg.metadata.keys(): + if mdk not in _rlg.metadata.keys(): + _rlg.metadata[mdk] = _lg.metadata[mdk] + + for rmdk in _rlg.metadata.keys(): + if rmdk not in _lg.metadata.keys(): + del _rlg.metadata[rmdk] + + for hk in _lg.vms_per_host.keys(): + if hk not in _rlg.vms_per_host.keys(): + _rlg.vms_per_host[hk] = deepcopy(_lg.vms_per_host[hk]) + + for rhk in _rlg.vms_per_host.keys(): + if rhk not in _lg.vms_per_host.keys(): + del _rlg.vms_per_host[rhk] + + def _check_host_update(self, _hosts): + for hk in _hosts.keys(): + if hk not in self.resource.hosts.keys(): + new_host = Host(hk) + self.resource.hosts[new_host.name] = new_host + + new_host.last_update = time.time() + self.logger.warn("ComputeManager: new host (" + new_host.name + ") added") + + for rhk, rhost in self.resource.hosts.iteritems(): + if rhk not in _hosts.keys(): + if "nova" in rhost.tag: + rhost.tag.remove("nova") + + rhost.last_update = time.time() + self.logger.warn("ComputeManager: host (" + rhost.name + 
") disabled") + + for hk in _hosts.keys(): + host = _hosts[hk] + rhost = self.resource.hosts[hk] + if self._check_host_config_update(host, rhost) is True: + rhost.last_update = time.time() + + for hk, h in self.resource.hosts.iteritems(): + if h.clean_memberships() is True: + h.last_update = time.time() + self.logger.warn("ComputeManager: host (" + h.name + ") updated (delete EX/AFF/DIV membership)") + + for hk, host in self.resource.hosts.iteritems(): + if host.last_update > self.resource.current_timestamp: + self.resource.update_rack_resource(host) + + def _check_host_config_update(self, _host, _rhost): + topology_updated = False + + topology_updated = self._check_host_status(_host, _rhost) + topology_updated = self._check_host_resources(_host, _rhost) + topology_updated = self._check_host_memberships(_host, _rhost) + topology_updated = self._check_host_vms(_host, _rhost) + + return topology_updated + + def _check_host_status(self, _host, _rhost): + topology_updated = False + + if "nova" not in _rhost.tag: + _rhost.tag.append("nova") + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (tag added)") + + if _host.status != _rhost.status: + _rhost.status = _host.status + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (status changed)") + + if _host.state != _rhost.state: + _rhost.state = _host.state + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (state changed)") + + return topology_updated + + def _check_host_resources(self, _host, _rhost): + topology_updated = False + + if _host.vCPUs != _rhost.vCPUs or \ + _host.original_vCPUs != _rhost.original_vCPUs or \ + _host.avail_vCPUs != _rhost.avail_vCPUs: + _rhost.vCPUs = _host.vCPUs + _rhost.original_vCPUs = _host.original_vCPUs + _rhost.avail_vCPUs = _host.avail_vCPUs + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (CPU updated)") + + if _host.mem_cap != _rhost.mem_cap or \ + _host.original_mem_cap != _rhost.original_mem_cap or \ + _host.avail_mem_cap != _rhost.avail_mem_cap: + _rhost.mem_cap = _host.mem_cap + _rhost.original_mem_cap = _host.original_mem_cap + _rhost.avail_mem_cap = _host.avail_mem_cap + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (mem updated)") + + if _host.local_disk_cap != _rhost.local_disk_cap or \ + _host.original_local_disk_cap != _rhost.original_local_disk_cap or \ + _host.avail_local_disk_cap != _rhost.avail_local_disk_cap: + _rhost.local_disk_cap = _host.local_disk_cap + _rhost.original_local_disk_cap = _host.original_local_disk_cap + _rhost.avail_local_disk_cap = _host.avail_local_disk_cap + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (local disk space updated)") + + if _host.vCPUs_used != _rhost.vCPUs_used or \ + _host.free_mem_mb != _rhost.free_mem_mb or \ + _host.free_disk_gb != _rhost.free_disk_gb or \ + _host.disk_available_least != _rhost.disk_available_least: + _rhost.vCPUs_used = _host.vCPUs_used + _rhost.free_mem_mb = _host.free_mem_mb + _rhost.free_disk_gb = _host.free_disk_gb + _rhost.disk_available_least = _host.disk_available_least + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (other resource numbers)") + + return topology_updated + + def _check_host_memberships(self, _host, _rhost): + topology_updated = False + + for mk in _host.memberships.keys(): + if mk not in 
+ def _check_host_memberships(self, _host, _rhost): + topology_updated = False + + for mk in _host.memberships.keys(): + if mk not in _rhost.memberships.keys(): + _rhost.memberships[mk] = self.resource.logical_groups[mk] + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new membership)") + + for mk in _rhost.memberships.keys(): + m = _rhost.memberships[mk] + if m.group_type != "EX" and m.group_type != "AFF" and m.group_type != "DIV": + if mk not in _host.memberships.keys(): + del _rhost.memberships[mk] + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (delete membership)") + + return topology_updated + + def _check_host_vms(self, _host, _rhost): + topology_updated = False + + ''' clean up VMs ''' + for rvm_id in _rhost.vm_list[:]: # iterate over a copy while removing + if rvm_id[2] == "none": + _rhost.vm_list.remove(rvm_id) + + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (none vm removed)") + + self.resource.clean_none_vms_from_logical_groups(_rhost) + + for vm_id in _host.vm_list: + if _rhost.exist_vm_by_uuid(vm_id[2]) is False: + _rhost.vm_list.append(vm_id) + + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new vm placed)") + + for rvm_id in _rhost.vm_list[:]: # iterate over a copy while removing + if _host.exist_vm_by_uuid(rvm_id[2]) is False: + _rhost.vm_list.remove(rvm_id) + + self.resource.remove_vm_by_uuid_from_logical_groups(_rhost, rvm_id[2]) + + topology_updated = True + self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (vm removed)") + + return topology_updated + + def set_flavors(self): + flavors = {} + + compute = None + if self.config.mode.startswith("sim") is True or \ + self.config.mode.startswith("test") is True: + compute = SimCompute(self.config) + else: + compute = Compute(self.logger) + + status = compute.set_flavors(flavors) + if status != "success": + self.logger.error("ComputeManager: " + status) + return False + + self._check_flavor_update(flavors) + + return True + + def _check_flavor_update(self, _flavors): + for fk in _flavors.keys(): + if fk not in self.resource.flavors.keys(): + self.resource.flavors[fk] = deepcopy(_flavors[fk]) + + self.resource.flavors[fk].last_update = time.time() + self.logger.warn("ComputeManager: new flavor (" + fk + ") added") + + for rfk in self.resource.flavors.keys(): + if rfk not in _flavors.keys(): + self.resource.flavors[rfk].status = "disabled" + + self.resource.flavors[rfk].last_update = time.time() + self.logger.warn("ComputeManager: flavor (" + rfk + ") removed") + + for fk in _flavors.keys(): + f = _flavors[fk] + rf = self.resource.flavors[fk] + + if self._check_flavor_spec_update(f, rf) is True: + rf.last_update = time.time() + self.logger.warn("ComputeManager: flavor (" + fk + ") spec updated") + + def _check_flavor_spec_update(self, _f, _rf): + spec_updated = False + + if _f.status != _rf.status: + _rf.status = _f.status + spec_updated = True + + if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or _f.disk_cap != _rf.disk_cap: + _rf.vCPUs = _f.vCPUs + _rf.mem_cap = _f.mem_cap + _rf.disk_cap = _f.disk_cap + spec_updated = True + + for sk in _f.extra_specs.keys(): + if sk not in _rf.extra_specs.keys() or _rf.extra_specs[sk] != _f.extra_specs[sk]: + _rf.extra_specs[sk] = _f.extra_specs[sk] + spec_updated = True + + for rsk in _rf.extra_specs.keys(): + if rsk not in _f.extra_specs.keys(): + del _rf.extra_specs[rsk] + spec_updated = True + + return spec_updated diff --git a/valet/engine/resource_manager/compute_simulator.py b/valet/engine/resource_manager/compute_simulator.py new file mode 100644 index 0000000..032428f --- /dev/null +++
b/valet/engine/resource_manager/compute_simulator.py @@ -0,0 +1,101 @@ +#!/bin/python + +# Modified: Sep. 4, 2016 + + +from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor + + +class SimCompute(object): + + def __init__(self, _config): + self.config = _config + self.datacenter_name = "sim" + + def set_hosts(self, _hosts, _logical_groups): + self._set_availability_zones(_hosts, _logical_groups) + + self._set_aggregates(_hosts, _logical_groups) + + self._set_placed_vms(_hosts, _logical_groups) + + self._set_resources(_hosts) + + return "success" + + def _set_availability_zones(self, _hosts, _logical_groups): + logical_group = LogicalGroup("nova") + logical_group.group_type = "AZ" + _logical_groups[logical_group.name] = logical_group + + for r_num in range(0, self.config.num_of_racks): + for h_num in range(0, self.config.num_of_hosts_per_rack): + host = Host(self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num)) + host.tag.append("nova") + host.memberships["nova"] = logical_group + + logical_group.vms_per_host[host.name] = [] + + _hosts[host.name] = host + + def _set_aggregates(self, _hosts, _logical_groups): + for a_num in range(0, self.config.num_of_aggregates): + metadata = {} + metadata["cpu_allocation_ratio"] = "0.5" + + aggregate = LogicalGroup("aggregate" + str(a_num)) + aggregate.group_type = "AGGR" + aggregate.metadata = metadata + + _logical_groups[aggregate.name] = aggregate + + for a_num in range(0, self.config.num_of_aggregates): + aggregate = _logical_groups["aggregate" + str(a_num)] + for r_num in range(0, self.config.num_of_racks): + for h_num in range(0, self.config.num_of_hosts_per_rack): + host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num) + if host_name in _hosts.keys(): + if (h_num % (self.config.aggregated_ratio + a_num)) == 0: + host = _hosts[host_name] + host.memberships[aggregate.name] = aggregate + + aggregate.vms_per_host[host.name] = [] + + def _set_placed_vms(self, _hosts, _logical_groups): + pass + + def _set_resources(self, _hosts): + for r_num in range(0, self.config.num_of_racks): + for h_num in range(0, self.config.num_of_hosts_per_rack): + host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num) + if host_name in _hosts.keys(): + host = _hosts[host_name] + host.original_vCPUs = float(self.config.cpus_per_host) + host.vCPUs_used = 0.0 + host.original_mem_cap = float(self.config.mem_per_host) + host.free_mem_mb = host.original_mem_cap + host.original_local_disk_cap = float(self.config.disk_per_host) + host.free_disk_gb = host.original_local_disk_cap + host.disk_available_least = host.original_local_disk_cap + + def set_flavors(self, _flavors): + for f_num in range(0, self.config.num_of_basic_flavors): + flavor = Flavor("bflavor" + str(f_num)) + flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1)) + flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1)) + flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0 + + _flavors[flavor.name] = flavor + + for a_num in range(0, self.config.num_of_aggregates): + flavor = Flavor("sflavor" + str(a_num)) + flavor.vCPUs = self.config.base_flavor_cpus * (a_num + 1) + flavor.mem_cap = self.config.base_flavor_mem * (a_num + 1) + flavor.disk_cap = self.config.base_flavor_disk * (a_num + 1) + + # flavor.extra_specs["availability_zone"] = "nova" + flavor.extra_specs["cpu_allocation_ratio"] = "0.5" + + _flavors[flavor.name] = flavor + + return "success" diff --git 
a/valet/engine/resource_manager/resource.py b/valet/engine/resource_manager/resource.py new file mode 100755 index 0000000..98e32c3 --- /dev/null +++ b/valet/engine/resource_manager/resource.py @@ -0,0 +1,933 @@ +#!/bin/python + +# Modified: Sep. 27, 2016 + +import json +import sys +import time +import traceback + +from valet.engine.optimizer.app_manager.app_topology_base import LEVELS +from valet.engine.optimizer.util import util as util +from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, LogicalGroup +from valet.engine.resource_manager.resource_base import Flavor, Switch, Link + + +class Resource(object): + + def __init__(self, _db, _config, _logger): + self.db = _db + + self.config = _config + self.logger = _logger + + ''' resource data ''' + self.datacenter = Datacenter(self.config.datacenter_name) + self.host_groups = {} + self.hosts = {} + self.switches = {} + self.storage_hosts = {} + + ''' metadata ''' + self.logical_groups = {} + self.flavors = {} + + self.current_timestamp = 0 + self.last_log_index = 0 + + ''' resource status aggregation ''' + self.CPU_avail = 0 + self.mem_avail = 0 + self.local_disk_avail = 0 + self.disk_avail = 0 + self.nw_bandwidth_avail = 0 + + def bootstrap_from_db(self, _resource_status): + try: + logical_groups = _resource_status.get("logical_groups") + if logical_groups: + for lgk, lg in logical_groups.iteritems(): + logical_group = LogicalGroup(lgk) + logical_group.group_type = lg.get("group_type") + logical_group.status = lg.get("status") + logical_group.metadata = lg.get("metadata") + logical_group.vm_list = lg.get("vm_list") + logical_group.volume_list = lg.get("volume_list", []) + logical_group.vms_per_host = lg.get("vms_per_host") + + self.logical_groups[lgk] = logical_group + + if len(self.logical_groups) > 0: + self.logger.debug("Resource.bootstrap_from_db: logical_groups loaded") + else: + self.logger.warn("Resource.bootstrap_from_db: no logical_groups") + + flavors = _resource_status.get("flavors") + if flavors: + for fk, f in flavors.iteritems(): + flavor = Flavor(fk) + flavor.flavor_id = f.get("flavor_id") + flavor.status = f.get("status") + flavor.vCPUs = f.get("vCPUs") + flavor.mem_cap = f.get("mem") + flavor.disk_cap = f.get("disk") + flavor.extra_specs = f.get("extra_specs") + + self.flavors[fk] = flavor + + if len(self.flavors) > 0: + self.logger.debug("Resource.bootstrap_from_db: flavors loaded") + else: + self.logger.error("Resource.bootstrap_from_db: fail loading flavors") + # return False + + switches = _resource_status.get("switches") + if switches: + for sk, s in switches.iteritems(): + switch = Switch(sk) + switch.switch_type = s.get("switch_type") + switch.status = s.get("status") + + self.switches[sk] = switch + + if len(self.switches) > 0: + self.logger.debug("Resource.bootstrap_from_db: switches loaded") + for sk, s in switches.iteritems(): + switch = self.switches[sk] + + up_links = {} + uls = s.get("up_links") + for ulk, ul in uls.iteritems(): + ulink = Link(ulk) + ulink.resource = self.switches[ul.get("resource")] + ulink.nw_bandwidth = ul.get("bandwidth") + ulink.avail_nw_bandwidth = ul.get("avail_bandwidth") + + up_links[ulk] = ulink + + switch.up_links = up_links + + peer_links = {} + pls = s.get("peer_links") + for plk, pl in pls.iteritems(): + plink = Link(plk) + plink.resource = self.switches[pl.get("resource")] + plink.nw_bandwidth = pl.get("bandwidth") + plink.avail_nw_bandwidth = pl.get("avail_bandwidth") + + peer_links[plk] = plink + + switch.peer_links = peer_links + + 
self.logger.debug("Resource.bootstrap_from_db: switch links loaded") + else: + self.logger.error("Resource.bootstrap_from_db: fail loading switches") + # return False + + # storage_hosts + hosts = _resource_status.get("hosts") + if hosts: + for hk, h in hosts.iteritems(): + host = Host(hk) + host.tag = h.get("tag") + host.status = h.get("status") + host.state = h.get("state") + host.vCPUs = h.get("vCPUs") + host.original_vCPUs = h.get("original_vCPUs") + host.avail_vCPUs = h.get("avail_vCPUs") + host.mem_cap = h.get("mem") + host.original_mem_cap = h.get("original_mem") + host.avail_mem_cap = h.get("avail_mem") + host.local_disk_cap = h.get("local_disk") + host.original_local_disk_cap = h.get("original_local_disk") + host.avail_local_disk_cap = h.get("avail_local_disk") + host.vCPUs_used = h.get("vCPUs_used") + host.free_mem_mb = h.get("free_mem_mb") + host.free_disk_gb = h.get("free_disk_gb") + host.disk_available_least = h.get("disk_available_least") + host.vm_list = h.get("vm_list") + host.volume_list = h.get("volume_list", []) + + for lgk in h["membership_list"]: + host.memberships[lgk] = self.logical_groups[lgk] + + for sk in h.get("switch_list", []): + host.switches[sk] = self.switches[sk] + + # host.storages + + self.hosts[hk] = host + + if len(self.hosts) > 0: + self.logger.debug("Resource.bootstrap_from_db: hosts loaded") + else: + self.logger.error("Resource.bootstrap_from_db: fail loading hosts") + # return False + + host_groups = _resource_status.get("host_groups") + if host_groups: + for hgk, hg in host_groups.iteritems(): + host_group = HostGroup(hgk) + host_group.host_type = hg.get("host_type") + host_group.status = hg.get("status") + host_group.vCPUs = hg.get("vCPUs") + host_group.original_vCPUs = hg.get("original_vCPUs") + host_group.avail_vCPUs = hg.get("avail_vCPUs") + host_group.mem_cap = hg.get("mem") + host_group.original_mem_cap = hg.get("original_mem") + host_group.avail_mem_cap = hg.get("avail_mem") + host_group.local_disk_cap = hg.get("local_disk") + host_group.original_local_disk_cap = hg.get("original_local_disk") + host_group.avail_local_disk_cap = hg.get("avail_local_disk") + host_group.vm_list = hg.get("vm_list") + host_group.volume_list = hg.get("volume_list", []) + + for lgk in hg.get("membership_list"): + host_group.memberships[lgk] = self.logical_groups[lgk] + + for sk in hg.get("switch_list", []): + host_group.switches[sk] = self.switches[sk] + + # host.storages + + self.host_groups[hgk] = host_group + + if len(self.host_groups) > 0: + self.logger.debug("Resource.bootstrap_from_db: host_groups loaded") + else: + self.logger.error("Resource.bootstrap_from_db: fail loading host_groups") + # return False + + dc = _resource_status.get("datacenter") + if dc: + self.datacenter.name = dc.get("name") + self.datacenter.region_code_list = dc.get("region_code_list") + self.datacenter.status = dc.get("status") + self.datacenter.vCPUs = dc.get("vCPUs") + self.datacenter.original_vCPUs = dc.get("original_vCPUs") + self.datacenter.avail_vCPUs = dc.get("avail_vCPUs") + self.datacenter.mem_cap = dc.get("mem") + self.datacenter.original_mem_cap = dc.get("original_mem") + self.datacenter.avail_mem_cap = dc.get("avail_mem") + self.datacenter.local_disk_cap = dc.get("local_disk") + self.datacenter.original_local_disk_cap = dc.get("original_local_disk") + self.datacenter.avail_local_disk_cap = dc.get("avail_local_disk") + self.datacenter.vm_list = dc.get("vm_list") + self.datacenter.volume_list = dc.get("volume_list", []) + + for lgk in dc.get("membership_list"): + 
self.datacenter.memberships[lgk] = self.logical_groups[lgk] + + for sk in dc.get("switch_list", []): + self.datacenter.root_switches[sk] = self.switches[sk] + + # host.storages + + for ck in dc.get("children"): + if ck in self.host_groups.keys(): + self.datacenter.resources[ck] = self.host_groups[ck] + elif ck in self.hosts.keys(): + self.datacenter.resources[ck] = self.hosts[ck] + + if len(self.datacenter.resources) > 0: + self.logger.debug("Resource.bootstrap_from_db: datacenter loaded") + else: + self.logger.error("Resource.bootstrap_from_db: fail loading datacenter") + # return False + + hgs = _resource_status.get("host_groups") + if hgs: + for hgk, hg in hgs.iteritems(): + host_group = self.host_groups[hgk] + + pk = hg.get("parent") + if pk == self.datacenter.name: + host_group.parent_resource = self.datacenter + elif pk in self.host_groups.keys(): + host_group.parent_resource = self.host_groups[pk] + + for ck in hg.get("children"): + if ck in self.hosts.keys(): + host_group.child_resources[ck] = self.hosts[ck] + elif ck in self.host_groups.keys(): + host_group.child_resources[ck] = self.host_groups[ck] + + self.logger.debug("Resource.bootstrap_from_db: host_groups'layout loaded") + + hs = _resource_status.get("hosts") + if hs: + for hk, h in hs.iteritems(): + host = self.hosts[hk] + + pk = h.get("parent") + if pk == self.datacenter.name: + host.host_group = self.datacenter + elif pk in self.host_groups.keys(): + host.host_group = self.host_groups[pk] + + self.logger.debug("Resource.bootstrap_from_db: hosts'layout loaded") + + self._update_compute_avail() + self._update_storage_avail() + self._update_nw_bandwidth_avail() + + self.logger.debug("Resource.bootstrap_from_db: resource availability updated") + + except Exception: + self.logger.error("Resource.bootstrap_from_db - FAILED:" + traceback.format_exc()) + + return True + + def update_topology(self, store=True): + self._update_topology() + + self._update_compute_avail() + self._update_storage_avail() + self._update_nw_bandwidth_avail() + + if store is False: + return True + + ct = self._store_topology_updates() + if ct is None: + return False + else: + self.current_timestamp = ct + return True + + def _update_topology(self): + for level in LEVELS: + for _, host_group in self.host_groups.iteritems(): + if host_group.host_type == level and host_group.check_availability() is True: + if host_group.last_update > self.current_timestamp: + self._update_host_group_topology(host_group) + + if self.datacenter.last_update > self.current_timestamp: + self._update_datacenter_topology() + + def _update_host_group_topology(self, _host_group): + _host_group.init_resources() + del _host_group.vm_list[:] + del _host_group.volume_list[:] + _host_group.storages.clear() + + for _, host in _host_group.child_resources.iteritems(): + if host.check_availability() is True: + _host_group.vCPUs += host.vCPUs + _host_group.original_vCPUs += host.original_vCPUs + _host_group.avail_vCPUs += host.avail_vCPUs + _host_group.mem_cap += host.mem_cap + _host_group.original_mem_cap += host.original_mem_cap + _host_group.avail_mem_cap += host.avail_mem_cap + _host_group.local_disk_cap += host.local_disk_cap + _host_group.original_local_disk_cap += host.original_local_disk_cap + _host_group.avail_local_disk_cap += host.avail_local_disk_cap + + for shk, storage_host in host.storages.iteritems(): + if storage_host.status == "enabled": + _host_group.storages[shk] = storage_host + + for vm_id in host.vm_list: + _host_group.vm_list.append(vm_id) + + for vol_name in 
host.volume_list: + _host_group.volume_list.append(vol_name) + + _host_group.init_memberships() + + for _, host in _host_group.child_resources.iteritems(): + if host.check_availability() is True: + for mk in host.memberships.keys(): + _host_group.memberships[mk] = host.memberships[mk] + + def _update_datacenter_topology(self): + self.datacenter.init_resources() + del self.datacenter.vm_list[:] + del self.datacenter.volume_list[:] + self.datacenter.storages.clear() + self.datacenter.memberships.clear() + + for _, resource in self.datacenter.resources.iteritems(): + if resource.check_availability() is True: + self.datacenter.vCPUs += resource.vCPUs + self.datacenter.original_vCPUs += resource.original_vCPUs + self.datacenter.avail_vCPUs += resource.avail_vCPUs + self.datacenter.mem_cap += resource.mem_cap + self.datacenter.original_mem_cap += resource.original_mem_cap + self.datacenter.avail_mem_cap += resource.avail_mem_cap + self.datacenter.local_disk_cap += resource.local_disk_cap + self.datacenter.original_local_disk_cap += resource.original_local_disk_cap + self.datacenter.avail_local_disk_cap += resource.avail_local_disk_cap + + for shk, storage_host in resource.storages.iteritems(): + if storage_host.status == "enabled": + self.datacenter.storages[shk] = storage_host + + for vm_name in resource.vm_list: + self.datacenter.vm_list.append(vm_name) + + for vol_name in resource.volume_list: + self.datacenter.volume_list.append(vol_name) + + for mk in resource.memberships.keys(): + self.datacenter.memberships[mk] = resource.memberships[mk] + + def _update_compute_avail(self): + self.CPU_avail = self.datacenter.avail_vCPUs + self.mem_avail = self.datacenter.avail_mem_cap + self.local_disk_avail = self.datacenter.avail_local_disk_cap + + def _update_storage_avail(self): + self.disk_avail = 0 + + for _, storage_host in self.storage_hosts.iteritems(): + if storage_host.status == "enabled": + self.disk_avail += storage_host.avail_disk_cap + + def _update_nw_bandwidth_avail(self): + self.nw_bandwidth_avail = 0 + + level = "leaf" + for _, s in self.switches.iteritems(): + if s.status == "enabled": + if level == "leaf": + if s.switch_type == "ToR" or s.switch_type == "spine": + level = s.switch_type + elif level == "ToR": + if s.switch_type == "spine": + level = s.switch_type + + if level == "leaf": + self.nw_bandwidth_avail = sys.maxint + elif level == "ToR": + for _, h in self.hosts.iteritems(): + if h.status == "enabled" and h.state == "up" and \ + ("nova" in h.tag) and ("infra" in h.tag): + avail_nw_bandwidth_list = [sys.maxint] + for sk, s in h.switches.iteritems(): + if s.status == "enabled": + for ulk, ul in s.up_links.iteritems(): + avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth) + self.nw_bandwidth_avail += min(avail_nw_bandwidth_list) + elif level == "spine": + for _, hg in self.host_groups.iteritems(): + if hg.host_type == "rack" and hg.status == "enabled": + avail_nw_bandwidth_list = [sys.maxint] + for _, s in hg.switches.iteritems(): + if s.status == "enabled": + for _, ul in s.up_links.iteritems(): + avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth) + # NOTE: peer links? 
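+ # Illustrative note (not from the original patch): at "spine" level the + # available bandwidth of each enabled rack is min() over its enabled + # up-links, and the datacenter total is the sum across racks; e.g., two + # racks whose narrowest up-links are 40000 and 10000 yield a + # nw_bandwidth_avail of 50000 (made-up numbers, in the same units as the + # bandwidth_of_* settings in ostro_sim.cfg).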
+ self.nw_bandwidth_avail += min(avail_nw_bandwidth_list) + + def _store_topology_updates(self): + last_update_time = self.current_timestamp + + flavor_updates = {} + logical_group_updates = {} + storage_updates = {} + switch_updates = {} + host_updates = {} + host_group_updates = {} + datacenter_update = None + + for fk, flavor in self.flavors.iteritems(): + if flavor.last_update > self.current_timestamp: + flavor_updates[fk] = flavor.get_json_info() + + last_update_time = flavor.last_update + + for lgk, lg in self.logical_groups.iteritems(): + if lg.last_update > self.current_timestamp: + logical_group_updates[lgk] = lg.get_json_info() + + last_update_time = lg.last_update + + for shk, storage_host in self.storage_hosts.iteritems(): + if storage_host.last_update > self.current_timestamp or \ + storage_host.last_cap_update > self.current_timestamp: + storage_updates[shk] = storage_host.get_json_info() + + if storage_host.last_update > self.current_timestamp: + last_update_time = storage_host.last_update + if storage_host.last_cap_update > self.current_timestamp: + last_update_time = storage_host.last_cap_update + + for sk, s in self.switches.iteritems(): + if s.last_update > self.current_timestamp: + switch_updates[sk] = s.get_json_info() + + last_update_time = s.last_update + + for hk, host in self.hosts.iteritems(): + if host.last_update > self.current_timestamp or host.last_link_update > self.current_timestamp: + host_updates[hk] = host.get_json_info() + + if host.last_update > self.current_timestamp: + last_update_time = host.last_update + if host.last_link_update > self.current_timestamp: + last_update_time = host.last_link_update + + for hgk, host_group in self.host_groups.iteritems(): + if host_group.last_update > self.current_timestamp or \ + host_group.last_link_update > self.current_timestamp: + host_group_updates[hgk] = host_group.get_json_info() + + if host_group.last_update > self.current_timestamp: + last_update_time = host_group.last_update + if host_group.last_link_update > self.current_timestamp: + last_update_time = host_group.last_link_update + + if self.datacenter.last_update > self.current_timestamp or \ + self.datacenter.last_link_update > self.current_timestamp: + datacenter_update = self.datacenter.get_json_info() + + if self.datacenter.last_update > self.current_timestamp: + last_update_time = self.datacenter.last_update + if self.datacenter.last_link_update > self.current_timestamp: + last_update_time = self.datacenter.last_link_update + + (resource_logfile, last_index, mode) = util.get_last_logfile(self.config.resource_log_loc, + self.config.max_log_size, + self.config.max_num_of_logs, + self.datacenter.name, + self.last_log_index) + self.last_log_index = last_index + + # file handle for the resource status log (no logging module is imported here) + logging = open(self.config.resource_log_loc + resource_logfile, mode) + + json_logging = {} + json_logging['timestamp'] = last_update_time + + if len(flavor_updates) > 0: + json_logging['flavors'] = flavor_updates + if len(logical_group_updates) > 0: + json_logging['logical_groups'] = logical_group_updates + if len(storage_updates) > 0: + json_logging['storages'] = storage_updates + if len(switch_updates) > 0: + json_logging['switches'] = switch_updates + if len(host_updates) > 0: + json_logging['hosts'] = host_updates + if len(host_group_updates) > 0: + json_logging['host_groups'] = host_group_updates + if datacenter_update is not None: + json_logging['datacenter'] = datacenter_update + + logged_data = json.dumps(json_logging) + + logging.write(logged_data) + logging.write("\n") + +
logging.close() + + self.logger.info("Resource._store_topology_updates: log resource status in " + resource_logfile) + + if self.db is not None: + if self.db.update_resource_status(self.datacenter.name, json_logging) is False: + return None + if self.db.update_resource_log_index(self.datacenter.name, self.last_log_index) is False: + return None + + return last_update_time + + def update_rack_resource(self, _host): + rack = _host.host_group + + if rack is not None: + rack.last_update = time.time() + + if isinstance(rack, HostGroup): + self.update_cluster_resource(rack) + + def update_cluster_resource(self, _rack): + cluster = _rack.parent_resource + + if cluster is not None: + cluster.last_update = time.time() + + if isinstance(cluster, HostGroup): + self.datacenter.last_update = time.time() + + def get_uuid(self, _h_uuid, _host_name): + host = self.hosts[_host_name] + + return host.get_uuid(_h_uuid) + + def add_vm_to_host(self, _host_name, _vm_id, _vcpus, _mem, _ldisk): + host = self.hosts[_host_name] + + host.vm_list.append(_vm_id) + + host.avail_vCPUs -= _vcpus + host.avail_mem_cap -= _mem + host.avail_local_disk_cap -= _ldisk + + host.vCPUs_used += _vcpus + host.free_mem_mb -= _mem + host.free_disk_gb -= _ldisk + host.disk_available_least -= _ldisk + + def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem, _ldisk): + host = self.hosts[_host_name] + + host.remove_vm_by_h_uuid(_h_uuid) + + host.avail_vCPUs += _vcpus + host.avail_mem_cap += _mem + host.avail_local_disk_cap += _ldisk + + host.vCPUs_used -= _vcpus + host.free_mem_mb += _mem + host.free_disk_gb += _ldisk + host.disk_available_least += _ldisk + + def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem, _ldisk): + host = self.hosts[_host_name] + + host.remove_vm_by_uuid(_uuid) + + host.avail_vCPUs += _vcpus + host.avail_mem_cap += _mem + host.avail_local_disk_cap += _ldisk + + host.vCPUs_used -= _vcpus + host.free_mem_mb += _mem + host.free_disk_gb += _ldisk + host.disk_available_least += _ldisk + + def add_vol_to_host(self, _host_name, _storage_name, _v_id, _disk): + host = self.hosts[_host_name] + + host.volume_list.append(_v_id) + + storage_host = self.storage_hosts[_storage_name] + storage_host.volume_list.append(_v_id) + + storage_host.avail_disk_cap -= _disk + + # NOTE: Assume the up-link of spine switch is not used except out-going from datacenter + # NOTE: What about peer-switches? 
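+ # Illustrative note (not from the original patch): the deduction below + # cascades with the placement level. A "host"-level placement charges only + # the host's own up-links; "rack" also charges the rack switches; "cluster" + # additionally charges the spine up-links, e.g.: + # + # resource.deduct_bandwidth("host01", "cluster", 100) + # + # "host01" and the figure 100 (in the same units as the switch bandwidth + # settings) are made-up example values.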
+ def deduct_bandwidth(self, _host_name, _placement_level, _bandwidth): + host = self.hosts[_host_name] + + if _placement_level == "host": + self._deduct_host_bandwidth(host, _bandwidth) + + elif _placement_level == "rack": + self._deduct_host_bandwidth(host, _bandwidth) + + rack = host.host_group + if not isinstance(rack, Datacenter): + self._deduct_host_bandwidth(rack, _bandwidth) + + elif _placement_level == "cluster": + self._deduct_host_bandwidth(host, _bandwidth) + + rack = host.host_group + self._deduct_host_bandwidth(rack, _bandwidth) + + cluster = rack.parent_resource + for _, s in cluster.switches.iteritems(): + if s.switch_type == "spine": + for _, ul in s.up_links.iteritems(): + ul.avail_nw_bandwidth -= _bandwidth + + s.last_update = time.time() + + def _deduct_host_bandwidth(self, _host, _bandwidth): + for _, hs in _host.switches.iteritems(): + for _, ul in hs.up_links.iteritems(): + ul.avail_nw_bandwidth -= _bandwidth + + hs.last_update = time.time() + + def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem, _ldisk, _fldisk, _avail_least): + updated = False + + host = self.hosts[_hn] + + if host.status != _st: + host.status = _st + self.logger.debug("Resource.update_host_resources: host status changed") + updated = True + + if host.original_vCPUs != _vcpus or \ + host.vCPUs_used != _vcpus_used: + self.logger.debug("Resource.update_host_resources: host cpu changed") + host.original_vCPUs = _vcpus + host.vCPUs_used = _vcpus_used + updated = True + + if host.free_mem_mb != _fmem or \ + host.original_mem_cap != _mem: + self.logger.debug("Resource.update_host_resources: host mem changed") + host.free_mem_mb = _fmem + host.original_mem_cap = _mem + updated = True + + if host.free_disk_gb != _fldisk or \ + host.original_local_disk_cap != _ldisk or \ + host.disk_available_least != _avail_least: + self.logger.debug("Resource.update_host_resources: host disk changed") + host.free_disk_gb = _fldisk + host.original_local_disk_cap = _ldisk + host.disk_available_least = _avail_least + updated = True + + if updated is True: + self.compute_avail_resources(_hn, host) + + return updated + + def update_host_time(self, _host_name): + host = self.hosts[_host_name] + + host.last_update = time.time() + self.update_rack_resource(host) + + def update_storage_time(self, _storage_name): + storage_host = self.storage_hosts[_storage_name] + + storage_host.last_cap_update = time.time() + + def add_logical_group(self, _host_name, _lg_name, _lg_type): + host = None + if _host_name in self.hosts.keys(): + host = self.hosts[_host_name] + else: + host = self.host_groups[_host_name] + + if host is not None: + if _lg_name not in self.logical_groups.keys(): + logical_group = LogicalGroup(_lg_name) + logical_group.group_type = _lg_type + logical_group.last_update = time.time() + self.logical_groups[_lg_name] = logical_group + + if _lg_name not in host.memberships.keys(): + host.memberships[_lg_name] = self.logical_groups[_lg_name] + + if isinstance(host, HostGroup): + host.last_update = time.time() + + self.update_cluster_resource(host) + + def add_vm_to_logical_groups(self, _host, _vm_id, _logical_groups_of_vm): + for lgk in _host.memberships.keys(): + if lgk in _logical_groups_of_vm: + lg = self.logical_groups[lgk] + + if isinstance(_host, Host): + if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: + lg.last_update = time.time() + elif isinstance(_host, HostGroup): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == _host.host_type: + if 
lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: + lg.last_update = time.time() + + if isinstance(_host, Host) and _host.host_group is not None: + self.add_vm_to_logical_groups(_host.host_group, _vm_id, _logical_groups_of_vm) + elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.add_vm_to_logical_groups(_host.parent_resource, _vm_id, _logical_groups_of_vm) + + def remove_vm_by_h_uuid_from_logical_groups(self, _host, _h_uuid): + for lgk in _host.memberships.keys(): + if lgk not in self.logical_groups.keys(): + continue + lg = self.logical_groups[lgk] + + if isinstance(_host, Host): + if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True: + lg.last_update = time.time() + + if _host.remove_membership(lg) is True: + _host.last_update = time.time() + + elif isinstance(_host, HostGroup): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == _host.host_type: + if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True: + lg.last_update = time.time() + + if _host.remove_membership(lg) is True: + _host.last_update = time.time() + + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if len(lg.vm_list) == 0: + del self.logical_groups[lgk] + + if isinstance(_host, Host) and _host.host_group is not None: + self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group, _h_uuid) + elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource, _h_uuid) + + def remove_vm_by_uuid_from_logical_groups(self, _host, _uuid): + for lgk in _host.memberships.keys(): + if lgk not in self.logical_groups.keys(): + continue + lg = self.logical_groups[lgk] + + if isinstance(_host, Host): + if lg.remove_vm_by_uuid(_uuid, _host.name) is True: + lg.last_update = time.time() + + if _host.remove_membership(lg) is True: + _host.last_update = time.time() + + elif isinstance(_host, HostGroup): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == _host.host_type: + if lg.remove_vm_by_uuid(_uuid, _host.name) is True: + lg.last_update = time.time() + + if _host.remove_membership(lg) is True: + _host.last_update = time.time() + + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if len(lg.vm_list) == 0: + del self.logical_groups[lgk] + + if isinstance(_host, Host) and _host.host_group is not None: + self.remove_vm_by_uuid_from_logical_groups(_host.host_group, _uuid) + elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource, _uuid) + + def clean_none_vms_from_logical_groups(self, _host): + for lgk in _host.memberships.keys(): + if lgk not in self.logical_groups.keys(): + continue + lg = self.logical_groups[lgk] + + if isinstance(_host, Host): + if lg.clean_none_vms(_host.name) is True: + lg.last_update = time.time() + + if _host.remove_membership(lg) is True: + _host.last_update = time.time() + + elif isinstance(_host, HostGroup): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == _host.host_type: + if lg.clean_none_vms(_host.name) is True: + lg.last_update = time.time() + + if _host.remove_membership(lg) is True: + _host.last_update = time.time() + + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if len(lg.vm_list) == 0: + del self.logical_groups[lgk] + + if isinstance(_host, Host) and 
_host.host_group is not None: + self.clean_none_vms_from_logical_groups(_host.host_group) + elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.clean_none_vms_from_logical_groups(_host.parent_resource) + + def update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): + for lgk in _host.memberships.keys(): + lg = self.logical_groups[lgk] + + if isinstance(_host, Host): + if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: + lg.last_update = time.time() + elif isinstance(_host, HostGroup): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == _host.host_type: + if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: + lg.last_update = time.time() + + if isinstance(_host, Host) and _host.host_group is not None: + self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) + elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) + + def update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): + for lgk in _host.memberships.keys(): + lg = self.logical_groups[lgk] + + if isinstance(_host, Host): + if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: + lg.last_update = time.time() + elif isinstance(_host, HostGroup): + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lgk.split(":")[0] == _host.host_type: + if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: + lg.last_update = time.time() + + if isinstance(_host, Host) and _host.host_group is not None: + self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) + elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) + + def compute_avail_resources(self, hk, host): + ram_allocation_ratio_list = [] + cpu_allocation_ratio_list = [] + disk_allocation_ratio_list = [] + + for _, lg in host.memberships.iteritems(): + if lg.group_type == "AGGR": + if "ram_allocation_ratio" in lg.metadata.keys(): + ram_allocation_ratio_list.append(float(lg.metadata["ram_allocation_ratio"])) + if "cpu_allocation_ratio" in lg.metadata.keys(): + cpu_allocation_ratio_list.append(float(lg.metadata["cpu_allocation_ratio"])) + if "disk_allocation_ratio" in lg.metadata.keys(): + disk_allocation_ratio_list.append(float(lg.metadata["disk_allocation_ratio"])) + + ram_allocation_ratio = 1.0 + if len(ram_allocation_ratio_list) > 0: + ram_allocation_ratio = min(ram_allocation_ratio_list) + else: + if self.config.default_ram_allocation_ratio > 0: + ram_allocation_ratio = self.config.default_ram_allocation_ratio + + static_ram_standby_ratio = 0 + if self.config.static_mem_standby_ratio > 0: + static_ram_standby_ratio = float(self.config.static_mem_standby_ratio) / float(100) + + host.compute_avail_mem(ram_allocation_ratio, static_ram_standby_ratio) + + self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_mem = " + + str(host.mem_cap) + ", avail_mem = " + str(host.avail_mem_cap)) + + cpu_allocation_ratio = 1.0 + if len(cpu_allocation_ratio_list) > 0: + cpu_allocation_ratio = min(cpu_allocation_ratio_list) + else: + if self.config.default_cpu_allocation_ratio > 0: + cpu_allocation_ratio = self.config.default_cpu_allocation_ratio + + static_cpu_standby_ratio = 0 + if self.config.static_cpu_standby_ratio > 0: + static_cpu_standby_ratio = float(self.config.static_cpu_standby_ratio) / float(100) + + 
host.compute_avail_vCPUs(cpu_allocation_ratio, static_cpu_standby_ratio) + + self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_vCPUs = " + + str(host.vCPUs) + ", avail_vCPUs = " + str(host.avail_vCPUs)) + + disk_allocation_ratio = 1.0 + if len(disk_allocation_ratio_list) > 0: + disk_allocation_ratio = min(disk_allocation_ratio_list) + else: + if self.config.default_disk_allocation_ratio > 0: + disk_allocation_ratio = self.config.default_disk_allocation_ratio + + static_disk_standby_ratio = 0 + if self.config.static_local_disk_standby_ratio > 0: + static_disk_standby_ratio = float(self.config.static_local_disk_standby_ratio) / float(100) + + host.compute_avail_disk(disk_allocation_ratio, static_disk_standby_ratio) + + self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_local_disk = " + + str(host.local_disk_cap) + ", avail_local_disk = " + str(host.avail_local_disk_cap)) + + def get_flavor(self, _name): + flavor = None + + if _name in self.flavors.keys(): + if self.flavors[_name].status == "enabled": + flavor = self.flavors[_name] + + return flavor diff --git a/valet/engine/resource_manager/resource_base.py b/valet/engine/resource_manager/resource_base.py new file mode 100755 index 0000000..a013702 --- /dev/null +++ b/valet/engine/resource_manager/resource_base.py @@ -0,0 +1,684 @@ +#!/bin/python + +# Modified: Sep. 27, 2016 + + +from valet.engine.optimizer.app_manager.app_topology_base import LEVELS + + +class Datacenter(object): + + def __init__(self, _name): + self.name = _name + + self.region_code_list = [] + + self.status = "enabled" + + self.memberships = {} # all available logical groups (e.g., aggregate) in the datacenter + + self.vCPUs = 0 + self.original_vCPUs = 0 + self.avail_vCPUs = 0 + self.mem_cap = 0 # MB + self.original_mem_cap = 0 + self.avail_mem_cap = 0 + self.local_disk_cap = 0 # GB, ephemeral + self.original_local_disk_cap = 0 + self.avail_local_disk_cap = 0 + + self.root_switches = {} + self.storages = {} + + self.resources = {} + + self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.volume_list = [] # a list of placed volumes + + self.last_update = 0 + self.last_link_update = 0 + + def init_resources(self): + self.vCPUs = 0 + self.original_vCPUs = 0 + self.avail_vCPUs = 0 + self.mem_cap = 0 # MB + self.original_mem_cap = 0 + self.avail_mem_cap = 0 + self.local_disk_cap = 0 # GB, ephemeral + self.original_local_disk_cap = 0 + self.avail_local_disk_cap = 0 + + def get_json_info(self): + membership_list = [] + for lgk in self.memberships.keys(): + membership_list.append(lgk) + + switch_list = [] + for sk in self.root_switches.keys(): + switch_list.append(sk) + + storage_list = [] + for shk in self.storages.keys(): + storage_list.append(shk) + + child_list = [] + for ck in self.resources.keys(): + child_list.append(ck) + + return {'status': self.status, + 'name': self.name, + 'region_code_list': self.region_code_list, + 'membership_list': membership_list, + 'vCPUs': self.vCPUs, + 'original_vCPUs': self.original_vCPUs, + 'avail_vCPUs': self.avail_vCPUs, + 'mem': self.mem_cap, + 'original_mem': self.original_mem_cap, + 'avail_mem': self.avail_mem_cap, + 'local_disk': self.local_disk_cap, + 'original_local_disk': self.original_local_disk_cap, + 'avail_local_disk': self.avail_local_disk_cap, + 'switch_list': switch_list, + 'storage_list': storage_list, + 'children': child_list, + 'vm_list': self.vm_list, + 'volume_list': self.volume_list, + 'last_update': self.last_update, + 
'last_link_update': self.last_link_update} + + +# data container for rack or cluster +class HostGroup(object): + + def __init__(self, _id): + self.name = _id + self.host_type = "rack" # rack or cluster(e.g., power domain, zone) + + self.status = "enabled" + + self.memberships = {} # all available logical groups (e.g., aggregate) in this group + + self.vCPUs = 0 + self.original_vCPUs = 0 + self.avail_vCPUs = 0 + self.mem_cap = 0 # MB + self.original_mem_cap = 0 + self.avail_mem_cap = 0 + self.local_disk_cap = 0 # GB, ephemeral + self.original_local_disk_cap = 0 + self.avail_local_disk_cap = 0 + + self.switches = {} # ToRs + self.storages = {} + + self.parent_resource = None # e.g., datacenter + self.child_resources = {} # e.g., hosting servers + + self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.volume_list = [] # a list of placed volumes + + self.last_update = 0 + self.last_link_update = 0 + + def init_resources(self): + self.vCPUs = 0 + self.original_vCPUs = 0 + self.avail_vCPUs = 0 + self.mem_cap = 0 # MB + self.original_mem_cap = 0 + self.avail_mem_cap = 0 + self.local_disk_cap = 0 # GB, ephemeral + self.original_local_disk_cap = 0 + self.avail_local_disk_cap = 0 + + def init_memberships(self): + for lgk in self.memberships.keys(): + lg = self.memberships[lgk] + if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + level = lg.name.split(":")[0] + if LEVELS.index(level) < LEVELS.index(self.host_type) or self.name not in lg.vms_per_host.keys(): + del self.memberships[lgk] + else: + del self.memberships[lgk] + + def remove_membership(self, _lg): + cleaned = False + + if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV": + if self.name not in _lg.vms_per_host.keys(): + del self.memberships[_lg.name] + cleaned = True + + return cleaned + + def check_availability(self): + if self.status == "enabled": + return True + else: + return False + + def get_json_info(self): + membership_list = [] + for lgk in self.memberships.keys(): + membership_list.append(lgk) + + switch_list = [] + for sk in self.switches.keys(): + switch_list.append(sk) + + storage_list = [] + for shk in self.storages.keys(): + storage_list.append(shk) + + child_list = [] + for ck in self.child_resources.keys(): + child_list.append(ck) + + return {'status': self.status, + 'host_type': self.host_type, + 'membership_list': membership_list, + 'vCPUs': self.vCPUs, + 'original_vCPUs': self.original_vCPUs, + 'avail_vCPUs': self.avail_vCPUs, + 'mem': self.mem_cap, + 'original_mem': self.original_mem_cap, + 'avail_mem': self.avail_mem_cap, + 'local_disk': self.local_disk_cap, + 'original_local_disk': self.original_local_disk_cap, + 'avail_local_disk': self.avail_local_disk_cap, + 'switch_list': switch_list, + 'storage_list': storage_list, + 'parent': self.parent_resource.name, + 'children': child_list, + 'vm_list': self.vm_list, + 'volume_list': self.volume_list, + 'last_update': self.last_update, + 'last_link_update': self.last_link_update} + + +class Host(object): + + def __init__(self, _name): + self.name = _name + + self.tag = [] # mark if this is synch'ed by multiple sources + self.status = "enabled" + self.state = "up" + + self.memberships = {} # logical group (e.g., aggregate) this hosting server is involved in + + self.vCPUs = 0 + self.original_vCPUs = 0 + self.avail_vCPUs = 0 + self.mem_cap = 0 # MB + self.original_mem_cap = 0 + self.avail_mem_cap = 0 + self.local_disk_cap = 0 # GB, ephemeral + self.original_local_disk_cap = 0 + 
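+        # each capacity is kept as a triple: original_* holds the raw value
+        # reported by the compute source, the unprefixed value is that figure
+        # scaled by the overcommit and standby ratios (see compute_avail_*),
+        # and avail_* subtracts what is already in use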
self.avail_local_disk_cap = 0 + + self.vCPUs_used = 0 + self.free_mem_mb = 0 + self.free_disk_gb = 0 + self.disk_available_least = 0 + + self.switches = {} # leaf + self.storages = {} + + self.host_group = None # e.g., rack + + self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.volume_list = [] # a list of placed volumes + + self.last_update = 0 + self.last_link_update = 0 + + def clean_memberships(self): + cleaned = False + + for lgk in self.memberships.keys(): + lg = self.memberships[lgk] + if self.name not in lg.vms_per_host.keys(): + del self.memberships[lgk] + cleaned = True + + return cleaned + + def remove_membership(self, _lg): + cleaned = False + + if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV": + if self.name not in _lg.vms_per_host.keys(): + del self.memberships[_lg.name] + cleaned = True + + return cleaned + + def check_availability(self): + if self.status == "enabled" and self.state == "up" and ("nova" in self.tag) and ("infra" in self.tag): + return True + else: + return False + + def get_uuid(self, _h_uuid): + uuid = None + + for vm_id in self.vm_list: + if vm_id[0] == _h_uuid: + uuid = vm_id[2] + break + + return uuid + + def exist_vm_by_h_uuid(self, _h_uuid): + exist = False + + for vm_id in self.vm_list: + if vm_id[0] == _h_uuid: + exist = True + break + + return exist + + def exist_vm_by_uuid(self, _uuid): + exist = False + + for vm_id in self.vm_list: + if vm_id[2] == _uuid: + exist = True + break + + return exist + + def remove_vm_by_h_uuid(self, _h_uuid): + success = False + + for vm_id in self.vm_list: + if vm_id[0] == _h_uuid: + self.vm_list.remove(vm_id) + success = True + break + + return success + + def remove_vm_by_uuid(self, _uuid): + success = False + + for vm_id in self.vm_list: + if vm_id[2] == _uuid: + self.vm_list.remove(vm_id) + success = True + break + + return success + + def update_uuid(self, _h_uuid, _uuid): + success = False + + vm_name = "none" + for vm_id in self.vm_list: + if vm_id[0] == _h_uuid: + vm_name = vm_id[1] + self.vm_list.remove(vm_id) + success = True + break + + if success is True: + vm_id = (_h_uuid, vm_name, _uuid) + self.vm_list.append(vm_id) + + return success + + def update_h_uuid(self, _h_uuid, _uuid): + success = False + + vm_name = "none" + for vm_id in self.vm_list: + if vm_id[2] == _uuid: + vm_name = vm_id[1] + self.vm_list.remove(vm_id) + success = True + break + + if success is True: + vm_id = (_h_uuid, vm_name, _uuid) + self.vm_list.append(vm_id) + + return success + + def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio): + self.vCPUs = self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio) + + self.avail_vCPUs = self.vCPUs - self.vCPUs_used + + def compute_avail_mem(self, _overcommit_ratio, _standby_ratio): + self.mem_cap = self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio) + + used_mem_mb = self.original_mem_cap - self.free_mem_mb + + self.avail_mem_cap = self.mem_cap - used_mem_mb + + def compute_avail_disk(self, _overcommit_ratio, _standby_ratio): + self.local_disk_cap = self.original_local_disk_cap * _overcommit_ratio * (1.0 - _standby_ratio) + + free_disk_cap = self.free_disk_gb + if self.disk_available_least > 0: + free_disk_cap = min(self.free_disk_gb, self.disk_available_least) + + used_disk_cap = self.original_local_disk_cap - free_disk_cap + + self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap + + def get_json_info(self): + membership_list = [] + for lgk in self.memberships.keys(): + 
membership_list.append(lgk) + + switch_list = [] + for sk in self.switches.keys(): + switch_list.append(sk) + + storage_list = [] + for shk in self.storages.keys(): + storage_list.append(shk) + + return {'tag': self.tag, 'status': self.status, 'state': self.state, + 'membership_list': membership_list, + 'vCPUs': self.vCPUs, + 'original_vCPUs': self.original_vCPUs, + 'avail_vCPUs': self.avail_vCPUs, + 'mem': self.mem_cap, + 'original_mem': self.original_mem_cap, + 'avail_mem': self.avail_mem_cap, + 'local_disk': self.local_disk_cap, + 'original_local_disk': self.original_local_disk_cap, + 'avail_local_disk': self.avail_local_disk_cap, + 'vCPUs_used': self.vCPUs_used, + 'free_mem_mb': self.free_mem_mb, + 'free_disk_gb': self.free_disk_gb, + 'disk_available_least': self.disk_available_least, + 'switch_list': switch_list, + 'storage_list': storage_list, + 'parent': self.host_group.name, + 'vm_list': self.vm_list, + 'volume_list': self.volume_list, + 'last_update': self.last_update, + 'last_link_update': self.last_link_update} + + +class LogicalGroup(object): + + def __init__(self, _name): + self.name = _name + self.group_type = "AGGR" # AGGR, AZ, INTG, EX, DIV, or AFF + + self.status = "enabled" + + self.metadata = {} # any metadata to be matched when placing nodes + + self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.volume_list = [] # a list of placed volumes + + self.vms_per_host = {} # key = host_id, value = a list of placed vms + + self.last_update = 0 + + def exist_vm_by_h_uuid(self, _h_uuid): + exist = False + + for vm_id in self.vm_list: + if vm_id[0] == _h_uuid: + exist = True + break + + return exist + + def exist_vm_by_uuid(self, _uuid): + exist = False + + for vm_id in self.vm_list: + if vm_id[2] == _uuid: + exist = True + break + + return exist + + def update_uuid(self, _h_uuid, _uuid, _host_id): + success = False + + vm_name = "none" + for vm_id in self.vm_list: + if vm_id[0] == _h_uuid: + vm_name = vm_id[1] + self.vm_list.remove(vm_id) + success = True + break + + if _host_id in self.vms_per_host.keys(): + for host_vm_id in self.vms_per_host[_host_id]: + if host_vm_id[0] == _h_uuid: + self.vms_per_host[_host_id].remove(host_vm_id) + success = True + break + + if success is True: + vm_id = (_h_uuid, vm_name, _uuid) + self.vm_list.append(vm_id) + if _host_id in self.vms_per_host.keys(): + self.vms_per_host[_host_id].append(vm_id) + + return success + + def update_h_uuid(self, _h_uuid, _uuid, _host_id): + success = False + + vm_name = "none" + for vm_id in self.vm_list: + if vm_id[2] == _uuid: + vm_name = vm_id[1] + self.vm_list.remove(vm_id) + success = True + break + + if _host_id in self.vms_per_host.keys(): + for host_vm_id in self.vms_per_host[_host_id]: + if host_vm_id[2] == _uuid: + self.vms_per_host[_host_id].remove(host_vm_id) + success = True + break + + if success is True: + vm_id = (_h_uuid, vm_name, _uuid) + self.vm_list.append(vm_id) + if _host_id in self.vms_per_host.keys(): + self.vms_per_host[_host_id].append(vm_id) + + return success + + def add_vm_by_h_uuid(self, _vm_id, _host_id): + success = False + + if self.exist_vm_by_h_uuid(_vm_id[0]) is False: + self.vm_list.append(_vm_id) + + if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": + if _host_id not in self.vms_per_host.keys(): + self.vms_per_host[_host_id] = [] + self.vms_per_host[_host_id].append(_vm_id) + + success = True + + return success + + def remove_vm_by_h_uuid(self, _h_uuid, _host_id): + success = False + + for vm_id in 
self.vm_list:
+            if vm_id[0] == _h_uuid:
+                self.vm_list.remove(vm_id)
+                success = True
+                break
+
+        if _host_id in self.vms_per_host.keys():
+            for host_vm_id in self.vms_per_host[_host_id]:
+                if host_vm_id[0] == _h_uuid:
+                    self.vms_per_host[_host_id].remove(host_vm_id)
+                    success = True
+                    break
+
+        if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
+            if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0:
+                del self.vms_per_host[_host_id]
+
+        return success
+
+    def remove_vm_by_uuid(self, _uuid, _host_id):
+        success = False
+
+        for vm_id in self.vm_list:
+            if vm_id[2] == _uuid:
+                self.vm_list.remove(vm_id)
+                success = True
+                break
+
+        if _host_id in self.vms_per_host.keys():
+            for host_vm_id in self.vms_per_host[_host_id]:
+                if host_vm_id[2] == _uuid:
+                    self.vms_per_host[_host_id].remove(host_vm_id)
+                    success = True
+                    break
+
+        if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
+            if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0:
+                del self.vms_per_host[_host_id]
+
+        return success
+
+    def clean_none_vms(self, _host_id):
+        success = False
+
+        # iterate over copies: removing from a list while iterating it
+        # would skip the element that follows each removal
+        for vm_id in self.vm_list[:]:
+            if vm_id[2] == "none":
+                self.vm_list.remove(vm_id)
+                success = True
+
+        if _host_id in self.vms_per_host.keys():
+            for vm_id in self.vms_per_host[_host_id][:]:
+                if vm_id[2] == "none":
+                    self.vms_per_host[_host_id].remove(vm_id)
+                    success = True
+
+        if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
+            if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0:
+                del self.vms_per_host[_host_id]
+
+        return success
+
+    def get_json_info(self):
+        return {'status': self.status,
+                'group_type': self.group_type,
+                'metadata': self.metadata,
+                'vm_list': self.vm_list,
+                'volume_list': self.volume_list,
+                'vms_per_host': self.vms_per_host,
+                'last_update': self.last_update}
+
+
+class Switch(object):
+
+    def __init__(self, _switch_id):
+        self.name = _switch_id
+        self.switch_type = "ToR"  # root, spine, ToR, or leaf
+
+        self.status = "enabled"
+
+        self.up_links = {}
+        self.down_links = {}  # currently not used
+        self.peer_links = {}
+
+        self.last_update = 0
+
+    def get_json_info(self):
+        ulinks = {}
+        for ulk, ul in self.up_links.iteritems():
+            ulinks[ulk] = ul.get_json_info()
+
+        plinks = {}
+        for plk, pl in self.peer_links.iteritems():
+            plinks[plk] = pl.get_json_info()
+
+        return {'status': self.status,
+                'switch_type': self.switch_type,
+                'up_links': ulinks,
+                'peer_links': plinks,
+                'last_update': self.last_update}
+
+
+class Link(object):
+
+    def __init__(self, _name):
+        self.name = _name  # format: source + "-" + target
+        self.resource = None  # switch being connected to
+
+        self.nw_bandwidth = 0  # Mbps
+        self.avail_nw_bandwidth = 0
+
+    def get_json_info(self):
+        return {'resource': self.resource.name,
+                'bandwidth': self.nw_bandwidth,
+                'avail_bandwidth': self.avail_nw_bandwidth}
+
+
+class StorageHost(object):
+
+    def __init__(self, _name):
+        self.name = _name
+        self.storage_class = None  # tiering, e.g., platinum, gold, silver
+
+        self.status = "enabled"
+        self.host_list = []
+
+        self.disk_cap = 0  # GB
+        self.avail_disk_cap = 0
+
+        self.volume_list = []  # list of volume names placed in this host
+
+        self.last_update = 0
+        self.last_cap_update = 0
+
+    def get_json_info(self):
+        return {'status': self.status,
+                'class': self.storage_class,
+                'host_list': self.host_list,
+                'disk': self.disk_cap,
+                'avail_disk': self.avail_disk_cap,
+                'volume_list':
self.volume_list, + 'last_update': self.last_update, + 'last_cap_update': self.last_cap_update} + + +class Flavor(object): + + def __init__(self, _name): + self.name = _name + self.flavor_id = None + + self.status = "enabled" + + self.vCPUs = 0 + self.mem_cap = 0 # MB + self.disk_cap = 0 # including ephemeral (GB) and swap (MB) + + self.extra_specs = {} + + self.last_update = 0 + + def get_json_info(self): + return {'status': self.status, + 'flavor_id': self.flavor_id, + 'vCPUs': self.vCPUs, + 'mem': self.mem_cap, + 'disk': self.disk_cap, + 'extra_specs': self.extra_specs, + 'last_update': self.last_update} diff --git a/valet/engine/resource_manager/simulation/__init__.py b/valet/engine/resource_manager/simulation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/engine/resource_manager/simulation/compute_simulator.py b/valet/engine/resource_manager/simulation/compute_simulator.py new file mode 100644 index 0000000..bcdf6d6 --- /dev/null +++ b/valet/engine/resource_manager/simulation/compute_simulator.py @@ -0,0 +1,135 @@ +#!/bin/python + + +################################################################################################################# +# Author: Gueyoung Jung +# Contact: gjung@research.att.com +# Version 2.0.2: Feb. 9, 2016 +# +# Functions +# - Simulate hosts and flavors +# +################################################################################################################# + + +from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor + + +class SimCompute(object): + + def __init__(self, _config): + self.config = _config + + def set_hosts(self, _hosts, _logical_groups): + self._set_availability_zones(_hosts, _logical_groups) + + self._set_aggregates(_hosts, _logical_groups) + + self._set_placed_vms(_hosts, _logical_groups) + + self._set_resources(_hosts) + + return "success" + + def _set_availability_zones(self, _hosts, _logical_groups): + logical_group = LogicalGroup("nova") + logical_group.group_type = "AZ" + _logical_groups[logical_group.name] = logical_group + + for r_num in range(0, self.config.num_of_racks): + + # for test + ''' + num_of_hosts = 0 + if r_num == 1: + num_of_hosts = 1 + else: + num_of_hosts = 2 + for h_num in range(0, num_of_hosts): + ''' + for h_num in range(0, self.config.num_of_hosts_per_rack): + host = Host(self.config.mode + "0r" + str(r_num) + "c" + str(h_num)) + host.tag.append("nova") + host.memberships["nova"] = logical_group + + logical_group.vms_per_host[host.name] = [] + + _hosts[host.name] = host + + def _set_aggregates(self, _hosts, _logical_groups): + for a_num in range(0, self.config.num_of_aggregates): + metadata = {} + metadata["cpu_allocation_ratio"] = "0.5" + + aggregate = LogicalGroup("aggregate" + str(a_num)) + aggregate.group_type = "AGGR" + aggregate.metadata = metadata + + _logical_groups[aggregate.name] = aggregate + + for a_num in range(0, self.config.num_of_aggregates): + aggregate = _logical_groups["aggregate" + str(a_num)] + for r_num in range(0, self.config.num_of_racks): + for h_num in range(0, self.config.num_of_hosts_per_rack): + host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num) + if host_name in _hosts.keys(): + if (h_num % (self.config.aggregated_ratio + a_num)) == 0: + host = _hosts[host_name] + host.memberships[aggregate.name] = aggregate + + aggregate.vms_per_host[host.name] = [] + + def _set_placed_vms(self, _hosts, _logical_groups): + pass + + def _set_resources(self, _hosts): + for r_num in range(0, self.config.num_of_racks): + 
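+            # simulated host names follow "<mode>0r<rack>c<host>", e.g.
+            # "sim0r2c0" is the first host in the third rack when the
+            # configured mode is "sim"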
+ # for test + ''' + num_of_hosts = 0 + if r_num == 1: + num_of_hosts = 1 + else: + num_of_hosts = 2 + for h_num in range(0, num_of_hosts): + ''' + for h_num in range(0, self.config.num_of_hosts_per_rack): + host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num) + if host_name in _hosts.keys(): + host = _hosts[host_name] + # for test + ''' + if r_num == 1: + host.status = "disabled" + host.state = "down" + ''' + host.original_vCPUs = float(self.config.cpus_per_host) + host.vCPUs_used = 0.0 + host.original_mem_cap = float(self.config.mem_per_host) + host.free_mem_mb = host.original_mem_cap + host.original_local_disk_cap = float(self.config.disk_per_host) + host.free_disk_gb = host.original_local_disk_cap + host.disk_available_least = host.original_local_disk_cap + + def set_flavors(self, _flavors): + for f_num in range(0, self.config.num_of_basic_flavors): + flavor = Flavor("bflavor" + str(f_num)) + flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1)) + flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1)) + flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0 + + _flavors[flavor.name] = flavor + + for a_num in range(0, self.config.num_of_aggregates): + flavor = Flavor("sflavor" + str(a_num)) + flavor.vCPUs = self.config.base_flavor_cpus * (a_num + 1) + flavor.mem_cap = self.config.base_flavor_mem * (a_num + 1) + flavor.disk_cap = self.config.base_flavor_disk * (a_num + 1) + + # flavor.extra_specs["availability_zone"] = "nova" + flavor.extra_specs["cpu_allocation_ratio"] = "0.5" + + _flavors[flavor.name] = flavor + + return "success" diff --git a/valet/engine/resource_manager/simulation/topology_simulator.py b/valet/engine/resource_manager/simulation/topology_simulator.py new file mode 100644 index 0000000..5a47151 --- /dev/null +++ b/valet/engine/resource_manager/simulation/topology_simulator.py @@ -0,0 +1,144 @@ +#!/bin/python + + +################################################################################################################# +# Author: Gueyoung Jung +# Contact: gjung@research.att.com +# Version 2.0.2: Feb. 
9, 2016 +# +# Functions +# - Simulate datacenter configurations (i.e., layout, cabling) +# +################################################################################################################# + + +from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link + + +class SimTopology(object): + + def __init__(self, _config): + self.config = _config + + def set_topology(self, _datacenter, _host_groups, _hosts, _switches): + self._set_network_topology(_switches) + self._set_host_topology(_datacenter, _host_groups, _hosts, _switches) + + return "success" + + def _set_network_topology(self, _switches): + root_switch = Switch("r0") + root_switch.switch_type = "root" + _switches[root_switch.name] = root_switch + + if self.config.num_of_spine_switches > 0: + for s_num in range(0, self.config.num_of_spine_switches): + switch = Switch(root_switch.name + "s" + str(s_num)) + switch.switch_type = "spine" + _switches[switch.name] = switch + + for r_num in range(0, self.config.num_of_racks): + switch = Switch(root_switch.name + "t" + str(r_num)) + switch.switch_type = "ToR" + _switches[switch.name] = switch + + for h_num in range(0, self.config.num_of_hosts_per_rack): + leaf_switch = Switch(switch.name + "l" + str(h_num)) + leaf_switch.switch_type = "leaf" + _switches[leaf_switch.name] = leaf_switch + + if self.config.num_of_spine_switches > 0: + for s_num in range(0, self.config.num_of_spine_switches): + s = _switches[root_switch.name + "s" + str(s_num)] + + up_link = Link(s.name + "-" + root_switch.name) + up_link.resource = root_switch + up_link.nw_bandwidth = self.config.bandwidth_of_spine + up_link.avail_nw_bandwidth = up_link.nw_bandwidth + s.up_links[up_link.name] = up_link + + if self.config.num_of_spine_switches > 1: + ps = None + if (s_num % 2) == 0: + if (s_num + 1) < self.config.num_of_spine_switches: + ps = _switches[root_switch.name + "s" + str(s_num + 1)] + else: + ps = _switches[root_switch.name + "s" + str(s_num - 1)] + if ps is not None: + peer_link = Link(s.name + "-" + ps.name) + peer_link.resource = ps + peer_link.nw_bandwidth = self.config.bandwidth_of_spine + peer_link.avail_nw_bandwidth = peer_link.nw_bandwidth + s.peer_links[peer_link.name] = peer_link + + for r_num in range(0, self.config.num_of_racks): + s = _switches[root_switch.name + "t" + str(r_num)] + + parent_switch_list = [] + if self.config.num_of_spine_switches > 0: + for s_num in range(0, self.config.num_of_spine_switches): + parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)]) + else: + parent_switch_list.append(_switches[root_switch.name]) + + for parent_switch in parent_switch_list: + up_link = Link(s.name + "-" + parent_switch.name) + up_link.resource = parent_switch + up_link.nw_bandwidth = self.config.bandwidth_of_rack + up_link.avail_nw_bandwidth = up_link.nw_bandwidth + s.up_links[up_link.name] = up_link + + if self.config.num_of_racks > 1: + ps = None + if (r_num % 2) == 0: + if (r_num + 1) < self.config.num_of_racks: + ps = _switches[root_switch.name + "t" + str(r_num + 1)] + else: + ps = _switches[root_switch.name + "t" + str(r_num - 1)] + if ps is not None: + peer_link = Link(s.name + "-" + ps.name) + peer_link.resource = ps + peer_link.nw_bandwidth = self.config.bandwidth_of_rack + peer_link.avail_nw_bandwidth = peer_link.nw_bandwidth + s.peer_links[peer_link.name] = peer_link + + for h_num in range(0, self.config.num_of_hosts_per_rack): + ls = _switches[s.name + "l" + str(h_num)] + + l_up_link = Link(ls.name + "-" + s.name) + l_up_link.resource = s 
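+                # the simulated fabric is a tree (root -> spine -> ToR -> leaf);
+                # each link is named "<source>-<target>", and peer links pair
+                # even/odd spine and ToR switches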
+ l_up_link.nw_bandwidth = self.config.bandwidth_of_host + l_up_link.avail_nw_bandwidth = l_up_link.nw_bandwidth + ls.up_links[l_up_link.name] = l_up_link + + def _set_host_topology(self, _datacenter, _host_groups, _hosts, _switches): + root_switch = _switches["r0"] + + for r_num in range(0, self.config.num_of_racks): + host_group = HostGroup(_datacenter.name + "r" + str(r_num)) + host_group.host_type = "rack" + switch = _switches[root_switch.name + "t" + str(r_num)] + host_group.switches[switch.name] = switch + _host_groups[host_group.name] = host_group + + for h_num in range(0, self.config.num_of_hosts_per_rack): + host = Host(host_group.name + "c" + str(h_num)) + leaf_switch = _switches[switch.name + "l" + str(h_num)] + host.switches[leaf_switch.name] = leaf_switch + _hosts[host.name] = host + + for r_num in range(0, self.config.num_of_racks): + host_group = _host_groups[_datacenter.name + "r" + str(r_num)] + host_group.parent_resource = _datacenter + + for h_num in range(0, self.config.num_of_hosts_per_rack): + host = _hosts[host_group.name + "c" + str(h_num)] + host.host_group = host_group + + host_group.child_resources[host.name] = host + + _datacenter.root_switches[root_switch.name] = root_switch + + for r_num in range(0, self.config.num_of_racks): + host_group = _host_groups[_datacenter.name + "r" + str(r_num)] + _datacenter.resources[host_group.name] = host_group diff --git a/valet/engine/resource_manager/topology.py b/valet/engine/resource_manager/topology.py new file mode 100755 index 0000000..23ca5f1 --- /dev/null +++ b/valet/engine/resource_manager/topology.py @@ -0,0 +1,197 @@ +#!/bin/python + +# Modified: Aug. 12, 2016 + + +import copy +import sys + +from sre_parse import isdigit +from valet.engine.resource_manager.resource_base import HostGroup, Switch, Link + + +class Topology(object): + + def __init__(self, _config, _logger): + self.config = _config + self.logger = _logger + + # Triggered by rhosts change + def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts, _switches): + result_status = self._set_host_topology(_datacenter, _host_groups, _hosts, _rhosts) + if result_status != "success": + return result_status + + result_status = self._set_network_topology(_datacenter, _host_groups, _hosts, _switches) + if result_status != "success": + return result_status + + return "success" + + # NOTE: currently, the hosts are copied from Nova + def _set_host_topology(self, _datacenter, _host_groups, _hosts, _rhosts): + for rhk, rh in _rhosts.iteritems(): + h = copy.deepcopy(rh) + + if "infra" not in h.tag: + h.tag.append("infra") + + (region_name, rack_name, _, status) = self._set_layout_by_name(rhk) + if status != "success": + self.logger.warn(status + " in host_name (" + rhk + ")") + + if region_name not in _datacenter.region_code_list: + _datacenter.region_code_list.append(region_name) + + ''' + if status == "success": + if _datacenter.region_code != None: + if _datacenter.region_code == "none": + pass + else: + if _datacenter.region_code != region_name: + _datacenter.region_code = "none" + else: + _datacenter.region_code = region_name + else: + self.logger.warn(status + " while parsing host_name (" + rhk + ")") + _datacenter.region_code = region_name + ''' + + if rack_name not in _host_groups.keys(): + host_group = HostGroup(rack_name) + host_group.host_type = "rack" + _host_groups[host_group.name] = host_group + + h.host_group = _host_groups[rack_name] + + _hosts[h.name] = h + + for hgk, hg in _host_groups.iteritems(): + hg.parent_resource = _datacenter + + for _, h 
in _hosts.iteritems(): + if h.host_group.name == hgk: + hg.child_resources[h.name] = h + + _datacenter.resources[hgk] = hg + + if len(_datacenter.region_code_list) > 1: + self.logger.warn("more than one region code") + + if "none" in _host_groups.keys(): + self.logger.warn("some hosts are into unknown rack") + + return "success" + + # NOTE: this is just muck-ups + def _set_network_topology(self, _datacenter, _host_groups, _hosts, _switches): + root_switch = Switch(_datacenter.name) + root_switch.switch_type = "root" + + _datacenter.root_switches[root_switch.name] = root_switch + _switches[root_switch.name] = root_switch + + for hgk, hg in _host_groups.iteritems(): + switch = Switch(hgk) + switch.switch_type = "ToR" + + up_link = Link(hgk + "-" + _datacenter.name) + up_link.resource = root_switch + up_link.nw_bandwidth = sys.maxint + up_link.avail_nw_bandwidth = up_link.nw_bandwidth + switch.up_links[up_link.name] = up_link + + hg.switches[switch.name] = switch + _switches[switch.name] = switch + + for hk, h in hg.child_resources.iteritems(): + leaf_switch = Switch(hk) + leaf_switch.switch_type = "leaf" + + l_up_link = Link(hk + "-" + hgk) + l_up_link.resource = switch + l_up_link.nw_bandwidth = sys.maxint + l_up_link.avail_nw_bandwidth = l_up_link.nw_bandwidth + leaf_switch.up_links[l_up_link.name] = l_up_link + + h.switches[leaf_switch.name] = leaf_switch + _switches[leaf_switch.name] = leaf_switch + + return "success" + + def _set_layout_by_name(self, _host_name): + region_name = None + rack_name = None + node_type = None + status = "success" + + validated_name = True + + num_of_fields = 0 + + index = 0 + end_of_region_index = 0 + end_of_rack_index = 0 + index_of_node_type = 0 + + for c in _host_name: + if index >= self.config.num_of_region_chars: + if not isdigit(c): + if index == self.config.num_of_region_chars: + status = "invalid region name = " + _host_name[:index] + c + validated_name = False + break + + if end_of_region_index == 0: + if c not in self.config.rack_code_list: + status = "invalid rack_char = " + c + validated_name = False + break + + end_of_region_index = index + num_of_fields += 1 + + if index == (end_of_region_index + 1): + status = "invalid rack name = " + _host_name[:index] + c + validated_name = False + break + + if end_of_rack_index == 0 and index > (end_of_region_index + 1): + end_of_rack_index = index + num_of_fields += 1 + + if node_type is None and end_of_rack_index > 0: + node_type = c + if node_type not in self.config.node_code_list: + status = "invalid node_char = " + c + validated_name = False + break + index_of_node_type = index + num_of_fields += 1 + + if c == '.': + break + + if index_of_node_type > 0 and index > index_of_node_type: + num_of_fields += 1 + break + + index += 1 + + if not index > (index_of_node_type + 1): + status = "invalid node name = " + _host_name[:index] + validated_name = False + + if num_of_fields != 3: + status = "invalid number of identification fields = " + str(num_of_fields) + validated_name = False + + if validated_name is False: + region_name = "none" + rack_name = "none" + else: + region_name = _host_name[:end_of_region_index] + rack_name = _host_name[:end_of_rack_index] + + return (region_name, rack_name, node_type, status) diff --git a/valet/engine/resource_manager/topology_manager.py b/valet/engine/resource_manager/topology_manager.py new file mode 100755 index 0000000..13a3d43 --- /dev/null +++ b/valet/engine/resource_manager/topology_manager.py @@ -0,0 +1,489 @@ +#!/bin/python + +# Modified: Sep. 
22, 2016 + + +import threading +import time + +from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, Switch, Link +from valet.engine.resource_manager.topology import Topology + + +class TopologyManager(threading.Thread): + + def __init__(self, _t_id, _t_name, _resource, _data_lock, _config, _logger): + threading.Thread.__init__(self) + + self.thread_id = _t_id + self.thread_name = _t_name + self.data_lock = _data_lock + self.end_of_process = False + + self.resource = _resource + + self.config = _config + + self.logger = _logger + + def run(self): + self.logger.info("TopologyManager: start " + self.thread_name + " ......") + + if self.config.topology_trigger_freq > 0: + period_end = time.time() + self.config.topology_trigger_freq + + while self.end_of_process is False: + time.sleep(70) + + if time.time() > period_end: + self._run() + period_end = time.time() + self.config.topology_trigger_freq + + else: + (alarm_HH, alarm_MM) = self.config.topology_trigger_time.split(':') + now = time.localtime() + timeout = True + last_trigger_year = now.tm_year + last_trigger_mon = now.tm_mon + last_trigger_mday = now.tm_mday + + while self.end_of_process is False: + time.sleep(70) + + now = time.localtime() + if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday: + timeout = False + + if timeout is False and \ + now.tm_hour >= int(alarm_HH) and now.tm_min >= int(alarm_MM): + self._run() + + timeout = True + last_trigger_year = now.tm_year + last_trigger_mon = now.tm_mon + last_trigger_mday = now.tm_mday + + self.logger.info("TopologyManager: exit " + self.thread_name) + + def _run(self): + + self.logger.info("TopologyManager: --- start topology status update ---") + + self.data_lock.acquire() + try: + if self.set_topology() is True: + if self.resource.update_topology() is False: + # TODO(GY): ignore? 
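+                    # a failed update leaves the previous in-memory snapshot in
+                    # place; the next trigger period retries from scratch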
+ pass + finally: + self.data_lock.release() + + self.logger.info("TopologyManager: --- done topology status update ---") + + def set_topology(self): + datacenter = None + host_groups = {} + hosts = {} + switches = {} + + topology = None + if self.config.mode.startswith("sim") is True or \ + self.config.mode.startswith("test") is True: + datacenter = Datacenter("sim") + else: + datacenter = Datacenter(self.config.datacenter_name) + + topology = Topology(self.config, self.logger) + + status = topology.set_topology(datacenter, host_groups, hosts, self.resource.hosts, switches) + if status != "success": + self.logger.error("TopologyManager: " + status) + return False + + self._check_update(datacenter, host_groups, hosts, switches) + + return True + + def _check_update(self, _datacenter, _host_groups, _hosts, _switches): + for sk in _switches.keys(): + if sk not in self.resource.switches.keys(): + new_switch = self._create_new_switch(_switches[sk]) + self.resource.switches[new_switch.name] = new_switch + + new_switch.last_update = time.time() + + self.logger.warn("TopologyManager: new switch (" + new_switch.name + ") added") + + for rsk in self.resource.switches.keys(): + if rsk not in _switches.keys(): + switch = self.resource.switches[rsk] + switch.status = "disabled" + + switch.last_update = time.time() + + self.logger.warn("TopologyManager: switch (" + switch.name + ") disabled") + + for hk in _hosts.keys(): + if hk not in self.resource.hosts.keys(): + new_host = self._create_new_host(_hosts[hk]) + self.resource.hosts[new_host.name] = new_host + + new_host.last_update = time.time() + + self.logger.warn("TopologyManager: new host (" + new_host.name + ") added from configuration") + + for rhk in self.resource.hosts.keys(): + if rhk not in _hosts.keys(): + host = self.resource.hosts[rhk] + if "infra" in host.tag: + host.tag.remove("infra") + + host.last_update = time.time() + + self.logger.warn("TopologyManager: host (" + host.name + ") removed from configuration") + + for hgk in _host_groups.keys(): + if hgk not in self.resource.host_groups.keys(): + new_host_group = self._create_new_host_group(_host_groups[hgk]) + self.resource.host_groups[new_host_group.name] = new_host_group + + new_host_group.last_update = time.time() + + self.logger.warn("TopologyManager: new host_group (" + new_host_group.name + ") added") + + for rhgk in self.resource.host_groups.keys(): + if rhgk not in _host_groups.keys(): + host_group = self.resource.host_groups[rhgk] + host_group.status = "disabled" + + host_group.last_update = time.time() + + self.logger.warn("TopologyManager: host_group (" + host_group.name + ") disabled") + + for sk in _switches.keys(): + switch = _switches[sk] + rswitch = self.resource.switches[sk] + link_updated = self._check_switch_update(switch, rswitch) + if link_updated is True: + rswitch.last_update = time.time() + + for hk in _hosts.keys(): + host = _hosts[hk] + rhost = self.resource.hosts[hk] + (topology_updated, link_updated) = self._check_host_update(host, rhost) + if topology_updated is True: + rhost.last_update = time.time() + if link_updated is True: + rhost.last_link_update = time.time() + + for hgk in _host_groups.keys(): + hg = _host_groups[hgk] + rhg = self.resource.host_groups[hgk] + (topology_updated, link_updated) = self._check_host_group_update(hg, rhg) + if topology_updated is True: + rhg.last_update = time.time() + if link_updated is True: + rhg.last_link_update = time.time() + + (topology_updated, link_updated) = self._check_datacenter_update(_datacenter) + if 
topology_updated is True: + self.resource.datacenter.last_update = time.time() + if link_updated is True: + self.resource.datacenter.last_link_update = time.time() + + for hk, host in self.resource.hosts.iteritems(): + if host.last_update > self.resource.current_timestamp: + self.resource.update_rack_resource(host) + + for hgk, hg in self.resource.host_groups.iteritems(): + if hg.last_update > self.resource.current_timestamp: + self.resource.update_cluster_resource(hg) + + def _create_new_switch(self, _switch): + new_switch = Switch(_switch.name) + new_switch.switch_type = _switch.switch_type + + return new_switch + + def _create_new_link(self, _link): + new_link = Link(_link.name) + new_link.resource = self.resource.switches[_link.resource.name] + + new_link.nw_bandwidth = _link.nw_bandwidth + new_link.avail_nw_bandwidth = new_link.nw_bandwidth + + return new_link + + def _create_new_host(self, _host): + new_host = Host(_host.name) + new_host.tag.append("infra") + + return new_host + + def _create_new_host_group(self, _hg): + new_hg = HostGroup(_hg.name) + new_hg.host_type = _hg.host_type + + return new_hg + + def _check_switch_update(self, _switch, _rswitch): + updated = False + + if _switch.switch_type != _rswitch.switch_type: + _rswitch.switch_type = _switch.switch_type + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (switch type)") + + if _rswitch.status == "disabled": + _rswitch.status = "enabled" + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (enabled)") + + for ulk in _switch.up_links.keys(): + exist = False + for rulk in _rswitch.up_links.keys(): + if ulk == rulk: + exist = True + break + if exist is False: + new_link = self._create_new_link(_switch.up_links[ulk]) + _rswitch.up_links[new_link.name] = new_link + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)") + + for rulk in _rswitch.up_links.keys(): + exist = False + for ulk in _switch.up_links.keys(): + if rulk == ulk: + exist = True + break + if exist is False: + del _rswitch.up_links[rulk] + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)") + + for ulk in _rswitch.up_links.keys(): + link = _switch.up_links[ulk] + rlink = _rswitch.up_links[ulk] + if self._check_link_update(link, rlink) is True: + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)") + + for plk in _switch.peer_links.keys(): + exist = False + for rplk in _rswitch.peer_links.keys(): + if plk == rplk: + exist = True + break + if exist is False: + new_link = self._create_new_link(_switch.peer_links[plk]) + _rswitch.peer_links[new_link.name] = new_link + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)") + + for rplk in _rswitch.peer_links.keys(): + exist = False + for plk in _switch.peer_links.keys(): + if rplk == plk: + exist = True + break + if exist is False: + del _rswitch.peer_links[rplk] + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)") + + for plk in _rswitch.peer_links.keys(): + link = _switch.peer_links[plk] + rlink = _rswitch.peer_links[plk] + if self._check_link_update(link, rlink) is True: + updated = True + self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)") + + return updated + + def _check_link_update(self, _link, _rlink): + updated = False + + if 
_link.nw_bandwidth != _rlink.nw_bandwidth: + _rlink.nw_bandwidth = _link.nw_bandwidth + updated = True + + return updated + + def _check_host_update(self, _host, _rhost): + updated = False + link_updated = False + + if "infra" not in _rhost.tag: + _rhost.tag.append("infra") + updated = True + self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (tag)") + + if _rhost.host_group is None or _host.host_group.name != _rhost.host_group.name: + if _host.host_group.name in self.resource.host_groups.keys(): + _rhost.host_group = self.resource.host_groups[_host.host_group.name] + else: + _rhost.host_group = self.resource.datacenter + updated = True + self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (host_group)") + + for sk in _host.switches.keys(): + exist = False + for rsk in _rhost.switches.keys(): + if sk == rsk: + exist = True + break + if exist is False: + _rhost.switches[sk] = self.resource.switches[sk] + link_updated = True + self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (new switch)") + + for rsk in _rhost.switches.keys(): + exist = False + for sk in _host.switches.keys(): + if rsk == sk: + exist = True + break + if exist is False: + del _rhost.switches[rsk] + link_updated = True + self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (switch removed)") + + return (updated, link_updated) + + def _check_host_group_update(self, _hg, _rhg): + updated = False + link_updated = False + + if _hg.host_type != _rhg.host_type: + _rhg.host_type = _hg.host_type + updated = True + self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (hosting type)") + + if _rhg.status == "disabled": + _rhg.status = "enabled" + updated = True + self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (enabled)") + + if _rhg.parent_resource is None or _hg.parent_resource.name != _rhg.parent_resource.name: + if _hg.parent_resource.name in self.resource.host_groups.keys(): + _rhg.parent_resource = self.resource.host_groups[_hg.parent_resource.name] + else: + _rhg.parent_resource = self.resource.datacenter + updated = True + self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (parent host_group)") + + for rk in _hg.child_resources.keys(): + exist = False + for rrk in _rhg.child_resources.keys(): + if rk == rrk: + exist = True + break + if exist is False: + if _rhg.host_type == "rack": + _rhg.child_resources[rk] = self.resource.hosts[rk] + elif _rhg.host_type == "cluster": + _rhg.child_resources[rk] = self.resource.host_groups[rk] + updated = True + self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new child host)") + + for rrk in _rhg.child_resources.keys(): + exist = False + for rk in _hg.child_resources.keys(): + if rrk == rk: + exist = True + break + if exist is False: + del _rhg.child_resources[rrk] + updated = True + self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (child host removed)") + + for sk in _hg.switches.keys(): + exist = False + for rsk in _rhg.switches.keys(): + if sk == rsk: + exist = True + break + if exist is False: + _rhg.switches[sk] = self.resource.switches[sk] + link_updated = True + self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new switch)") + + for rsk in _rhg.switches.keys(): + exist = False + for sk in _hg.switches.keys(): + if rsk == sk: + exist = True + break + if exist is False: + del _rhg.switches[rsk] + link_updated = True + self.logger.warn("TopologyManager: host_group (" + 
_rhg.name + ") updated (switch removed)") + + return (updated, link_updated) + + def _check_datacenter_update(self, _datacenter): + updated = False + link_updated = False + + for rc in _datacenter.region_code_list: + if rc not in self.resource.datacenter.region_code_list: + self.resource.datacenter.region_code_list.append(rc) + updated = True + self.logger.warn("TopologyManager: datacenter updated (new region code, " + rc + ")") + + for rrc in self.resource.datacenter.region_code_list: + if rrc not in _datacenter.region_code_list: + self.resource.datacenter.region_code_list.remove(rrc) + updated = True + self.logger.warn("TopologyManager: datacenter updated (region code, " + rrc + ", removed)") + + for rk in _datacenter.resources.keys(): + exist = False + for rrk in self.resource.datacenter.resources.keys(): + if rk == rrk: + exist = True + break + if exist is False: + r = _datacenter.resources[rk] + if isinstance(r, HostGroup): + self.resource.datacenter.resources[rk] = self.resource.host_groups[rk] + elif isinstance(r, Host): + self.resource.datacenter.resources[rk] = self.resource.hosts[rk] + updated = True + self.logger.warn("TopologyManager: datacenter updated (new resource)") + + for rrk in self.resource.datacenter.resources.keys(): + exist = False + for rk in _datacenter.resources.keys(): + if rrk == rk: + exist = True + break + if exist is False: + del self.resource.datacenter.resources[rrk] + updated = True + self.logger.warn("TopologyManager: datacenter updated (resource removed)") + + for sk in _datacenter.root_switches.keys(): + exist = False + for rsk in self.resource.datacenter.root_switches.keys(): + if sk == rsk: + exist = True + break + if exist is False: + self.resource.datacenter.root_switches[sk] = self.resource.switches[sk] + link_updated = True + self.logger.warn("TopologyManager: datacenter updated (new switch)") + + for rsk in self.resource.datacenter.root_switches.keys(): + exist = False + for sk in _datacenter.root_switches.keys(): + if rsk == sk: + exist = True + break + if exist is False: + del self.resource.datacenter.root_switches[rsk] + link_updated = True + self.logger.warn("TopologyManager: datacenter updated (switch removed)") + + return (updated, link_updated) diff --git a/valet/engine/resource_manager/topology_simulator.py b/valet/engine/resource_manager/topology_simulator.py new file mode 100644 index 0000000..5a47151 --- /dev/null +++ b/valet/engine/resource_manager/topology_simulator.py @@ -0,0 +1,144 @@ +#!/bin/python + + +################################################################################################################# +# Author: Gueyoung Jung +# Contact: gjung@research.att.com +# Version 2.0.2: Feb. 
9, 2016 +# +# Functions +# - Simulate datacenter configurations (i.e., layout, cabling) +# +################################################################################################################# + + +from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link + + +class SimTopology(object): + + def __init__(self, _config): + self.config = _config + + def set_topology(self, _datacenter, _host_groups, _hosts, _switches): + self._set_network_topology(_switches) + self._set_host_topology(_datacenter, _host_groups, _hosts, _switches) + + return "success" + + def _set_network_topology(self, _switches): + root_switch = Switch("r0") + root_switch.switch_type = "root" + _switches[root_switch.name] = root_switch + + if self.config.num_of_spine_switches > 0: + for s_num in range(0, self.config.num_of_spine_switches): + switch = Switch(root_switch.name + "s" + str(s_num)) + switch.switch_type = "spine" + _switches[switch.name] = switch + + for r_num in range(0, self.config.num_of_racks): + switch = Switch(root_switch.name + "t" + str(r_num)) + switch.switch_type = "ToR" + _switches[switch.name] = switch + + for h_num in range(0, self.config.num_of_hosts_per_rack): + leaf_switch = Switch(switch.name + "l" + str(h_num)) + leaf_switch.switch_type = "leaf" + _switches[leaf_switch.name] = leaf_switch + + if self.config.num_of_spine_switches > 0: + for s_num in range(0, self.config.num_of_spine_switches): + s = _switches[root_switch.name + "s" + str(s_num)] + + up_link = Link(s.name + "-" + root_switch.name) + up_link.resource = root_switch + up_link.nw_bandwidth = self.config.bandwidth_of_spine + up_link.avail_nw_bandwidth = up_link.nw_bandwidth + s.up_links[up_link.name] = up_link + + if self.config.num_of_spine_switches > 1: + ps = None + if (s_num % 2) == 0: + if (s_num + 1) < self.config.num_of_spine_switches: + ps = _switches[root_switch.name + "s" + str(s_num + 1)] + else: + ps = _switches[root_switch.name + "s" + str(s_num - 1)] + if ps is not None: + peer_link = Link(s.name + "-" + ps.name) + peer_link.resource = ps + peer_link.nw_bandwidth = self.config.bandwidth_of_spine + peer_link.avail_nw_bandwidth = peer_link.nw_bandwidth + s.peer_links[peer_link.name] = peer_link + + for r_num in range(0, self.config.num_of_racks): + s = _switches[root_switch.name + "t" + str(r_num)] + + parent_switch_list = [] + if self.config.num_of_spine_switches > 0: + for s_num in range(0, self.config.num_of_spine_switches): + parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)]) + else: + parent_switch_list.append(_switches[root_switch.name]) + + for parent_switch in parent_switch_list: + up_link = Link(s.name + "-" + parent_switch.name) + up_link.resource = parent_switch + up_link.nw_bandwidth = self.config.bandwidth_of_rack + up_link.avail_nw_bandwidth = up_link.nw_bandwidth + s.up_links[up_link.name] = up_link + + if self.config.num_of_racks > 1: + ps = None + if (r_num % 2) == 0: + if (r_num + 1) < self.config.num_of_racks: + ps = _switches[root_switch.name + "t" + str(r_num + 1)] + else: + ps = _switches[root_switch.name + "t" + str(r_num - 1)] + if ps is not None: + peer_link = Link(s.name + "-" + ps.name) + peer_link.resource = ps + peer_link.nw_bandwidth = self.config.bandwidth_of_rack + peer_link.avail_nw_bandwidth = peer_link.nw_bandwidth + s.peer_links[peer_link.name] = peer_link + + for h_num in range(0, self.config.num_of_hosts_per_rack): + ls = _switches[s.name + "l" + str(h_num)] + + l_up_link = Link(ls.name + "-" + s.name) + l_up_link.resource = s 
+                l_up_link.nw_bandwidth = self.config.bandwidth_of_host
+                l_up_link.avail_nw_bandwidth = l_up_link.nw_bandwidth
+                ls.up_links[l_up_link.name] = l_up_link
+
+    def _set_host_topology(self, _datacenter, _host_groups, _hosts, _switches):
+        root_switch = _switches["r0"]
+
+        for r_num in range(0, self.config.num_of_racks):
+            host_group = HostGroup(_datacenter.name + "r" + str(r_num))
+            host_group.host_type = "rack"
+            switch = _switches[root_switch.name + "t" + str(r_num)]
+            host_group.switches[switch.name] = switch
+            _host_groups[host_group.name] = host_group
+
+            for h_num in range(0, self.config.num_of_hosts_per_rack):
+                host = Host(host_group.name + "c" + str(h_num))
+                leaf_switch = _switches[switch.name + "l" + str(h_num)]
+                host.switches[leaf_switch.name] = leaf_switch
+                _hosts[host.name] = host
+
+        for r_num in range(0, self.config.num_of_racks):
+            host_group = _host_groups[_datacenter.name + "r" + str(r_num)]
+            host_group.parent_resource = _datacenter
+
+            for h_num in range(0, self.config.num_of_hosts_per_rack):
+                host = _hosts[host_group.name + "c" + str(h_num)]
+                host.host_group = host_group
+
+                host_group.child_resources[host.name] = host
+
+        _datacenter.root_switches[root_switch.name] = root_switch
+
+        for r_num in range(0, self.config.num_of_racks):
+            host_group = _host_groups[_datacenter.name + "r" + str(r_num)]
+            _datacenter.resources[host_group.name] = host_group
diff --git a/valet/ha/ha_valet.cfg b/valet/ha/ha_valet.cfg
new file mode 100644
index 0000000..87e6ab7
--- /dev/null
+++ b/valet/ha/ha_valet.cfg
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# vi: sw=4 ts=4:
+#
+# ---------------------------------------------------------------------------
+# Copyright (c) 2013-2015 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ---------------------------------------------------------------------------
+#
+#
+# configuration file for havalet, the valet process monitoring tool.
+# each group name is a logical process name, and
+# each group MUST define the following properties:
+#     'host'
+#     'user'
+#     'start'
+#     'stop'
+#     'test'
+#
+# IMPORTANT:
+# "test" - MUST return a value != 0; this value should reflect
+# the monitored process priority.
+#
+# "process's priority" - used for active/stand-by scenarios.
+# MUST be greater than 0 - a lower number means a higher priority.
+# e.g. an instance which returns '1' in response to the "test" invocation
+# takes precedence over an instance which returns '2' as its priority.
+# priority 0 means the process is down.
+#
+# "stand_by_list" - OPTIONAL property. comma delimited hosts list.
+# used in active/stand-by scenarios.
+# ha_valet will first attempt to restart the instance with the lowest
+# priority value; only if that instance fails to start will ha_valet try
+# to restart the process on the next host in the list.
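+#
+# as the stanzas below suggest, the start/stop/test values are Python
+# %-format expressions into which ha_valet substitutes the group's
+# 'user' and 'host' properties. A minimal stanza would look like the
+# following sketch (process name and commands are illustrative only):
+#
+#   [SomeProcess]
+#   order=1
+#   priority=1
+#   host=valet1
+#   user=valet
+#   stand_by_list=valet2
+#   start="ssh %s@%s 'service someprocess start'" % (user, host)
+#   stop="ssh %s@%s 'service someprocess stop'" % (user, host)
+#   test="ssh %s@%s 'service someprocess status; exit $?'" % (user, host)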
+
+[Ostro]
+order=5
+priority=1
+host=valet1
+user=m04060
+stand_by_list=valet2
+start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c start'" % (user, host)
+stop="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'" % (user, host)
+test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py ; exit $?'" % (user, host)
+
+
+[ValetApi]
+order=4
+priority=1
+host=valet1
+stand_by_list=valet2
+user=m04060
+start="ssh -o ConnectTimeout=1 %s@%s 'sudo apachectl start'" % (user, host)
+stop="ssh -o ConnectTimeout=1 %s@%s 'sudo apachectl stop'" % (user, host)
+test="exit $(wget -T 1 -t 1 -qO- http://%s:8090/v1 | grep CURRENT | wc -l)" % (host)
+
diff --git a/valet/ha/ha_valet.py b/valet/ha/ha_valet.py
new file mode 100644
index 0000000..3960f5c
--- /dev/null
+++ b/valet/ha/ha_valet.py
@@ -0,0 +1,505 @@
+#!/usr/bin/env python
+# vi: sw=4 ts=4:
+#
+# ---------------------------------------------------------------------------
+# Copyright (c) 2013-2015 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ---------------------------------------------------------------------------
+#
+
+"""
+
+    Mnemonic: ha_valet.py
+    Abstract: High availability script for valet processes. It starts its
+              configured processes and pings them for availability. If a
+              local instance is not running, it is started. If multiple
+              instances are found running, it determines which instance
+              should be shut down based on priority.
+
+    Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi
+
+ ------------------------------------------------------------------------------
+
+    Algorithm
+    -----------
+    The ha_valet script runs on each valet node in a continuous loop, checking for
+    heartbeats from all the valet nodes found in the "stand_by_list" conf property once
+    every 5 secs (default). A heartbeat is obtained by invoking the "test_command"
+    conf property.
+    If exactly one monitored process instance is running, the script does
+    nothing. If no instance is running, then the local instance is activated after
+    waiting for 5*priority seconds to let a higher priority valet take over
+    first. A valet monitored process's priority is determined by its conf.
+    If the current node's instance is running and another is found, then a
+    conflict resolution process is invoked whereby the priorities of both
+    processes are compared, and the instance with the higher value is deactivated.
+
+    IMPORTANT: test_command must return a value != 0; this value should reflect
+    the monitored process priority.
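+
+    For example (hypothetical priorities): if the local node's test command
+    reports priority 2 while a stand-by peer reports 1, both instances are
+    running; since a lower number means a higher priority, the peer keeps
+    running and the local instance is deactivated.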
+ """ + +import logging.handlers +import os +from oslo_config import cfg +import socket +import subprocess +import threading +import time +# import argparse +# from oslo_log import log as logging + +CONF = cfg.CONF + +# Directory locations +LOG_DIR = os.getenv('HA_VALET_LOGD', '/var/log/havalet/') +ETC_DIR = os.getenv('HA_VALET_ETCD', '/etc/valet/ha/') +DEFAULT_CONF_FILE = ETC_DIR + 'ha_valet.cfg' + +# Set the maximum logfile size as Byte for time-series log files +max_log_size = 1000000 +# Set the maximum number of time-series log files +max_num_of_logs = 10 + + +PRIMARY_SETUP = 1 +RETRY_COUNT = 3 # How many times to retry ping command +CONNECT_TIMEOUT = 3 # Ping timeout +MAX_QUICK_STARTS = 10 # we stop if there are > 10 restarts in quick succession +QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this + +# HA Configuration +HEARTBEAT_SEC = 5 # Heartbeat interval in seconds + + +NAME = 'name' +ORDER = 'order' +HOST = 'host' +USER = 'user' +PRIORITY = 'priority' +START_COMMAND = 'start' +STOP_COMMAND = 'stop' +TEST_COMMAND = 'test' +STAND_BY_LIST = 'stand_by_list' + +ostro_group = cfg.OptGroup(name='Ostro', title='Valet Engine HA conf') +api_group = cfg.OptGroup(name='ValetApi', title='Valet Api HA conf') + +havalet_opts = [ + cfg.IntOpt(PRIORITY, default=1, help='master slave distinguish'), + cfg.IntOpt(ORDER, help='launching order'), + cfg.StrOpt(HOST, help='where the monitored process is running on'), + cfg.StrOpt(USER, help='linux user'), + cfg.ListOpt(STAND_BY_LIST, help='monitored hosts list'), + cfg.StrOpt(START_COMMAND, help='launch command'), + cfg.StrOpt(STOP_COMMAND, help='stop command'), + cfg.StrOpt(TEST_COMMAND, help='test command') +] + +CONF.register_group(api_group) +CONF.register_opts(havalet_opts, api_group) + +CONF.register_group(ostro_group) +CONF.register_opts(havalet_opts, ostro_group) + + +def read_conf(): + """returns dictionary of configured processes""" + return dict([ + ('Ostro', { + NAME: 'Ostro', + ORDER: CONF.Ostro.order, + HOST: CONF.Ostro.host, + USER: CONF.Ostro.user, + PRIORITY: CONF.Ostro.priority, + START_COMMAND: CONF.Ostro.start, + STOP_COMMAND: CONF.Ostro.stop, + TEST_COMMAND: CONF.Ostro.test, + STAND_BY_LIST: CONF.Ostro.stand_by_list + }), + + ('ValetApi', { + NAME: 'ValetApi', + ORDER: CONF.ValetApi.order, + HOST: CONF.ValetApi.host, + USER: CONF.ValetApi.user, + PRIORITY: CONF.ValetApi.priority, + START_COMMAND: CONF.ValetApi.start, + STOP_COMMAND: CONF.ValetApi.stop, + TEST_COMMAND: CONF.ValetApi.test, + STAND_BY_LIST: CONF.ValetApi.stand_by_list + })]) + + +def prepare_log(obj, name): + obj.log = logging.getLogger(name) + obj.log.setLevel(logging.DEBUG) + # logging.register_options(CONF) + # logging.setup(CONF, 'valet') + handler = logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log', maxBytes=max_log_size, + backupCount=max_num_of_logs) + fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + handler.setFormatter(fmt) + obj.log.addHandler(handler) + + +class HaValetThread (threading.Thread): + + def __init__(self, data, exit_event): + threading.Thread.__init__(self) + # self.exitFlag = exit_event + self.data = data + self.log = None + + def run(self): + """Main function""" + prepare_log(self, self.data[NAME]) + self.log.info('HA Valet - ' + self.data[NAME] + ' Watcher Thread - starting') + + fqdn_list = [] + this_node = socket.getfqdn() + fqdn_list.append(this_node) + + # Read list of standby valet nodes and find us + # standby_list = [] + standby_list = self.data.get(STAND_BY_LIST, None) + + while not 
+            self.log.debug("stand by list: " + str(standby_list))
+            try:
+                for fqdn in fqdn_list:
+                    self.log.debug("fqdn_list: " + str(fqdn_list))
+                    if fqdn in standby_list:
+                        this_node = fqdn
+                        break
+                standby_list.remove(this_node)
+                self.data[STAND_BY_LIST] = standby_list
+                self.log.debug("modified stand by list: " + str(standby_list))
+            except ValueError:
+                self.log.debug("host " + this_node + " is not in standby list: %s - continue"
+                               % str(standby_list))
+                break
+
+        # Loop forever sending pings
+        self._main_loop(this_node)
+        self.log.info("HA Valet Watcher Thread - going down!")
+
+    def use(self, param):
+        """No-op; marks a parameter as used (the values are consumed by the eval'd commands)."""
+        pass
+
+    def _main_loop(self, this_node):
+        """ Main heartbeat and liveness check loop
+
+        :param this_node: host name
+        :type this_node: string
+        :return: None
+        :rtype: None
+        """
+        quick_start = 0  # number of restarts close together
+        last_start = 0
+        priority_wait = False
+
+        """
+        DO NOT RENAME, DELETE, MOVE the following parameters,
+        they may be referenced from within the process commands
+        """
+        host = self.data.get(HOST, 'localhost')
+        user = self.data.get(USER, None)
+        self.use(user)
+        my_priority = int(self.data.get(PRIORITY, 1))
+        # the commands are kept as strings and eval'd at each call site,
+        # so they pick up the current values of 'user' and 'host'
+        start_command = self.data.get(START_COMMAND, None)
+        stop_command = self.data.get(STOP_COMMAND, None)
+        test_command = self.data.get(TEST_COMMAND, None)
+        standby_list = self.data.get(STAND_BY_LIST)
+
+        while True:
+            if not priority_wait:
+                # Normal heartbeat
+                time.sleep(HEARTBEAT_SEC)
+            else:
+                # No valet running. Wait for higher priority valet to activate.
+                time.sleep(HEARTBEAT_SEC * my_priority)
+
+            self.log.info('checking status here - ' + host + ', my priority: ' + str(my_priority))
+            i_am_active, priority = self._is_active(eval(test_command))
+            self.log.info(host + ': host_active = ' + str(i_am_active) + ', ' + str(priority))
+            any_active = i_am_active
+            self.log.info('any active = ' + str(any_active))
+
+            # Check for active valets
+            standby_list_is_empty = not standby_list
+            if not standby_list_is_empty:
+                self.log.debug('main loop: standby_list is not empty ' + str(standby_list))
+                for host_in_list in standby_list:
+                    if host_in_list == this_node:
+                        self.log.info('host_in_list is this_node - skipping')
+                        continue
+
+                    self.log.info('checking status on - ' + host_in_list)
+                    host = host_in_list
+                    host_active, host_priority = self._is_active(eval(test_command))
+                    host = self.data.get(HOST, 'localhost')
+                    self.log.info(host_in_list + ' - host_active = ' + str(host_active) + ', ' + str(host_priority))
+                    # Check for split brain: 2 valets active
+                    if i_am_active and host_active:
+                        self.log.info('found two live instances, checking priorities')
+                        should_be_active = self._should_be_active(host_priority, my_priority)
+                        if should_be_active:
+                            self.log.info('deactivate myself, ' + host_in_list + ' already running')
+                            self._deactivate_process(eval(stop_command))  # Deactivate myself
+                            i_am_active = False
+                        else:
+                            self.log.info('deactivate ' + self.data[NAME] + ' on ' + host_in_list +
+                                          ', already running here')
+                            host = host_in_list
+                            self._deactivate_process(eval(stop_command))  # Deactivate other valet
+                            host = self.data.get(HOST, 'localhost')
+
+                    # Track that at-least one valet is active
+                    any_active = any_active or host_active
+
+            # If no active process or I'm primary, then we must try to start one
+            if not any_active or (not i_am_active and my_priority == PRIMARY_SETUP):
+                self.log.warn('there is no instance up')
+                self.log.info("I'm primary instance: " + str(my_priority == PRIMARY_SETUP))
+                if priority_wait or my_priority == PRIMARY_SETUP:
+                    now = int(time.time())
+                    if now - last_start < QUICK_RESTART_SEC:  # quick restart (crash?)
+                        quick_start += 1
+                        if quick_start > MAX_QUICK_STARTS:
+                            self.log.critical("too many restarts in quick succession.")
+                    else:
+                        quick_start = 0  # reset if it's been a while since last restart
+
+                    if last_start == 0:
+                        diff = "never by this instance"
+                    else:
+                        diff = "%d seconds ago" % (now - last_start)
+
+                    last_start = now
+                    priority_wait = False
+                    if (not i_am_active and my_priority == PRIMARY_SETUP) or (standby_list is not None):
+                        self.log.info('no running instance found, starting here; last start %s' % diff)
+                        self._activate_process(eval(start_command), my_priority)
+                    else:
+                        host = standby_list[0]  # LIMITATION - supporting only 1 stand by host
+                        self.log.info('no running instances found, starting on %s; last start %s' % (host, diff))
+                        self._activate_process(eval(start_command), my_priority)
+                        host = self.data.get(HOST, 'localhost')
+                else:
+                    priority_wait = True
+            else:
+                self.log.info("status: up and running")
+        # end loop
+
+    def _should_be_active(self, host_priority, my_priority):
+        """ Return True if the remote host should be active instead of the current node, based on the hosts' priorities.
+
+        Lower value means higher priority,
+        0 (zero) - invalid priority (e.g. process is down)
+
+        :param host_priority: other host's priority
+        :type host_priority: int
+        :param my_priority: my priority
+        :type my_priority: int
+        :return: True/False
+        :rtype: bool
+        """
+        self.log.info('my priority is %d, remote priority is %d' % (my_priority, host_priority))
+        return host_priority < my_priority
+
+    def _is_active(self, call):
+        """ Return 'True, priority' if valet is running on the host,
+
+        'False, None' otherwise.
+        """
+
+        # must use no-proxy to avoid proxy servers gumming up the works
+        for i in xrange(RETRY_COUNT):
+            try:
+                self.log.info('ping (retry %d): %s' % (i, call))
+                proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+                priority = proc.wait()
+                if priority == 255:  # no route to host
+                    priority = 0
+                out, err = proc.communicate()
+                self.log.debug('out: ' + out + ', err: ' + err)
+                self.log.info('ping result (should be > 0): %s' % (str(priority)))
+                return (priority > 0), priority
+            except subprocess.CalledProcessError as e:
+                self.log.error('ping error: ' + str(e))
+                continue
+        return False, None
+
+    def _deactivate_process(self, deactivate_command):
+        """ Deactivate valet on a given host.
+
+        If host is omitted, the local valet is stopped.
+        Returns True if successful, False on error.
+        """
+
+        try:
+            # call = "'" + deactivate_command % (PROTO, host, port) + "'"
+            self.log.info('deactivate_command: ' + deactivate_command)
+            subprocess.check_call(deactivate_command, shell=True)
+            return True
+        except subprocess.CalledProcessError as e:
+            self.log.error(str(e))
+            return False
+
+    def _activate_process(self, activate_command, priority):
+        """ Activate valet on a given host.
+
+        If host is omitted, the local valet is started.
+        Returns True if successful, False on error.
+ """ + + try: + self.log.info('activate_command: ' + activate_command) + subprocess.check_call(activate_command, shell=True) + time.sleep(HEARTBEAT_SEC * priority) # allow some grace period + return True + except subprocess.CalledProcessError as e: + self.log.error(str(e)) + return False + + +class HAValet(object): + + def __init__(self): + if not os.path.exists(LOG_DIR): + os.makedirs(LOG_DIR) + self.log = None + + @DeprecationWarning + def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE, process=''): + """ This function reads the valet config file and returns configuration + + attributes in key/value format + + :param conf_file_name: config file name + :type conf_file_name: string + :param process: specific process name + when not supplied - the module launches all the processes in the configuration + :type process: string + :return: dictionary of configured monitored processes + :rtype: dict + """ + + cdata = {} + section = '' + + try: + with open(conf_file_name, 'r') as valet_conf_file: + for line in valet_conf_file.readlines(): + if line.strip(' \t\r\n')[:1] == '#' or line.__len__() == 2: + continue + elif line.lstrip(' \t\r\n')[:1] == ':': + tokens = line.lstrip(' \t\n\r').split(' ') + section = tokens[0][1:].strip('\n\r\n') + cdata[section] = {} + cdata[section][NAME] = section + else: + if line[:1] == '\n': + continue + tokens = line.split('=') + key = tokens[0].strip(' \t\n\r') + value = tokens[1].strip(' \t\n\r') + cdata[section][key] = value + + # if need to run a specific process + # remove all others + if process is not '': + for key in cdata.keys(): + if key != process: + del cdata[key] + + return cdata + except OSError: + print('unable to open %s file for some reason' % conf_file_name) + return cdata + + def _valid_process_conf_data(self, process_data): + """ verify all mandatory parameters are found in the monitored process configuration only standby_list is optional + + :param process_data: specific process configuration parameters + :type process_data: dict + :return: are all mandatory parameters are found + :rtype: bool + """ + + if (process_data.get(HOST) is not None and + process_data.get(PRIORITY) is not None and + process_data.get(ORDER) is not None and + process_data.get(START_COMMAND) is not None and + process_data.get(STOP_COMMAND) is not None and + process_data.get(TEST_COMMAND) is not None): + return True + else: + return False + + def start(self): + """Start valet HA - Main function""" + prepare_log(self, 'havalet') + self.log.info('ha_valet v1.1 starting') + + # parser = argparse.ArgumentParser() + # parser.add_argument('-p', '--process', help='process name to monitor', default='') + # parser.add_argument('-f', '--file', help='configuraion file', default=DEFAULT_CONF_FILE) + # args = parser.parse_args() + + conf_data = read_conf() + + # if a specific process was asked for.. + # remove all others + # if args.process is not '': + # for key in conf_data.keys(): + # if key != args.process: + # del conf_data[key] + # + # if conf_data.get(args.process) is None: + # print args.process, ' - process not found in conf.' 
+
+        if not conf_data:
+            self.log.warn('Processes list is empty - leaving.')
+            return
+
+        threads = []
+        exit_event = threading.Event()
+
+        # sort by launching order
+        proc_sorted = sorted(conf_data.values(), key=lambda d: int(d[ORDER]))
+
+        for proc in proc_sorted:
+            if self._valid_process_conf_data(proc):
+                self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' + str(proc))
+                thread = HaValetThread(proc, exit_event)
+                time.sleep(HEARTBEAT_SEC)
+                thread.start()
+                threads.append(thread)
+            else:
+                self.log.info(proc[NAME] + " section is missing a mandatory parameter.")
+                continue
+
+        self.log.info('on air.')
+
+        while not exit_event.isSet():
+            time.sleep(HEARTBEAT_SEC)
+
+        # Wait for all threads to complete
+        for thread in threads:
+            thread.join()
+
+        self.log.info('ha_valet v1.1 exiting')
+
+if __name__ == '__main__' or __name__ == 'main':
+    CONF(default_config_files=[DEFAULT_CONF_FILE])
+    HAValet().start()
diff --git a/valet/ha/ha_valet2.cfg b/valet/ha/ha_valet2.cfg
new file mode 100644
index 0000000..8a24229
--- /dev/null
+++ b/valet/ha/ha_valet2.cfg
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# vi: sw=4 ts=4:
+#
+# ---------------------------------------------------------------------------
+# Copyright (c) 2013-2015 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ---------------------------------------------------------------------------
+#
+#
+# configuration file for havalet - the valet processes monitoring tool.
+# each group name is a logical process name, and each group MUST have
+# the following properties:
+#       'host'
+#       'user'
+#       'start'
+#       'stop'
+#       'test'
+#
+# IMPORTANT:
+# "test" - MUST return a value != 0; this value should reflect
+# the monitored process priority.
+#
+# "process's priority" - used for active/stand-by scenarios.
+# MUST be greater than 0 - a lower number means a higher priority.
+# e.g. an instance which returns '1' as its response to the "test" invocation
+# will get precedence over an instance which returns '2' as its priority.
+# priority 0 means the process is down.
+#
+# "stand_by_list" - OPTIONAL property. comma-delimited list of hosts.
+# used in active/stand-by scenarios.
+# ha_valet first attempts to restart the instance with the lower priority value;
+# only if that instance fails to start does ha_valet try to restart the process
+# on the next host in the list.
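The stanzas below mirror ha_valet.cfg but with the priorities swapped (the valet2 instances report priority 2), so a split-brain always resolves in valet1's favor. A minimal sketch of the comparison rule, the same test `_should_be_active` applies in ha_valet.py above:

    def should_be_active(host_priority, my_priority):
        # Lower value wins; 0 means the remote process is down.
        return host_priority < my_priority

    # On valet2 (my_priority=2), seeing valet1 alive (host_priority=1) returns True: valet2 deactivates itself.
    # On valet1 (my_priority=1), seeing valet2 alive (host_priority=2) returns False: valet1 deactivates valet2.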
+
+[Ostro]
+order=5
+priority=2
+host=valet2
+user=m04060
+stand_by_list=valet1
+start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c start'" % (user, host)
+stop="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'" % (user, host)
+test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py ; exit $?'" % (user, host)
+
+
+[ValetApi]
+order=4
+priority=2
+host=valet2
+stand_by_list=valet1
+user=m04060
+start="ssh -o ConnectTimeout=1 %s@%s 'sudo apachectl start'" % (user, host)
+stop="ssh -o ConnectTimeout=1 %s@%s 'sudo apachectl stop'" % (user, host)
+test="exit $(wget -T 1 -t 1 -qO- http://%s:8090/v1 | grep CURRENT | wc -l)" % (host)
+
+
diff --git a/valet/tests/__init__.py b/valet/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/tests/api/README.md b/valet/tests/api/README.md
new file mode 100644
index 0000000..0d7985f
--- /dev/null
+++ b/valet/tests/api/README.md
@@ -0,0 +1,15 @@
+# Using Postman with valet-api
+
+The placement API (valet-api) can be exercised using [Postman](http://www.getpostman.com/), by importing the file ``Valet.json.postman_collection``.
+
+Before using the collection, create a Postman environment with the following settings:
+
+* ``valet``: valet-api endpoint (e.g., ``http://controller:8090``)
+* ``keystone``: keystone-api endpoint (e.g., ``http://controller:5000``)
+* ``tenant_name``: tenant name (e.g., ``service``)
+* ``username``: username (e.g., ``valet``)
+* ``password``: password
+
+All valet-api requests require a valid Keystone token. Use the **Keystone Generate Token v2** POST request to generate one. It will be automatically stored in the Postman environment and used for future API requests. Once the token expires ("Authorization Required"), simply generate a new token.
+
+See the [valet-api](https://github.com/att-comdev/valet/blob/master/README.md) API documentation for a complete list of supported requests.
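For reference, the token exchange Postman performs can also be scripted. A minimal sketch using python-requests against the Keystone v2 endpoint described in the README above; all host names and credentials are placeholders for the environment values:

```python
import requests

KEYSTONE = 'http://controller:5000'  # the ``keystone`` environment value

# Keystone v2 password-credentials token request
body = {
    'auth': {
        'tenantName': 'service',
        'passwordCredentials': {'username': 'valet', 'password': 'secret'},
    }
}
resp = requests.post(KEYSTONE + '/v2.0/tokens', json=body)
resp.raise_for_status()
token = resp.json()['access']['token']['id']

# The token is then passed to valet-api on every request, e.g.:
# requests.get('http://controller:8090/v1', headers={'X-Auth-Token': token})
```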
diff --git a/valet/tests/api/Valet.json.postman_collection b/valet/tests/api/Valet.json.postman_collection new file mode 100644 index 0000000..f18da7f --- /dev/null +++ b/valet/tests/api/Valet.json.postman_collection @@ -0,0 +1,770 @@ +{ + "id": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "name": "Valet", + "description": "", + "order": [], + "folders": [ + { + "id": "78d50feb-2845-ddae-a871-e618c72436d6", + "name": "Groups", + "description": "", + "order": [ + "fe17a04d-abbf-03de-ba36-9a7ee1541502", + "c852a502-6158-016c-764e-946fa13905df", + "1922696c-0a91-1b66-6bf2-b6003453a7ca", + "29eb93f0-95b8-12cf-f07f-4ed23a61e2a1", + "9aacf46b-ed0d-1ff5-fa18-014559eef1e1", + "0910e354-f9a0-913c-6b30-02b547ca165f", + "909ecbf8-0d2a-2c3d-6ad3-2f91071bba68", + "7f2210d2-5a8d-f827-64ac-77973c3019c4", + "9aa51fea-d49b-525f-2253-d96f5ba2933e", + "5b6f5c2a-3270-55b5-1332-d32ca67577a6", + "84654cd5-8bfa-9d5e-3cfa-8dde36db3088", + "4c58314a-7f47-7c64-9c8f-abf1bc64aed5" + ], + "owner": "61155", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "8c978d71-6846-1f14-ccc1-89f00ac3607f", + "name": "Keystone", + "description": "", + "order": [ + "ab18d511-890d-d13e-baea-49cb77121a3b", + "832f3e6d-e96c-de32-f7bf-7a4fe2607a42" + ], + "owner": "61155", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "6c24b378-57bb-2c47-6932-6cbd6399bb72", + "name": "Placements", + "description": "", + "order": [ + "2cc7dcd2-5a8d-b356-abdd-784ca7f0ea38", + "57b2dc09-eb7b-c0a9-7f2f-91f7c6686418", + "9defd429-f48a-c8ae-2c3b-a4ee3c229af0", + "69ee343d-bef9-49cc-23bd-24c1197b1202", + "c51917b7-cc34-3b7a-ce4a-74e5df50d574" + ], + "owner": "61155", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "36865d4b-f787-91b6-85a1-a0b847128e5d", + "name": "Plans", + "description": "", + "order": [ + "0683c82e-998e-0616-cbb3-7a96e87e7bdd", + "1b59672b-a98a-cd7d-c923-04f2f337a8b3", + "6717d467-8a9e-fe53-1fcc-b7a0af7f5516", + "32b82f81-bd62-392d-1370-09db2373b020", + "183647df-e27b-069f-90a1-2d007dca552d", + "a37d1925-6cd8-c48b-705b-72c9643b3fb1", + "a62314a3-9dbf-07bf-b978-83b12f14d6d3", + "157b06f2-2de5-584f-9892-930740fbb64d", + "a0b31366-660a-99a2-6bae-03089b1001b5", + "0731ab7b-52d4-be45-b66f-f20aee7ca3de", + "1ec2b0eb-5b1d-dab2-822e-db21a392e41a", + "03446e5d-fb91-4302-9d6a-b28a664fdedd" + ], + "owner": "61155", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "5558cf5e-a565-9fe5-dc67-4115d1b31798", + "name": "Root", + "description": "Root methods", + "order": [ + "f3b36953-a643-fd17-d38e-1842e200d3ff", + "2a20ce60-0a01-c370-8f42-4d67680f9930", + "d00847d3-e72e-062a-ec1a-029f8dbc49d5", + "66f61587-10f9-9454-d9b8-7f1f0748b7b1" + ], + "owner": "61155", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "540bc889-a135-870c-92d8-87470353dc5d", + "name": "Status", + "description": "", + "order": [ + "cb751a74-fe58-5b78-5e85-85c2c23a136b", + "db1cf4f3-8ad1-321d-c5fd-b7faf9821bff", + "6fcbe513-81d4-2314-6a0c-871fc5ce2dab" + ], + "owner": "61155", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + } + ], + "timestamp": 0, + "owner": "61155", + "remoteLink": "", + "public": false, + "requests": [ + { + "id": "03446e5d-fb91-4302-9d6a-b28a664fdedd", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/e624474b-fc80-4053-ab5f-45cc1030e692", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + 
"currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772993195, + "name": "Single Plan", + "description": "Supported methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "0683c82e-998e-0616-cbb3-7a96e87e7bdd", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "var data = JSON.parse(responseBody);\npostman.setEnvironmentVariable(\"plan_id\", data['id']);", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463873704816, + "name": "Create plan", + "description": "Create a plan.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"plan_name\": \"e624474b-fc80-4053-ab5f-45cc1030e692\",\n \"resources\": {\n \"64421fbc-f39d-4a41-b5c1-4aeae2ebf757\": {\n \"properties\": {\n \"size\": 1\n },\n \"type\": \"OS::Cinder::Volume\",\n \"name\": \"my_volume\"\n },\n \"b71bedad-dd57-4942-a7bd-ab074b72d652\": {\n \"properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu12_04\",\n \"key_name\": \"demo\",\n \"networks\": [\n {\n \"network\": \"demo-net\"\n }\n ]\n },\n \"type\": \"OS::Nova::Server\",\n \"name\": \"my_instance\"\n }\n },\n \"stack_id\": \"e624474b-fc80-4053-ab5f-45cc1030e692\",\n \"timeout\": \"60 sec\"\n}\n" + }, + { + "id": "0731ab7b-52d4-be45-b66f-f20aee7ca3de", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/1d146103-2cae-4945-9821-59785c4454b8", + "preRequestScript": "", + "pathVariables": {}, + "method": "DELETE", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772986546, + "name": "Delete plan (ceph example)", + "description": "Deletes a plan.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "0910e354-f9a0-913c-6b30-02b547ca165f", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}/members/b7d0e9b175294b649464caa3411adb3f", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772889358, + "name": "Verify group membership", + "description": "Verify a member is in a group.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"members\": [\n \"922c5cab-6a1b-4e1e-bb10-331633090c41\",\n \"b71bedad-dd57-4942-a7bd-ab074b72d652\"\n ]\n}" + }, + { + "id": "157b06f2-2de5-584f-9892-930740fbb64d", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/e624474b-fc80-4053-ab5f-45cc1030e692", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772978864, + "name": "Show plan details", + "description": "Shows details for a plan.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "183647df-e27b-069f-90a1-2d007dca552d", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 
1463772959783, + "name": "List plans", + "description": "Lists plans.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "1922696c-0a91-1b66-6bf2-b6003453a7ca", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772876510, + "name": "Show group details", + "description": "Shows details for a group.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "1b59672b-a98a-cd7d-c923-04f2f337a8b3", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "var data = JSON.parse(responseBody);\npostman.setEnvironmentVariable(\"plan_id\", data['id']);", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772948319, + "name": "Create plan with Exclusivity Group", + "description": "Create a plan.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"plan_name\": \"e624474b-fc80-4053-ab5f-45cc1030e692\",\n \"resources\": {\n \"56c6f15a-5af6-4540-8258-53cc020d26a9\": {\n \"Properties\": {\n \"group_name\": \"group\",\n \"group_type\": \"exclusivity\",\n \"level\": \"host\",\n \"resources\": [\n \"b71bedad-dd57-4942-a7bd-ab074b72d652\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"my_group_assignment\"\n },\n \"b71bedad-dd57-4942-a7bd-ab074b72d652\": {\n \"Properties\": {\n \"name\": \"test\",\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu12_04\",\n \"key_name\": \"demo\",\n \"networks\": [\n {\n \"network\": \"demo-net\"\n }\n ]\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"my_instance\"\n }\n },\n \"stack_id\": \"e624474b-fc80-4053-ab5f-45cc1030e692\",\n \"timeout\": \"60 sec\"\n}" + }, + { + "id": "1ec2b0eb-5b1d-dab2-822e-db21a392e41a", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772989877, + "name": "Plans", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "29eb93f0-95b8-12cf-f07f-4ed23a61e2a1", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}", + "preRequestScript": "", + "pathVariables": {}, + "method": "PUT", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772882292, + "name": "Update a group", + "description": "Updates a group by ID.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"description\": \"My Updated Awesome Group\"\n}" + }, + { + "id": "2a20ce60-0a01-c370-8f42-4d67680f9930", + "headers": "X-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463773001644, + "name": "API v1 endpoints", + "description": "", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" 
+ }, + { + "id": "2cc7dcd2-5a8d-b356-abdd-784ca7f0ea38", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/placements", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772926573, + "name": "List placements", + "description": "List all placements.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "32b82f81-bd62-392d-1370-09db2373b020", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772955091, + "name": "Create plan (ceph example)", + "description": "Create a plan.\n\nCeph test: 12 OSDs, 3 Monitors, 1 Client", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"plan_name\": \"1d146103-2cae-4945-9821-59785c4454b8\",\n \"resources\": {\n \"00a94e02-3e87-48cc-b90c-ad6e9582170e\": {\n \"Properties\": {\n \"instance_uuid\": \"osd9-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol9-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd9-ceph-r-auto-test_Vol9-ceph-r-auto-test_attachment\"\n },\n \"01cbce96-897f-4698-9d31-430c559929f0\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd1-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd1-ceph-r-auto-test_pipe\"\n },\n \"0293b49f-6962-4d4e-a71f-846e9475ed2b\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd8-ceph-r-auto-test\",\n \"Vol8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol8-osd8-affinity\"\n },\n \"071d9b2b-f854-4057-bf8d-e0c0efffaf8d\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"0847bb36-5551-4cf2-a7da-8a9c0b2a9a4d\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd3-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd3-ceph-r-auto-test_pipe\"\n },\n \"09543e09-3ad1-44de-bc01-d389c88d5f72\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd5-ceph-r-auto-test_pipe\"\n },\n \"0ab5b173-0a46-4909-83fe-3c7321137bd1\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n 
\"osd4-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd4-ceph-r-auto-test_pipe\"\n },\n \"0adf8c7c-b85d-488b-a123-577cf962a16e\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd2-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd2-ceph-r-auto-test_pipe\"\n },\n \"0bb9dd6d-9242-45a3-91df-c002e829acd1\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd7-ceph-r-auto-test_Vol7-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd7-ceph-r-auto-test_Vol7-ceph-r-auto-test_pipe\"\n },\n \"0db87de8-980d-4f58-b0f2-f6c23f7b0b1b\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd5-ceph-r-auto-test_pipe\"\n },\n \"0e5fb95f-8bd6-44ee-a1dc-1cdeff028cb2\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd7-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd7-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"0fcafb94-ca3a-4a59-b6e7-87ff2cbbc274\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"12076ade-1037-4ccf-a3a8-0589676f662e\": {\n \"Properties\": {\n \"instance_uuid\": \"mon1-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol14-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"mon1-ceph-r-auto-test_Vol14-ceph-r-auto-test_attachment\"\n },\n \"13233e81-85f7-44fd-8913-9390bae609ff\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd2-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd2-ceph-r-auto-test_pipe\"\n },\n \"132f5e3c-8a99-45e5-8a6b-4409ff896a4c\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd9-ceph-r-auto-test\",\n \"Vol9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol9-osd9-affinity\"\n },\n \"139fb09c-d8da-490b-8a93-845d389ac4e3\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"14645a1e-6f74-430c-9b3f-c240cbae03d9\": {\n \"Properties\": {\n \"access_type\": 
\"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"147072de-d3c1-4e18-888c-0f564a56be66\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol7-ceph-r-auto-test\"\n },\n \"16b28631-6745-4ec0-9fbb-3080c06d6a21\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"176d7a9d-dfd1-4c0c-a6e2-fc354de6e82a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"mon1-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_mon1-ceph-r-auto-test_pipe\"\n },\n \"180e29ae-d448-4b5f-933b-225842be19ca\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd4-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd4-ceph-r-auto-test_pipe\"\n },\n \"19b179ab-ba12-4f0e-8fd0-c0e8f7ce6f6c\": {\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"mon2-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"mon2-ceph-r-auto-test\"\n },\n \"1a5ca6ca-368c-41c0-b66a-149ab45fd3cd\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol9-ceph-r-auto-test\"\n },\n \"1aa9822e-13ca-4bf0-8170-3e4d86496977\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd7-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd7-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"1bb994e0-4f59-4c63-b47c-3e89d95c405b\": {\n \"DependsOn\": [\n \"osd8-ceph-r-auto-test\",\n \"Vol9-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd9-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd9-ceph-r-auto-test\"\n },\n \"1c13b8f6-790f-466e-9771-8bd96e1b3eb0\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd6-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n 
},\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd6-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"1d34273e-b3a1-4bde-8518-8c9e526309ad\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd6-ceph-r-auto-test_Vol6-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd6-ceph-r-auto-test_Vol6-ceph-r-auto-test_pipe\"\n },\n \"1e22b66e-789c-4fcd-bed8-3fe6fca81e5b\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd7-ceph-r-auto-test_port\"\n },\n \"1ecfdaec-6cb5-46c2-b0c0-248a8d10503e\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd5-ceph-r-auto-test_pipe\"\n },\n \"2046e868-d6e3-4c16-935e-bef74adba495\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"24961894-bab4-44bc-ba0d-2a52478681aa\": {\n \"DependsOn\": [\n \"osd1-ceph-r-auto-test\",\n \"Vol2-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd2-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd2-ceph-r-auto-test\"\n },\n \"273d27de-cc80-4511-b10f-373cc9ee6479\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"Vol0-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol0-osd0-affinity\"\n },\n \"28657a05-88c7-42b4-a473-c50eff8bd9b8\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol15-ceph-r-auto-test\"\n },\n \"2b7cf863-7073-4109-9a3f-6e1244186c4a\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol11-ceph-r-auto-test\"\n },\n \"2b8ceae0-24af-4d2b-972a-a8523c41cf06\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd3-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd3-ceph-r-auto-test_pipe\"\n },\n \"2dbfd788-042b-434a-91bb-e16007f4d08c\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": 
\"osd2-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"2fd571fe-3a88-4737-a12f-2ad4a67e716a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd8-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd8-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"30982ffe-361a-46a7-8834-539cb21100e9\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"Vol2-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol2-osd2-affinity\"\n },\n \"3214090d-95f0-42c4-be65-534a634d857e\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"3260d21a-55a6-4040-9c16-0a6edb3eeec5\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"34256588-40ac-47a9-80bc-309aee92cca5\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"35e46df5-fc9b-4fc8-aa4c-b1dca86ac6dc\": {\n \"DependsOn\": [\n \"osd0-ceph-r-auto-test\",\n \"Vol1-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd1-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd1-ceph-r-auto-test\"\n },\n \"3719264a-5937-4d03-9a32-050d96680c23\": {\n \"DependsOn\": [\n \"osd10-ceph-r-auto-test\",\n \"Vol11-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd11-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd11-ceph-r-auto-test\"\n },\n \"372d4b17-8f0b-4a32-b7fe-e60cecd291a0\": {\n \"DependsOn\": [\n \"osd3-ceph-r-auto-test\",\n \"Vol4-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd4-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir 
/ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd4-ceph-r-auto-test\"\n },\n \"38115bbc-c162-4293-b9a5-f96332a21129\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd9-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd9-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"38a2a423-4bba-4831-9d91-f1a148095175\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd11-ceph-r-auto-test_Vol11-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd11-ceph-r-auto-test_Vol11-ceph-r-auto-test_pipe\"\n },\n \"38de0821-4d1e-4e39-b903-4b5b816e2fb9\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd11-ceph-r-auto-test\",\n \"Vol11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol11-osd11-affinity\"\n },\n \"397fe846-b6e7-48e0-8284-24f4e9b4739d\": {\n \"Properties\": {\n \"instance_uuid\": \"osd10-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol10-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd10-ceph-r-auto-test_Vol10-ceph-r-auto-test_attachment\"\n },\n \"3992c09d-3c0f-485b-b470-88fa8a37bb3e\": {\n \"Properties\": {\n \"instance_uuid\": \"osd1-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol1-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd1-ceph-r-auto-test_Vol1-ceph-r-auto-test_attachment\"\n },\n \"39f0bd4d-a07b-40cd-bdc5-7b5a5a747c69\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"3af94b82-36e7-4263-b1ce-061baf690af7\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd5-ceph-r-auto-test_pipe\"\n },\n \"3b24f275-2cdb-4bd8-84c6-9054cc972440\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"3b3cd3d8-2796-49dd-aaef-0cba7801d081\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd5-ceph-r-auto-test_port\"\n },\n \"3b7c6b78-3109-47b0-8165-84960492539b\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n 
\"osd1-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd1-ceph-r-auto-test_pipe\"\n },\n \"3baeaa45-f259-49c3-ae07-4750cf0f587b\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"3c249a23-dd13-41e0-a4bf-d801672bd3ea\": {\n \"Properties\": {\n \"level\": \"rack\",\n \"relationship\": \"diversity\",\n \"resources\": [\n \"Vol13-mon0-affinity\",\n \"Vol14-mon1-affinity\",\n \"Vol15-mon2-affinity\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol-mon-diversity\"\n },\n \"3cbb63c0-8622-4803-a379-1edff8aa1250\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd3-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd3-ceph-r-auto-test_pipe\"\n },\n \"3dca0c37-b8fe-4077-9675-894741010b41\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd10-ceph-r-auto-test_Vol10-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd10-ceph-r-auto-test_Vol10-ceph-r-auto-test_pipe\"\n },\n \"3e8dcece-8cee-4b4d-8acb-dc1bbfa29b5d\": {\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"mon1-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"mon1-ceph-r-auto-test\"\n },\n \"3fa55ed0-c7a2-4000-ba42-ff1e6cac9ac2\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol4-ceph-r-auto-test\"\n },\n \"3fdf05fe-9d0d-4852-8580-102bbf9684e7\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"40bfbcca-c37d-4f0f-aeb1-db9eb3e7ca7b\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol10-ceph-r-auto-test\"\n },\n \"420c2d09-aaff-465e-a159-47afbb8a8c1e\": {\n \"Properties\": {\n \"instance_uuid\": \"mon2-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol15-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"mon2-ceph-r-auto-test_Vol15-ceph-r-auto-test_attachment\"\n },\n \"43c83373-cf19-4ec4-a111-5b8a68e34556\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test_Vol0-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_Vol0-ceph-r-auto-test_pipe\"\n 
},\n \"4423cbf0-214e-4172-a869-cf5f53374133\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd1-ceph-r-auto-test_port\"\n },\n \"4a240437-dcdf-4daf-9920-45c448dbca51\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"4ade1177-e981-4674-8294-e158e1c8a2d8\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"503832bd-2d0e-454d-ba57-29d1da270359\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"5121c8ab-6775-4b15-994e-f9f538f498ce\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd6-ceph-r-auto-test\",\n \"Vol6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol6-osd6-affinity\"\n },\n \"51881ec0-d972-47d6-8f59-330d4086fb30\": {\n \"Properties\": {\n \"instance_uuid\": \"osd3-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol3-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd3-ceph-r-auto-test_Vol3-ceph-r-auto-test_attachment\"\n },\n \"5bd04e16-014e-46c4-8caa-4b955785e52b\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"5d067ddd-fcad-44de-ac20-53175356c560\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"5ea5ca0c-1c1a-493b-8523-f8b58ba9dea1\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd5-ceph-r-auto-test_pipe\"\n },\n \"5fad0fe2-e0a1-4e3f-8a7a-0fbba12cdc75\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test_Vol1-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": 
\"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_Vol1-ceph-r-auto-test_pipe\"\n },\n \"604d30f3-bc89-4a23-9a99-7dbebcde684e\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd6-ceph-r-auto-test_port\"\n },\n \"61c194b1-33ff-4c69-9317-c0b5c51982d7\": {\n \"DependsOn\": [\n \"osd4-ceph-r-auto-test\",\n \"Vol5-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd5-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd5-ceph-r-auto-test\"\n },\n \"62827f33-71aa-455a-9ba6-cd1f96996c4f\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol3-ceph-r-auto-test\"\n },\n \"63de85fd-74ac-4b49-8ca5-e77560b1dc8a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"65edd9f2-ad68-45b1-9c6f-d9656390f4ad\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"662d358d-69f7-4ffa-8853-f558c280a0a1\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"mon1-ceph-r-auto-test_Vol14-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"mon1-ceph-r-auto-test_Vol14-ceph-r-auto-test_pipe\"\n },\n \"670ad21b-8cee-4995-b30d-558a90692465\": {\n \"DependsOn\": \"osd11-ceph-r-auto-test\",\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"cl0-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nceph_hosts_install mon0-ceph-r-auto-test mon1-ceph-r-auto-test mon2-ceph-r-auto-test osd0-ceph-r-auto-test osd1-ceph-r-auto-test osd2-ceph-r-auto-test osd3-ceph-r-auto-test osd4-ceph-r-auto-test osd5-ceph-r-auto-test osd6-ceph-r-auto-test osd7-ceph-r-auto-test osd8-ceph-r-auto-test osd9-ceph-r-auto-test osd10-ceph-r-auto-test osd11-ceph-r-auto-test\\necho ceph_main_drives 12 >> /home/ubuntu/drives.txt\\necho ceph_spare_drives 0 >> /home/ubuntu/drives.txt\\necho ceph_total_drives 12 >> /home/ubuntu/drives.txt\\necho ceph_hosts_install mon0-ceph-r-auto-test mon1-ceph-r-auto-test mon2-ceph-r-auto-test osd0-ceph-r-auto-test osd1-ceph-r-auto-test osd2-ceph-r-auto-test osd3-ceph-r-auto-test osd4-ceph-r-auto-test osd5-ceph-r-auto-test osd6-ceph-r-auto-test osd7-ceph-r-auto-test osd8-ceph-r-auto-test osd9-ceph-r-auto-test osd10-ceph-r-auto-test osd11-ceph-r-auto-test > /home/ubuntu/addedhosts.txt\\n\"\n },\n \"Type\": 
\"OS::Nova::Server\",\n \"name\": \"cl0-ceph-r-auto-test\"\n },\n \"68a7b48c-b1cd-4291-a2a9-6bfad0e98f78\": {\n \"DependsOn\": \"mon1-ceph-r-auto-test\",\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"mon2-ceph-r-auto-test_port\"\n },\n \"6914a752-09a6-4db8-b1f3-96a5fe57ca3a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd7-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd7-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"6abe8380-01b5-432f-ab1b-f55409741305\": {\n \"DependsOn\": [\n \"mon0-ceph-r-auto-test\",\n \"Vol0-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd0-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd0-ceph-r-auto-test\"\n },\n \"6bc7e4b2-2f51-4222-8b2e-61e6539b26b0\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"Vol12-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol12-cl0-affinity\"\n },\n \"6d002d81-9fe0-4343-8be8-7371cde938c2\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd8-ceph-r-auto-test_port\"\n },\n \"6d27687c-ae13-423c-8784-24aeeeedd55e\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol1-ceph-r-auto-test\"\n },\n \"6dcad07f-4539-4ed0-8c47-e415f0f4c9a5\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test_Vol4-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_Vol4-ceph-r-auto-test_pipe\"\n },\n \"6dd496ad-ffa6-45b6-ac4c-0b108de80a38\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol8-ceph-r-auto-test\"\n },\n \"6ee994a7-2ea6-4ffe-a60d-0d99d8556ff6\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"6f65d65e-fb9c-457d-a796-c5adbc3805b3\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"6f855c0c-fcc9-432d-b9fa-c5cbed893ea7\": {\n \"Properties\": {\n 
\"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"6fc05f90-7c2c-4a69-9150-d8a0b9e83ea6\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"70ab8430-b0cc-4195-940a-605d339021c6\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"7756a8e2-48ae-4874-8076-fcf82e72943d\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd7-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd7-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"7b5cda9b-8802-457e-ae37-3f2a52fabe3e\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd4-ceph-r-auto-test_port\"\n },\n \"7fd80823-da8a-4b0e-9acd-53b46866178a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd3-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd3-ceph-r-auto-test_pipe\"\n },\n \"8221bdca-a56b-4a3d-b4f7-249fff7aa61e\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"83d2dd40-a468-4c39-a904-aefd187b3795\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd10-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd10-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"8410e367-7760-41c8-b551-00702767c2ed\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"cl0-ceph-r-auto-test_port\"\n },\n \"84e11653-eba1-41b3-a069-1d5dfe8d5708\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": 
\"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"8ae0d117-f184-4e74-bf04-736056e96408\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd10-ceph-r-auto-test\",\n \"Vol10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol10-osd10-affinity\"\n },\n \"8cd1afeb-8886-46c2-85b1-eb9ed389e02b\": {\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"mon0-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"mon0-ceph-r-auto-test\"\n },\n \"912ad104-97d7-4b5d-9a40-ad33baa38864\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd8-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd8-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"96e1d69c-3b25-454d-a543-1fd0d0165aa6\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd0-ceph-r-auto-test_port\"\n },\n \"9749a54b-b4bd-4cef-b97f-4749afde91ac\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"mon2-ceph-r-auto-test_Vol15-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"mon2-ceph-r-auto-test_Vol15-ceph-r-auto-test_pipe\"\n },\n \"98507471-01f3-41d1-b9cd-9208ad59ebb6\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"mon0-ceph-r-auto-test\",\n \"Vol13-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol13-mon0-affinity\"\n },\n \"99e17b1c-d301-4dd1-98c6-8f414439737d\": {\n \"Properties\": {\n \"instance_uuid\": \"cl0-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol12-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"cl0-ceph-r-auto-test_Vol12-ceph-r-auto-test_attachment\"\n },\n \"9a157a34-6b13-4136-862d-1ae7d6226862\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"mon2-ceph-r-auto-test\",\n \"Vol15-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol15-mon2-affinity\"\n },\n \"9aaf462e-b5df-4b21-93e5-d52391d9b6f4\": {\n \"DependsOn\": [\n \"osd2-ceph-r-auto-test\",\n \"Vol3-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd3-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd3-ceph-r-auto-test\"\n },\n \"9c6f50c4-943f-4e3b-a7c2-f073cc99fcd0\": {\n \"Properties\": {\n \"level\": 
\"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"Vol3-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol3-osd3-affinity\"\n },\n \"9d40a88a-398f-4b38-9b36-98cc794451ad\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd9-ceph-r-auto-test_port\"\n },\n \"9dc34036-8f8e-409f-9eb5-ab82e861f9a2\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol2-ceph-r-auto-test\"\n },\n \"9eab90dc-5e16-4512-90d5-8f93904061bf\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"Vol4-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol4-osd4-affinity\"\n },\n \"a1463b9e-5e87-4db4-bf64-829fe49ced46\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"a3840a97-0ea4-4137-8644-e92e526c77ac\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"a3b0886c-f53a-4c48-9766-fee8e83bd39a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test_Vol5-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_Vol5-ceph-r-auto-test_pipe\"\n },\n \"a3e6e931-0d92-47d1-880d-d137d795c4c2\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"Vol5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol5-osd5-affinity\"\n },\n \"a6fa04f4-2486-4e8f-af87-93b8e8bbeee9\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"Vol1-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol1-osd1-affinity\"\n },\n \"a7be7acb-2961-48a4-a540-e6a8d6ebee21\": {\n \"Properties\": {\n \"instance_uuid\": \"osd7-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol7-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd7-ceph-r-auto-test_Vol7-ceph-r-auto-test_attachment\"\n },\n \"a841fb37-506a-4691-b598-49d3f600c87a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"a97901ed-5233-48f6-b87c-341103e8a061\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 
24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"aa363330-34e6-4f9d-ac0e-e4a78046e6ad\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd1-ceph-r-auto-test\",\n \"osd4-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd1-ceph-r-auto-test_osd4-ceph-r-auto-test_pipe\"\n },\n \"ab3ec1d6-1d65-469b-844b-1f8a41e23ff7\": {\n \"Properties\": {\n \"instance_uuid\": \"mon0-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol13-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"mon0-ceph-r-auto-test_Vol13-ceph-r-auto-test_attachment\"\n },\n \"ad17f5c6-dd14-4f08-aab1-9907d5957cfb\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol5-ceph-r-auto-test\"\n },\n \"adc5d858-5d86-4091-8115-e02ab0046b6b\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"af34369a-4aa4-455e-a09f-b03fea3cb3db\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd0-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd0-ceph-r-auto-test_pipe\"\n },\n \"b4df92fe-738c-4e89-bc3f-44d38786f512\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd8-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd8-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"b5c8670a-a538-457e-ae6b-55ec5c7e0a4d\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd5-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd5-ceph-r-auto-test_pipe\"\n },\n \"b6c489e2-b2cd-475d-a656-d78c5357be4c\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd6-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd6-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"b7bca289-eb9a-46f9-bb4f-0d940700f11c\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd6-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd6-ceph-r-auto-test_pipe\"\n },\n \"b825ba9c-cbb1-40a4-9e97-d0ba8a5a4966\": {\n \"Properties\": {\n \"instance_uuid\": 
\"osd2-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol2-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd2-ceph-r-auto-test_Vol2-ceph-r-auto-test_attachment\"\n },\n \"b9500bd1-367e-4f69-95b2-137beb03d198\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd4-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd4-ceph-r-auto-test_pipe\"\n },\n \"bc9cf867-2f32-4388-9cd9-40fe2de8da94\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test_Vol12-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_Vol12-ceph-r-auto-test_pipe\"\n },\n \"bd680ded-ad47-47e6-ad8f-373cc79dfd7e\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd11-ceph-r-auto-test_port\"\n },\n \"bf157b9f-5726-48b4-8bde-c1a219d348d9\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"bf541f37-2e58-4d81-922c-566e211b57c3\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"c25f395c-5e15-4e8c-8d7d-52a2689fa8d5\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test_Vol2-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_Vol2-ceph-r-auto-test_pipe\"\n },\n \"c2c268c1-2e5d-4329-89e7-9e87e10b007a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"mon2-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_mon2-ceph-r-auto-test_pipe\"\n },\n \"c37d830c-af49-4e4f-81b3-5b228b480fec\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol6-ceph-r-auto-test\"\n },\n \"c4ac9f61-4df7-4241-9bfb-7c39a2498518\": {\n \"Properties\": {\n \"instance_uuid\": \"osd5-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol5-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd5-ceph-r-auto-test_Vol5-ceph-r-auto-test_attachment\"\n },\n \"c71c70e2-d56b-4f0f-8c6c-c856d7e62bc9\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n 
\"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd4-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd4-ceph-r-auto-test_pipe\"\n },\n \"c9c76694-7742-439c-92a8-bff6c2e2c389\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd8-ceph-r-auto-test_Vol8-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd8-ceph-r-auto-test_Vol8-ceph-r-auto-test_pipe\"\n },\n \"ca1102ac-c203-43cd-87d3-439aa6d2a0ac\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd9-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd9-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"caa0ec5c-5e3b-4a11-b231-56b467b9afe0\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"mon0-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_mon0-ceph-r-auto-test_pipe\"\n },\n \"cb03f07f-b47d-498b-bec2-3417ece4b5a3\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"cd939db7-3af1-4af1-894f-9e5482e5d441\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd5-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd5-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"ced00be7-afac-4e19-ad3e-a0f51e6ff426\": {\n \"Properties\": {\n \"instance_uuid\": \"osd6-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol6-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd6-ceph-r-auto-test_Vol6-ceph-r-auto-test_attachment\"\n },\n \"cffc6462-0aa9-4ad2-8a60-64f62dfae80e\": {\n \"DependsOn\": [\n \"osd9-ceph-r-auto-test\",\n \"Vol10-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd10-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd10-ceph-r-auto-test\"\n },\n \"d26fae65-b0e0-4955-adcc-c2a8c0d1b4a7\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol0-ceph-r-auto-test\"\n },\n \"da08b866-0700-4e4c-aa05-1362e33b2171\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol12-ceph-r-auto-test\"\n },\n \"da5eb061-5295-4740-9a91-d10f7dc19c70\": {\n \"Properties\": {\n \"instance_uuid\": \"osd0-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol0-ceph-r-auto-test\"\n 
},\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd0-ceph-r-auto-test_Vol0-ceph-r-auto-test_attachment\"\n },\n \"daaf3ad9-5b56-47ad-a5fc-92f4403c5af9\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd3-ceph-r-auto-test_Vol3-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd3-ceph-r-auto-test_Vol3-ceph-r-auto-test_pipe\"\n },\n \"dc46eabe-40a1-4dbc-b6a3-91dbb465c9ed\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"osd7-ceph-r-auto-test\",\n \"Vol7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol7-osd7-affinity\"\n },\n \"decb8d75-4f62-4ea4-afaa-629e86ba5a03\": {\n \"DependsOn\": [\n \"osd7-ceph-r-auto-test\",\n \"Vol8-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd8-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd8-ceph-r-auto-test\"\n },\n \"df4602b6-1c5f-49b2-81e6-81677cdd73d8\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd2-ceph-r-auto-test_port\"\n },\n \"df9f81d4-491f-405e-94df-6c4e29a04e07\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"mon0-ceph-r-auto-test_port\"\n },\n \"e06f4a48-ff6d-49a4-8acf-842891c347c4\": {\n \"DependsOn\": \"mon0-ceph-r-auto-test\",\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"mon1-ceph-r-auto-test_port\"\n },\n \"e17ec745-650d-4cb5-a32c-edf1d3767037\": {\n \"Properties\": {\n \"instance_uuid\": \"osd11-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol11-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd11-ceph-r-auto-test_Vol11-ceph-r-auto-test_attachment\"\n },\n \"e1e4389f-854f-4a1f-a8b1-efab5c7943f1\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd10-ceph-r-auto-test_port\"\n },\n \"e282f7c0-5f48-4df4-af7d-9cb64b7c8c38\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"cl0-ceph-r-auto-test\",\n \"osd8-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"cl0-ceph-r-auto-test_osd8-ceph-r-auto-test_pipe\"\n },\n \"e5004def-bb60-462a-b924-4f6516b39c79\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol14-ceph-r-auto-test\"\n },\n 
\"e57d61bd-d64e-4da5-a3e6-d314b6fec69f\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd6-ceph-r-auto-test\",\n \"osd9-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd6-ceph-r-auto-test_osd9-ceph-r-auto-test_pipe\"\n },\n \"e6c21917-1497-47eb-8401-5b1082430ac7\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"mon0-ceph-r-auto-test_Vol13-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"mon0-ceph-r-auto-test_Vol13-ceph-r-auto-test_pipe\"\n },\n \"e6d8b420-7b33-40ab-9366-7c92a84cf3e3\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd2-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd2-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"e7e7d090-80cb-4a47-ab58-ea0bee690d0a\": {\n \"DependsOn\": [\n \"osd6-ceph-r-auto-test\",\n \"Vol7-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd7-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd7-ceph-r-auto-test\"\n },\n \"e8eaa8fc-45bc-4bad-b89e-0e63be6dfbbc\": {\n \"Properties\": {\n \"instance_uuid\": \"osd4-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol4-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd4-ceph-r-auto-test_Vol4-ceph-r-auto-test_attachment\"\n },\n \"eb770fe2-abbb-47a0-9e3c-270311581e65\": {\n \"DependsOn\": [\n \"osd5-ceph-r-auto-test\",\n \"Vol6-ceph-r-auto-test\"\n ],\n \"Properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu14_04\",\n \"key_name\": \"demo\",\n \"name\": \"osd6-ceph-r-auto-test\",\n \"networks\": [\n {\n \"port\": \"None\"\n }\n ],\n \"user_data\": \"#!/bin/sh\\nwait_for_vol_attach --device-file /dev/vdb --tries 360 --sleep-seconds 10\\nsudo mkfs.ext4 /dev/vdb\\nsudo mkdir /ceph\\nsudo mount /dev/vdb /ceph\\n\"\n },\n \"Type\": \"OS::Nova::Server\",\n \"name\": \"osd6-ceph-r-auto-test\"\n },\n \"ec07778a-c1b5-4864-8e99-b446b6e2aec3\": {\n \"Properties\": {\n \"fixed_ips\": [\n {\n \"subnet_id\": \"955bf74d-963d-4d34-874c-8d4b3038ff71\"\n }\n ],\n \"network_id\": \"ba0c9c76-0239-409a-8a0d-0d5dece969f3\"\n },\n \"Type\": \"OS::Neutron::Port\",\n \"name\": \"osd3-ceph-r-auto-test_port\"\n },\n \"ec6381b5-91bf-4d60-963d-258713d338e5\": {\n \"Properties\": {\n \"level\": \"host\",\n \"relationship\": \"affinity\",\n \"resources\": [\n \"mon1-ceph-r-auto-test\",\n \"Vol14-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol14-mon1-affinity\"\n },\n \"f1af5d56-9a01-45b0-816e-5a260e2f5e50\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": 
\"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"f1c35370-a920-4852-a95c-d57950bebc2a\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 48.1,\n \"min\": 48.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd9-ceph-r-auto-test_Vol9-ceph-r-auto-test_attachment\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd9-ceph-r-auto-test_Vol9-ceph-r-auto-test_pipe\"\n },\n \"f36f5018-dc72-48da-9bc5-7c9cbfab742c\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd0-ceph-r-auto-test\",\n \"osd2-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd0-ceph-r-auto-test_osd2-ceph-r-auto-test_pipe\"\n },\n \"f539b4f7-a4d0-4f34-ba7a-b25f9638c9ba\": {\n \"Properties\": {\n \"size\": 16\n },\n \"Type\": \"OS::Cinder::Volume\",\n \"name\": \"Vol13-ceph-r-auto-test\"\n },\n \"f6850da8-0506-482d-b3fd-1bf9575d43f0\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd4-ceph-r-auto-test\",\n \"osd11-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd4-ceph-r-auto-test_osd11-ceph-r-auto-test_pipe\"\n },\n \"f7c3c602-0f95-43a3-9df3-1ae4432b4d39\": {\n \"Properties\": {\n \"level\": \"rack\",\n \"relationship\": \"diversity\",\n \"resources\": [\n \"Vol0-osd0-affinity\",\n \"Vol1-osd1-affinity\",\n \"Vol2-osd2-affinity\",\n \"Vol3-osd3-affinity\",\n \"Vol4-osd4-affinity\",\n \"Vol5-osd5-affinity\",\n \"Vol6-osd6-affinity\",\n \"Vol7-osd7-affinity\",\n \"Vol8-osd8-affinity\",\n \"Vol9-osd9-affinity\",\n \"Vol10-osd10-affinity\",\n \"Vol11-osd11-affinity\"\n ]\n },\n \"Type\": \"ATT::Valet::GroupAssignment\",\n \"name\": \"Vol-osd-diversity\"\n },\n \"fb0428d4-5e28-4f7f-ad13-e497df47a645\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd6-ceph-r-auto-test\",\n \"osd7-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd6-ceph-r-auto-test_osd7-ceph-r-auto-test_pipe\"\n },\n \"fe046b99-d2d1-4111-b230-6c7151e252d2\": {\n \"Properties\": {\n \"access_type\": \"mixed\",\n \"bandwidth\": {\n \"max\": 24.1,\n \"min\": 24.1,\n \"tolerance\": 1.0,\n \"units\": \"Mbps\"\n },\n \"resources\": [\n \"osd6-ceph-r-auto-test\",\n \"osd10-ceph-r-auto-test\"\n ]\n },\n \"Type\": \"ATT::Valet::Pipe\",\n \"name\": \"osd6-ceph-r-auto-test_osd10-ceph-r-auto-test_pipe\"\n },\n \"ff420950-44c6-4f0d-9199-6652c61a63aa\": {\n \"Properties\": {\n \"instance_uuid\": \"osd8-ceph-r-auto-test\",\n \"mountpoint\": \"/dev/vdb\",\n \"volume_id\": \"Vol8-ceph-r-auto-test\"\n },\n \"Type\": \"OS::Cinder::VolumeAttachment\",\n \"name\": \"osd8-ceph-r-auto-test_Vol8-ceph-r-auto-test_attachment\"\n }\n },\n \"stack_id\": \"1d146103-2cae-4945-9821-59785c4454b8\",\n \"timeout\": \"60 sec\"\n}" + }, + { + "id": "4c58314a-7f47-7c64-9c8f-abf1bc64aed5", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}/members", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772913590, 
+ "name": "Group Members", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "57b2dc09-eb7b-c0a9-7f2f-91f7c6686418", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/placements/b71bedad-dd57-4942-a7bd-ab074b72d652", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772930285, + "name": "List single placement, no replan", + "description": "List placement given an orchestration id", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "5b6f5c2a-3270-55b5-1332-d32ca67577a6", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772905231, + "name": "Groups", + "description": "Supported methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "66f61587-10f9-9454-d9b8-7f1f0748b7b1", + "headers": "X-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463773008585, + "name": "API v1 endpoints", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "6717d467-8a9e-fe53-1fcc-b7a0af7f5516", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/e624474b-fc80-4053-ab5f-45cc1030e692", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772951806, + "name": "Create plan (error)", + "description": "This is a malformed API request.\n\nCan't POST to a plan ID. 
This should return an error.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "69ee343d-bef9-49cc-23bd-24c1197b1202", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/placements", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772936810, + "name": "Placements", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "6fcbe513-81d4-2314-6a0c-871fc5ce2dab", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/status", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463773021703, + "name": "Status", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "7f2210d2-5a8d-f827-64ac-77973c3019c4", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}/members", + "preRequestScript": "", + "pathVariables": {}, + "method": "DELETE", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772896715, + "name": "Delete all group members", + "description": "Deletes all members from a group.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "832f3e6d-e96c-de32-f7bf-7a4fe2607a42", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{keystone}}/v2.0/tokens/{{auth_token}}", + "preRequestScript": "", + "pathVariables": {}, + "method": "DELETE", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1460055204876, + "name": "Keystone Delete Token v2", + "description": "Delete keystone token", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "84654cd5-8bfa-9d5e-3cfa-8dde36db3088", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772910350, + "name": "Single Group", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "909ecbf8-0d2a-2c3d-6ad3-2f91071bba68", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}/members/b7d0e9b175294b649464caa3411adb3f", + "preRequestScript": "", + "pathVariables": {}, + "method": "DELETE", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772893228, + "name": "Delete group member", + "description": "Deletes a member from a group.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "9aa51fea-d49b-525f-2253-d96f5ba2933e", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}", + "preRequestScript": "", + "pathVariables": {}, + "method": "DELETE", + "data": 
[], + "dataMode": "raw", + "tests": "postman.clearGlobalVariable(\"group_id\");", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772902126, + "name": "Delete group", + "description": "Deletes a group.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "9aacf46b-ed0d-1ff5-fa18-014559eef1e1", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups/{{group_id}}/members", + "preRequestScript": "", + "pathVariables": {}, + "method": "PUT", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772885690, + "name": "Update group members", + "description": "Merges existing group members with a new list.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"members\": [\n \"b7d0e9b175294b649464caa3411adb3f\"\n ]\n}" + }, + { + "id": "9defd429-f48a-c8ae-2c3b-a4ee3c229af0", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/placements/b71bedad-dd57-4942-a7bd-ab074b72d652", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772933469, + "name": "List placements with replan", + "description": "List and possibly replan placements given an orchestration id", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"locations\": [\"qos102\", \"qos103\", \"qos104\", \"qos106\", \"qos107\"]\n}" + }, + { + "id": "a0b31366-660a-99a2-6bae-03089b1001b5", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/e624474b-fc80-4053-ab5f-45cc1030e692", + "preRequestScript": "", + "pathVariables": {}, + "method": "DELETE", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772983284, + "name": "Delete plan", + "description": "Deletes a plan.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "" + }, + { + "id": "a37d1925-6cd8-c48b-705b-72c9643b3fb1", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/e624474b-fc80-4053-ab5f-45cc1030e692", + "preRequestScript": "", + "pathVariables": {}, + "method": "PUT", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463877618440, + "name": "Migrate Resource in a Plan", + "description": "Migrate a single resource in a plan.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"action\": \"migrate\",\n \"excluded_hosts\": [\"qos104\", \"qos106\", \"qos107\"],\n \"resources\": [\"b71bedad-dd57-4942-a7bd-ab074b72d652\"],\n \"timeout\": \"60 sec\"\n}" + }, + { + "id": "a62314a3-9dbf-07bf-b978-83b12f14d6d3", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/plans/e624474b-fc80-4053-ab5f-45cc1030e692", + "preRequestScript": "", + "pathVariables": {}, + "method": "PUT", + "data": [], + "dataMode": "raw", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463873686204, + "name": "Update a plan (not yet used)", + "description": "Updates a plan by ID.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"action\": \"update\",\n \"resources\": {\n 
\"a649424b-dd57-1431-befc-45cc4b72d653\": {\n \"properties\": {\n \"flavor\": \"m1.small\",\n \"image\": \"ubuntu12_04\",\n \"key_name\": \"demo\",\n \"networks\": [\n {\n \"network\": \"demo-net\"\n }\n ]\n },\n \"type\": \"OS::Nova::Server\",\n \"name\": \"my_new_instance\"\n }\n },\n \"timeout\": \"60 sec\"\n}\n" + }, + { + "id": "ab18d511-890d-d13e-baea-49cb77121a3b", + "headers": "Content-Type: application/json\nAccept: application/json\n", + "url": "{{keystone}}/v2.0/tokens", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "var data = JSON.parse(responseBody);\npostman.setEnvironmentVariable(\"auth_token\", data['access']['token']['id']);\npostman.setEnvironmentVariable(\"tenant_id\", data['access']['token']['tenant']['id']);", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1460499553922, + "name": "Keystone Generate Token v2", + "description": "Generate a token for use", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"auth\": {\n \"tenantName\": \"{{tenant_name}}\",\n \"passwordCredentials\": {\n \"username\": \"{{username}}\",\n \"password\": \"{{password}}\"\n }\n }\n}" + }, + { + "id": "c51917b7-cc34-3b7a-ce4a-74e5df50d574", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/placements/bcfc4498-2336-4d4d-8ea5-da5959875819", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772940050, + "name": "Single Placement", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "c852a502-6158-016c-764e-946fa13905df", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772837765, + "name": "List groups", + "description": "Lists groups.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "cb751a74-fe58-5b78-5e85-85c2c23a136b", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/status", + "preRequestScript": "", + "pathVariables": {}, + "method": "HEAD", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463773014300, + "name": "Get status of all components", + "description": "Get combined status of all component systems.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "d00847d3-e72e-062a-ec1a-029f8dbc49d5", + "headers": "X-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/", + "preRequestScript": "", + "pathVariables": {}, + "method": "OPTIONS", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463773005661, + "name": "API", + "description": "Supported Methods", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "db1cf4f3-8ad1-321d-c5fd-b7faf9821bff", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/status", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + 
"helperAttributes": {}, + "time": 1463773018103, + "name": "List status of all components", + "description": "List detailed status of each component.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "f3b36953-a643-fd17-d38e-1842e200d3ff", + "headers": "X-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/", + "preRequestScript": "", + "pathVariables": {}, + "method": "GET", + "data": [], + "dataMode": "params", + "tests": "", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772998506, + "name": "API Root", + "description": "Root API endpoint\n", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3" + }, + { + "id": "fe17a04d-abbf-03de-ba36-9a7ee1541502", + "headers": "Content-Type: application/json\nX-Auth-Token: {{auth_token}}\n", + "url": "{{valet}}/v1/groups", + "preRequestScript": "", + "pathVariables": {}, + "method": "POST", + "data": [], + "dataMode": "raw", + "tests": "var data = JSON.parse(responseBody);\npostman.setEnvironmentVariable(\"group_id\", data['id']);", + "currentHelper": "normal", + "helperAttributes": {}, + "time": 1463772834694, + "name": "Create group", + "description": "Create a group.", + "collectionId": "1d5ba476-f6c7-a0c0-d664-a331c00926c3", + "rawModeData": "{\n \"name\": \"group\",\n \"description\": \"My Awesome Group\",\n \"type\": \"exclusivity\"\n}" + } + ] +} \ No newline at end of file diff --git a/valet/tests/api/__init__.py b/valet/tests/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/api/config.py b/valet/tests/api/config.py new file mode 100644 index 0000000..0a089a8 --- /dev/null +++ b/valet/tests/api/config.py @@ -0,0 +1,84 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from pecan.hooks import TransactionHook
+
+from valet.api.db import models
+
+
+# Server Specific Configurations
+server = {
+    'port': '8090',
+    'host': '0.0.0.0'
+}
+
+# Pecan Application Configurations
+app = {
+    'root': 'valet_api.controllers.root.RootController',
+    'modules': ['valet_api'],
+    'default_renderer': 'json',
+    'force_canonical': False,
+    'hooks': [
+        TransactionHook(
+            models.start,
+            models.start_read_only,
+            models.commit,
+            models.rollback,
+            models.clear
+        ),
+    ],
+    'static_root': '%(confdir)s/../../public',
+    # 'template_path': '%(confdir)s/../templates',
+    'debug': True,
+}
+
+logging = {
+    'root': {'level': 'INFO', 'handlers': ['console']},
+    'loggers': {
+        'valet_api': {'level': 'DEBUG', 'handlers': ['console']},
+        'pecan': {'level': 'DEBUG', 'handlers': ['console']},
+        'py.warnings': {'handlers': ['console']},
+        '__force_dict__': True
+    },
+    'handlers': {
+        'console': {
+            'level': 'DEBUG',
+            'class': 'logging.StreamHandler',
+            'formatter': 'color'
+        }
+    },
+    'formatters': {
+        'simple': {
+            'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
+                       '[%(threadName)s] %(message)s')
+        },
+        'color': {
+            '()': 'pecan.log.ColorFormatter',
+            'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
+                       '[%(threadName)s] %(message)s'), '__force_dict__': True
+        }
+    }
+}
+
+sqlalchemy = {
+    'url': 'sqlite:///dev.db',
+    'echo': True,
+    'echo_pool': True,
+    'pool_recycle': 3600,
+    'encoding': 'utf-8'
+}
diff --git a/valet/tests/api/conftest.py b/valet/tests/api/conftest.py
new file mode 100644
index 0000000..e4f6f23
--- /dev/null
+++ b/valet/tests/api/conftest.py
@@ -0,0 +1,199 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import deepcopy
+import os
+from pecan import conf
+from pecan import configuration
+from pecan.testing import load_test_app
+import pytest
+
+# TODO(JD): Make this work for music or write a separate test.
+from sqlalchemy import create_engine
+from sqlalchemy.pool import NullPool
+import subprocess
+
+from valet.api.db import models as _db
+
+PARAMS = 'charset=utf8'
+DBNAME = 'valettest'
+BIND = 'mysql+pymysql://root:password@127.0.0.1'
+
+
+def config_file():
+    here = os.path.abspath(os.path.dirname(__file__))
+    return os.path.join(here, 'config.py')
+
+
+@pytest.fixture(scope='session')
+def app(request):
+    config = configuration.conf_from_file(config_file()).to_dict()
+
+    # Add the appropriate connection string to the app config.
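+    # NullPool opens a fresh DB connection on each checkout and closes it on
+    # release, so no pooled connections linger across the test session.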
+ config['sqlalchemy'] = { + 'url': '%s/%s?%s' % (BIND, DBNAME, PARAMS), + 'encoding': 'utf-8', + 'poolclass': NullPool + } + + # Set up a fake app + app = TestApp(load_test_app(config)) + return app + + +@pytest.fixture(scope='session') +def connection(app, request): + """Session-wide test database.""" + # Connect and create the temporary database + print("=" * 80) + print("CREATING TEMPORARY DATABASE FOR TESTS") + print("=" * 80) + subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop', DBNAME]) + subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create', DBNAME]) + + # Bind and create the database tables + _db.clear() + engine_url = '%s/%s?%s' % (BIND, DBNAME, PARAMS) + + db_engine = create_engine( + engine_url, + encoding='utf-8', + poolclass=NullPool) + + # AKA models.start() + _db.Session.bind = db_engine + _db.metadata.bind = _db.Session.bind + + _db.Base.metadata.create_all(db_engine) + _db.commit() + _db.clear() + + # connection = db_engine.connect() + + def teardown(): + _db.Base.metadata.drop_all(db_engine) + + request.addfinalizer(teardown) + + # Slap our test app on it + _db.app = app + return _db + + +@pytest.fixture(scope='function') +def session(connection, request): + """Creates a new database session for a test.""" + _config = configuration.conf_from_file(config_file()).to_dict() + config = deepcopy(_config) + + # Add the appropriate connection string to the app config. + config['sqlalchemy'] = { + 'url': '%s/%s?%s' % (BIND, DBNAME, PARAMS), + 'encoding': 'utf-8', + 'poolclass': NullPool + } + + connection.start() + + def teardown(): + from sqlalchemy.engine import reflection + + # Tear down and dispose the DB binding + connection.clear() + + # start a transaction + engine = conf.sqlalchemy.engine + conn = engine.connect() + trans = conn.begin() + + inspector = reflection.Inspector.from_engine(engine) + + # gather all data first before dropping anything. + # some DBs lock after things have been dropped in + # a transaction. + conn.execute("SET FOREIGN_KEY_CHECKS = 0") + table_names = inspector.get_table_names() + for table in table_names: + conn.execute("TRUNCATE TABLE %s" % table) + conn.execute("SET FOREIGN_KEY_CHECKS = 1") + + trans.commit() + conn.close() + + request.addfinalizer(teardown) + return connection + + +class TestApp(object): + """ A controller test starts a database transaction and creates a fake WSGI app. 
""" + + __headers__ = {} + + def __init__(self, app): + self.app = app + + def _do_request(self, url, method='GET', **kwargs): + methods = { + 'GET': self.app.get, + 'POST': self.app.post, + 'POSTJ': self.app.post_json, + 'PUT': self.app.put, + 'DELETE': self.app.delete + } + kwargs.setdefault('headers', {}).update(self.__headers__) + return methods.get(method, self.app.get)(str(url), **kwargs) + + def post_json(self, url, **kwargs): + """ note: + + @param (string) url - The URL to emulate a POST request to + @returns (paste.fixture.TestResponse) + """ + return self._do_request(url, 'POSTJ', **kwargs) + + def post(self, url, **kwargs): + """ note: + + @param (string) url - The URL to emulate a POST request to + @returns (paste.fixture.TestResponse) + """ + return self._do_request(url, 'POST', **kwargs) + + def get(self, url, **kwargs): + """ note: + + @param (string) url - The URL to emulate a GET request to + @returns (paste.fixture.TestResponse) + """ + return self._do_request(url, 'GET', **kwargs) + + def put(self, url, **kwargs): + """ note: + + @param (string) url - The URL to emulate a PUT request to + @returns (paste.fixture.TestResponse) + """ + return self._do_request(url, 'PUT', **kwargs) + + def delete(self, url, **kwargs): + """ note: + + @param (string) url - The URL to emulate a DELETE request to + @returns (paste.fixture.TestResponse) + """ + return self._do_request(url, 'DELETE', **kwargs) diff --git a/valet/tests/api/controllers/__init__.py b/valet/tests/api/controllers/__init__.py new file mode 100644 index 0000000..8ad56b9 --- /dev/null +++ b/valet/tests/api/controllers/__init__.py @@ -0,0 +1,27 @@ +from uuid import UUID + + +def is_valid_uuid4(uuid_string): + """ Validate that a UUID string is in fact a valid uuid4. + + Happily, the uuid module does the actual + checking for us. + + It is vital that the 'version' kwarg be passed + to the UUID() call, otherwise any 32-character + hex string is considered valid. + """ + + try: + val = UUID(uuid_string, version=4) + except ValueError: + # If it's a value error, then the string + # is not a valid hex code for a UUID. + return False + + # If the uuid_string is a valid hex code, + # but an invalid uuid4, + # the UUID.__init__ will convert it to a + # valid uuid4. This is bad for validation purposes. + + return str(val) == uuid_string or val.hex == uuid_string diff --git a/valet/tests/api/controllers/test_plans.py b/valet/tests/api/controllers/test_plans.py new file mode 100644 index 0000000..13bae4c --- /dev/null +++ b/valet/tests/api/controllers/test_plans.py @@ -0,0 +1,100 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +from uuid import uuid4 + +from valet.api.db.models import Plan, Placement +from valet.tests.api.controllers import is_valid_uuid4 + +# TODO(JD): Add Keystone mock object. 
+STACK_ID = 'e624474b-fc80-4053-ab5f-45cc1030e692' +PLAN_NAME = 'ihaveaplan' + + +class TestPlansController(object): + def test_get_index_no_plans(self, session): + result = session.app.get('/v1/plans/') + assert result.status_int == 200 + assert result.json == [] + + def test_get_index_a_plan(self, session): + Plan(PLAN_NAME, STACK_ID) + session.commit() + result = session.app.get('/v1/plans/').json + assert result == [PLAN_NAME] + + def test_single_plan_should_have_one_item(self, session): + Plan(PLAN_NAME, STACK_ID) + session.commit() + result = session.app.get('/v1/plans/') + assert result.status_int == 200 + assert len(result.json) == 1 + + def test_list_a_few_plans(self, session): + for plan_number in range(20): + stack_id = str(uuid4()) + Plan('foo_%s' % plan_number, stack_id) + session.commit() + + result = session.app.get('/v1/plans/') + json = result.json + assert result.status_int == 200 + assert len(json) == 20 + + +class TestPlansItemController(object): + def test_get_index_single_plan(self, session): + Plan(PLAN_NAME, STACK_ID) + session.commit() + result = session.app.get('/v1/plans/%s/' % (STACK_ID)) + assert result.status_int == 200 + + def test_get_index_no_plan(self, session): + result = session.app.get('/v1/plans/%s/' % (STACK_ID), + expect_errors=True) + assert result.status_int == 404 + + def test_get_index_single_plan_data(self, session): + Plan(PLAN_NAME, STACK_ID) + session.commit() + result = session.app.get('/v1/plans/%s/' % (STACK_ID)) + json = result.json + assert is_valid_uuid4(json['id']) + assert json['name'] == PLAN_NAME + assert json['placements'] == {} + assert json['stack_id'] == STACK_ID + + def test_get_plan_refs(self, session): + plan = Plan(PLAN_NAME, STACK_ID) + Placement( + 'placement_1', str(uuid4()), + plan=plan, + location='foo_1' + ) + Placement( + 'placement_2', str(uuid4()), + plan=plan, + location='foo_2' + ) + session.commit() + result = session.app.get('/v1/plans/%s/' % (STACK_ID)) + json = result.json + assert is_valid_uuid4(json['id']) + assert json['name'] == PLAN_NAME + assert json['stack_id'] == STACK_ID + assert len(json['placements']) == 2 diff --git a/valet/tests/base.py b/valet/tests/base.py new file mode 100644 index 0000000..393df20 --- /dev/null +++ b/valet/tests/base.py @@ -0,0 +1,40 @@ +''' +Created on May 5, 2016 + +@author: Yael +''' + +from oslo_config import fixture as fixture_config +from oslo_log import log as logging +from oslotest.base import BaseTestCase +from valet.tests.functional.valet_validator.common import init + + +LOG = logging.getLogger(__name__) + + +class Base(BaseTestCase): + """Test case base class for all unit tests.""" + + def __init__(self, *args, **kwds): + ''' ''' + super(Base, self).__init__(*args, **kwds) + + self.CONF = self.useFixture(fixture_config.Config()).conf + init.prepare(self.CONF) + + def setUp(self): + super(Base, self).setUp() + + def run_test(self, stack_name, template_path): + ''' main function ''' + pass + + def validate(self, result): + self.assertEqual(True, result.ok, result.message) + + def validate_test(self, result): + self.assertTrue(result) + + def get_name(self): + pass diff --git a/valet/tests/functional/__init__.py b/valet/tests/functional/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/functional/etc/valet_validator.cfg b/valet/tests/functional/etc/valet_validator.cfg new file mode 100644 index 0000000..2d9182a --- /dev/null +++ b/valet/tests/functional/etc/valet_validator.cfg @@ -0,0 +1,48 @@ +[DEFAULT] 
+default_log_levels="valet_validator=DEBUG,tests=DEBUG,compute=DEBUG,common=DEBUG,orchestration=DEBUG,requests=ERROR" +#default_log_levels='hello1=WARN,hello1.service=DEBUG' + +[auth] +OS_AUTH_URL_WITH_VERSION=http://controller:5000/v2.0 +OS_USERNAME=admin +OS_PASSWORD=qwer4321 +OS_TENANT_NAME=demo +TOKEN_EXPIRATION=600 + +[nova] +VERSION=2 +ATTR=OS-EXT-SRV-ATTR:host + +[heat] +HEAT_URL=http://controller:8004/v1/ +KEY=output_key +VALUE=output_value +VERSION=1 + +[valet] +HOST=http://192.168.10.18:8090/v1 +DELAY_DURATION=30 +PAUSE=10 +TRIES_TO_CREATE=5 +TIME_CAP=60 + +[test_affinity] +STACK_NAME=basic_affinity_stack +TEMPLATE_NAME=affinity_basic_2_instances + +[test_affinity_3] +STACK_NAME=affinity_3_stack +TEMPLATE_NAME=affinity_ 3_Instances + +[test_diversity] +STACK_NAME=basic_diversity_stack +TEMPLATE_NAME=diversity_basic_2_instances + +[test_nested] +STACK_NAME=basic_nest_stack +TEMPLATE_NAME=diversity_between_2_affinity + +[test_exclusivity] +STACK_NAME=basic_exclusivity_stack +TEMPLATE_NAME=exclusivity_basic_2_instances + diff --git a/valet/tests/functional/valet_validator/__init__.py b/valet/tests/functional/valet_validator/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/functional/valet_validator/common/__init__.py b/valet/tests/functional/valet_validator/common/__init__.py new file mode 100644 index 0000000..9717586 --- /dev/null +++ b/valet/tests/functional/valet_validator/common/__init__.py @@ -0,0 +1,37 @@ +from oslo_log import log as logging +import time +from valet.tests.functional.valet_validator.common.init import CONF, COLORS + +LOG = logging.getLogger(__name__) + + +class Result(object): + ok = False + message = "" + + def __init__(self, ok=True, msg=""): + self.ok = ok + self.message = msg + + +class GeneralLogger(object): + @staticmethod + def delay(duration=None): + time.sleep(duration or CONF.heat.DELAY_DURATION) + + @staticmethod + def log_info(msg): + LOG.info("%s %s %s" % (COLORS["L_GREEN"], msg, COLORS["WHITE"])) + + @staticmethod + def log_error(msg, trc_back=""): + LOG.error("%s %s %s" % (COLORS["L_RED"], msg, COLORS["WHITE"])) + LOG.error("%s %s %s" % (COLORS["L_RED"], trc_back, COLORS["WHITE"])) + + @staticmethod + def log_debug(msg): + LOG.debug("%s %s %s" % (COLORS["L_BLUE"], msg, COLORS["WHITE"])) + + @staticmethod + def log_group(msg): + LOG.info("%s %s %s" % (COLORS["Yellow"], msg, COLORS["WHITE"])) diff --git a/valet/tests/functional/valet_validator/common/auth.py b/valet/tests/functional/valet_validator/common/auth.py new file mode 100644 index 0000000..9211625 --- /dev/null +++ b/valet/tests/functional/valet_validator/common/auth.py @@ -0,0 +1,51 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from keystoneclient.auth.identity import v2 as identity +from keystoneclient import session +from oslo_log import log as logging +from valet.tests.functional.valet_validator.common.init import CONF + +LOG = logging.getLogger(__name__) + +MIN_TOKEN_LIFE_SECONDS = 120 + + +class Auth(object): + ''' Singleton class for authentication token ''' + auth = None + session = None + + @staticmethod + def _init(): + if Auth.is_auth_invalid(): + Auth.auth = identity.Password(auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION, + username=CONF.auth.OS_USERNAME, + password=CONF.auth.OS_PASSWORD, + tenant_name=CONF.auth.OS_TENANT_NAME) + Auth.session = session.Session(auth=Auth.auth) + + @staticmethod + def get_password_plugin(): + Auth._init() + return Auth.auth + + @staticmethod + def get_auth_token(): + return 
Auth.get_password_plugin().get_token(Auth.get_auth_session()) + + @staticmethod + def get_auth_session(): + Auth._init() + return Auth.session + + @staticmethod + def get_project_id(): + return Auth.get_password_plugin().get_project_id(Auth.get_auth_session()) + + @staticmethod + def is_auth_invalid(): + return Auth.auth is None or Auth.auth.get_auth_ref(Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION) diff --git a/valet/tests/functional/valet_validator/common/init.py b/valet/tests/functional/valet_validator/common/init.py new file mode 100644 index 0000000..ee28f6e --- /dev/null +++ b/valet/tests/functional/valet_validator/common/init.py @@ -0,0 +1,95 @@ +''' +Created on May 5, 2016 + +@author: Yael +''' + +import os +from oslo_config import cfg +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + +DOMAIN = "valet_validator" + +""" +Black 0;30 Dark Gray 1;30 +Red 0;31 Light Red 1;31 +Green 0;32 Light Green 1;32 +Brown/Orange 0;33 Yellow 1;33 +Blue 0;34 Light Blue 1;34 +Purple 0;35 Light Purple 1;35 +Cyan 0;36 Light Cyan 1;36 +Light Gray 0;37 White 1;37 +""" +COLORS = \ + { + "WHITE": '\033[0;37m', + "L_RED": '\033[1;31m', + "L_PURPLE": '\033[1;35m', + "L_GREEN": '\033[0;32m', + "L_BLUE": '\033[1;34m', + "Yellow": '\033[0;33m' + } + + +opts_auth = \ + [ + cfg.StrOpt('OS_AUTH_URL_WITH_VERSION', default='http://controller:5000/v2.0'), + cfg.StrOpt('OS_USERNAME', default="addddmin"), + cfg.StrOpt('OS_PASSWORD', default="qwer4321"), + cfg.StrOpt('OS_TENANT_NAME', default="demo"), + cfg.IntOpt('TOKEN_EXPIRATION', default=600), + ] + +opt_nova = \ + [ + cfg.StrOpt('VERSION', default="2"), + cfg.StrOpt('ATTR', default="OS-EXT-SRV-ATTR:host"), + ] + +opt_heat = \ + [ + cfg.StrOpt('HEAT_URL', default="http://controller:8004/v1/"), + cfg.StrOpt('KEY', default="output_key"), + cfg.StrOpt('VALUE', default="output_value"), + cfg.StrOpt('VERSION', default="1"), + cfg.IntOpt('DELAY_DURATION', default=1), + cfg.IntOpt('TRIES_TO_CREATE', default=5), + cfg.IntOpt('TIME_CAP', default=60), + ] + +opt_valet = \ + [ + cfg.StrOpt('HOST', default="http://controller:8090/v1"), + cfg.IntOpt('DELAY_DURATION', default=1), + cfg.IntOpt('TRIES_TO_CREATE', default=5), + cfg.IntOpt('PAUSE', default=5), + cfg.IntOpt('TIME_CAP', default=60), + ] + +CONF.register_opts(opts_auth, group="auth") +CONF.register_opts(opt_heat, group="heat") +CONF.register_opts(opt_nova, group="nova") +CONF.register_opts(opt_valet, group="valet") + +_initialized = False + + +def prepare(CONF): + global _initialized + try: + if _initialized is False: + logging.register_options(CONF) + _initialized = True + + # Adding config file + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)) + conf_file = os.path.join(possible_topdir, 'etc', DOMAIN + '.cfg') + CONF([], project=DOMAIN, default_config_files=[conf_file] or None, validate_default_values=True) + + logging.setup(CONF, DOMAIN) + + except Exception as ex: + LOG.error("Preparation failed! 
%s" % ex) diff --git a/valet/tests/functional/valet_validator/common/resources.py b/valet/tests/functional/valet_validator/common/resources.py new file mode 100644 index 0000000..dd8f5bf --- /dev/null +++ b/valet/tests/functional/valet_validator/common/resources.py @@ -0,0 +1,88 @@ +''' +Created on May 23, 2016 + +@author: Yael +''' + +from oslo_log import log as logging +import traceback +import yaml + +LOG = logging.getLogger(__name__) + +TEMPLATE_RES = "resources" + + +class TemplateResources(object): + ''' Heat template parser ''' + def __init__(self, template): + self.instances = [] + self.groups = {} + self.template_data = None + + try: + with open(template, "r") as f: + self.template_data = f.read() + doc = yaml.load(self.template_data) + + for resource in doc[TEMPLATE_RES]: + resource_type = str(doc[TEMPLATE_RES][resource]["type"]) + if resource_type == "OS::Nova::Server": + self.instances.append(Instance(doc, resource)) + elif resource_type == "ATT::Valet::GroupAssignment": + self.groups[resource] = Group(doc, resource) + + except Exception: + LOG.error("Failed to initialize TemplateResources") + LOG.error(traceback.format_exc()) + + +class Instance(object): + def __init__(self, doc, instance_name): + self.resource_name = instance_name + self.name = None + self.image = None + self.flavor = None + self.key = None + + self.fill(doc, instance_name) + + def fill(self, doc, instance_name): + try: + template_property = doc[TEMPLATE_RES][instance_name]["properties"] + + self.name = template_property["name"] + self.image = template_property["image"] + self.flavor = template_property["flavor"] + + except Exception: + LOG.error("Failed to initialize Instance") + LOG.error(traceback.format_exc()) + + def get_ins(self): + return("type: %s, name: %s, image: %s, flavor: %s, resource_name: %s " + % (self.type, self.name, self.image, self.flavor, self.resource_name)) + + +class Group(object): + def __init__(self, doc, group_name): + self.group_type = None + self.group_name = None + self.level = None + self.group_resources = [] + + self.fill(doc, group_name) + + def fill(self, doc, group_name): + try: + template_property = doc[TEMPLATE_RES][group_name]["properties"] + + self.group_type = template_property["group_type"] + self.group_name = template_property["group_name"] if "group_name" in template_property else None + self.level = template_property["level"] + for res in template_property[TEMPLATE_RES]: + self.group_resources.append(res["get_resource"]) + + except Exception: + LOG.error("Failed to initialize Group") + LOG.error(traceback.format_exc()) diff --git a/valet/tests/functional/valet_validator/compute/__init__.py b/valet/tests/functional/valet_validator/compute/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/functional/valet_validator/compute/analyzer.py b/valet/tests/functional/valet_validator/compute/analyzer.py new file mode 100644 index 0000000..06dd1f6 --- /dev/null +++ b/valet/tests/functional/valet_validator/compute/analyzer.py @@ -0,0 +1,163 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from novaclient import client +import traceback +from valet.tests.functional.valet_validator.common import Result, GeneralLogger +from valet.tests.functional.valet_validator.common.auth import Auth +from valet.tests.functional.valet_validator.common.init import CONF + + +class Analyzer(object): + + def __init__(self): + ''' initializing the analyzer - connecting to nova ''' + GeneralLogger.log_info("Initializing Analyzer") + self.nova = 
+    def get_host_name(self, instance_name):
+        ''' Return the host of the given instance. '''
+        serv = self.nova.servers.find(name=instance_name)
+        return self.get_hostname(serv)
+
+    def get_all_hosts(self, instances_list):
+        ''' Return the hosts of all given instances. '''
+        GeneralLogger.log_debug("Getting hosts names")
+        return [self.get_host_name(instance.name) for instance in instances_list]
+
+    def check(self, resources):
+        ''' Check that all instances are on the appropriate hosts and racks. '''
+        GeneralLogger.log_debug("Starting to check instances location")
+        result = True
+
+        try:
+            for key in resources.groups:
+                group = resources.groups[key]
+
+                resources_to_compare = self.get_resources_to_compare(resources, group.group_resources) or group.group_resources
+                instances_for_group = self.get_group_instances(resources, resources_to_compare)
+                hosts_list = self.get_all_hosts(instances_for_group)
+
+                # dispatch on the group type (switch-case style)
+                result = result and \
+                    {
+                        "affinity": self.are_the_same(hosts_list, group.level),
+                        "diversity": self.are_different(hosts_list, group.level),
+                        "exclusivity": self.are_we_alone(hosts_list, instances_for_group)
+                    }[group.group_type]
+
+        except Exception as ex:
+            GeneralLogger.log_error("Exception at method check: %s" % ex, traceback.format_exc())
+            result = False
+
+        return Result(result)
+
+    def get_resources_to_compare(self, resources, group_resources):
+        resources_to_compare = []
+
+        try:
+            for group_name in group_resources:  # e.g. ['test-affinity-group1', 'test-affinity-group2']
+                if "test" in group_name:
+                    resources_to_compare.append(resources.groups[group_name].group_resources)
+                else:
+                    return None
+            return resources_to_compare
+
+        except Exception as ex:
+            GeneralLogger.log_error("Exception at method get_resources_to_compare: %s" % ex, traceback.format_exc())
+
+    def are_we_alone(self, hosts_list, ins_for_group):
+        try:
+            # all_instances_on_host holds every instance on the group's hosts
+            all_instances_on_host = self.get_instances_per_host(hosts_list)
+            for instance in ins_for_group:
+                if instance.name in all_instances_on_host:
+                    all_instances_on_host.remove(instance.name)
+            return not all_instances_on_host
+
+        except Exception as ex:
+            GeneralLogger.log_error("Exception at method are_we_alone: %s" % ex, traceback.format_exc())
+
+    def get_instances_per_host(self, hosts_list):
+        ''' Return the names of all instances on the given hosts. '''
+        instances = []
+        try:
+            for host in set(hosts_list):
+                for items in self.get_vms_by_hypervisor(host):
+                    instances.append(items.name)
+
+            return instances
+        except Exception as ex:
+            GeneralLogger.log_error("Exception at method get_instances_per_host: %s" % ex, traceback.format_exc())
+
+    def are_different(self, hosts_list, level):
+        ''' Check that all hosts (and racks) are different for all instances. '''
+        diction = {}
+
+        try:
+            for h in hosts_list:
+                if self.is_already_exists(diction, self.get_host_or_rack(level, h)):
+                    return False
+            return True
+
+        except Exception as ex:
+            GeneralLogger.log_error("Exception at method are_different: %s" % ex, traceback.format_exc())
+            return False
+
+    def are_the_same(self, hosts_list, level):
+        GeneralLogger.log_debug("Hosts are:")
+        try:
+            for h in hosts_list:
+                if self.compare_host(self.get_host_or_rack(level, h), self.get_host_or_rack(level, hosts_list[0])) is False:
+                    return False
+            return True
+
+        except Exception as ex:
+            GeneralLogger.log_error("Exception at method are_the_same: %s" % ex, traceback.format_exc())
+            return False
+
+    def get_group_instances(self,
resources, group_ins): + ''' gets the instance object according to the group_ins + + group_ins - the group_resources name of the instances belong to this group (['my-instance-1', 'my-instance-2']) + ''' + ins_for_group = [] + try: + for instance in resources.instances: + if instance.resource_name in group_ins: + ins_for_group.append(instance) + return ins_for_group + + except Exception as ex: + GeneralLogger.log_error("Exception at method get_group_instances: %s" % ex, traceback.format_exc()) + return None + + def get_hostname(self, vm): + return str(getattr(vm, CONF.nova.ATTR)) + + def is_already_exists(self, diction, item): + if item in diction: + return True + + diction[item] = 1 + return False + + def compare_rack(self, current_host, first_host): + GeneralLogger.log_debug(current_host) + return self.get_rack(current_host) == self.get_rack(first_host) + + def compare_host(self, current_host, first_host): + GeneralLogger.log_debug(current_host) + return current_host == first_host + + def get_rack(self, host): + return (host.split("r")[1])[:2] + + def get_host_or_rack(self, level, host): + return host if level == "host" else self.get_rack(host) + + def get_vms_by_hypervisor(self, host): + return [vm for vm in self.nova.servers.list(search_opts={"all_tenants": True}) if self.get_hostname(vm) == host] diff --git a/valet/tests/functional/valet_validator/group_api/__init__.py b/valet/tests/functional/valet_validator/group_api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/functional/valet_validator/group_api/valet_group.py b/valet/tests/functional/valet_validator/group_api/valet_group.py new file mode 100644 index 0000000..06d3bb5 --- /dev/null +++ b/valet/tests/functional/valet_validator/group_api/valet_group.py @@ -0,0 +1,106 @@ +''' +Created on Jul 3, 2016 + +@author: Yael +''' + +import json +import requests +import traceback +from valet.tests.functional.valet_validator.common.auth import Auth +from valet.tests.functional.valet_validator.common import GeneralLogger +from valet.tests.functional.valet_validator.common.init import CONF + + +class ValetGroup(object): + + def __init__(self): + self.groups_url = "%s/groups" % CONF.valet.HOST + + self.headers = {"X-Auth-Token": Auth.get_auth_token(), + "Content-Type": "application/json"} + + def create_group(self, group_name, group_type): + grp_data = {"name": group_name, "type": group_type} + return requests.post(self.groups_url, data=json.dumps(grp_data), headers=self.headers) + + def get_list_groups(self): + list_response = requests.get(self.groups_url, headers=self.headers) + return list_response.json()["groups"] + + def get_group_details(self, group_id): + url = self.groups_url + "/" + group_id + return requests.get(url, headers=self.headers) + + def update_group_members(self, group_id, members=None): + add_member_url = self.groups_url + "/%s/members" % group_id + data = json.dumps({"members": [members or Auth.get_project_id()]}) + + return requests.put(add_member_url, data=data, headers=self.headers) + + def update_group(self, group_id, new_description): + url = self.groups_url + "/" + group_id + new_data = json.dumps({"description": new_description}) + + return requests.put(url, new_data, headers=self.headers) + + def delete_group_member(self, group_id, member_id): + url = self.groups_url + "/%s/members/%s" % (group_id, member_id) + return requests.delete(url, headers=self.headers) + + def delete_all_group_member(self, group_id): + url = self.groups_url + "/%s/members" % group_id + return 
requests.delete(url, headers=self.headers) + + def delete_group(self, group_id): + url = self.groups_url + "/%s" % group_id + return requests.delete(url, headers=self.headers) + + def get_group_id_and_members(self, group_name, group_type="exclusivity"): + ''' Checks if group name exists, if not - creates it + + returns group's id and members list + ''' + group_details = self.check_group_exists(group_name) + + try: + if group_details is None: + GeneralLogger.log_info("Creating group") + create_response = self.create_group(group_name, group_type) + return create_response.json()["id"], create_response.json()["members"] + else: + GeneralLogger.log_info("Group exists") + + return group_details + except Exception: + import traceback + GeneralLogger.log_error(traceback.format_exc()) + + def add_group_member(self, group_details): + ''' Checks if member exists in group, if not - adds it ''' + # group_details - group id, group members + try: + if Auth.get_project_id() not in group_details[1]: + GeneralLogger.log_info("Adding member to group") + self.update_group_members(group_details[0]) + except Exception: + GeneralLogger.log_error("Failed to add group member", traceback.format_exc()) + + def check_group_exists(self, group_name): + ''' Checks if group exists in group list, if not returns None ''' + for grp in self.get_list_groups(): + if grp["name"] == group_name: + return grp["id"], grp["members"] + + return None + + def delete_all_groups(self): + DELETED = 204 + for group in self.get_list_groups(): + codes = [self.delete_all_group_member(group["id"]).status_code, self.delete_group(group["id"]).status_code] + + res = filter(lambda a: a != DELETED, codes) + if res: + return res[0] + + return DELETED diff --git a/valet/tests/functional/valet_validator/orchestration/__init__.py b/valet/tests/functional/valet_validator/orchestration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/functional/valet_validator/orchestration/loader.py b/valet/tests/functional/valet_validator/orchestration/loader.py new file mode 100644 index 0000000..cbf6fc1 --- /dev/null +++ b/valet/tests/functional/valet_validator/orchestration/loader.py @@ -0,0 +1,84 @@ +''' +Created on May 2, 2016 + +@author: Yael +''' + +from heatclient.client import Client +import sys +import time +import traceback +from valet.tests.functional.valet_validator.common import Result, GeneralLogger +from valet.tests.functional.valet_validator.common.auth import Auth +from valet.tests.functional.valet_validator.common.init import CONF +from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup + + +class Loader(object): + + def __init__(self): + ''' initializing the loader - connecting to heat ''' + GeneralLogger.log_info("Initializing Loader") + + heat_url = CONF.heat.HEAT_URL + str(Auth.get_project_id()) + token = Auth.get_auth_token() + + heat = Client(CONF.heat.VERSION, endpoint=heat_url, token=token) + self.stacks = heat.stacks + + def create_stack(self, stack_name, template_resources): + GeneralLogger.log_info("Starting to create stacks") + groups = template_resources.groups + + try: + for key in groups: + if groups[key].group_type == "exclusivity": + self.create_valet_group(groups[key].group_name) + + self.stacks.create(stack_name=stack_name, template=template_resources.template_data) + return self.wait(stack_name, operation="create") + + except Exception: + GeneralLogger.log_error("Failed to create stack", traceback.format_exc()) + sys.exit(1) + + def create_valet_group(self, group_name): + 
try: + v_group = ValetGroup() + + group_details = v_group.get_group_id_and_members(group_name) # (group_name, group_type) + v_group.add_group_member(group_details) + + except Exception: + GeneralLogger.log_error("Failed to create valet group", traceback.format_exc()) + sys.exit(1) + + def delete_stack(self, stack_name): + self.stacks.delete(stack_id=stack_name) + return self.wait(stack_name, operation="delete") + + def delete_all_stacks(self): + GeneralLogger.log_info("Starting to delete stacks") + try: + for stack in self.stacks.list(): + self.delete_stack(stack.id) + + except Exception: + GeneralLogger.log_error("Failed to delete stacks", traceback.format_exc()) + + def wait(self, stack_name, count=CONF.valet.TIME_CAP, operation="Operation"): + ''' Checking the result of the process (create/delete) and writing the result to log ''' + while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" and count > 0: + count -= 1 + time.sleep(1) + + if str(self.stacks.get(stack_name).status) == "COMPLETE": + GeneralLogger.log_info(operation + " Successfully completed") + return Result() + elif str(self.stacks.get(stack_name).status) == "FAILED": + msg = operation + " failed - " + self.stacks.get(stack_name).stack_status_reason + else: + msg = operation + " timed out" + GeneralLogger.log_error(msg) + + return Result(False, msg) diff --git a/valet/tests/functional/valet_validator/tests/__init__.py b/valet/tests/functional/valet_validator/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/functional/valet_validator/tests/functional_base.py b/valet/tests/functional/valet_validator/tests/functional_base.py new file mode 100644 index 0000000..3dcc910 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/functional_base.py @@ -0,0 +1,77 @@ +''' +Created on May 5, 2016 + +@author: Yael +''' + +import os +from oslo_log import log as logging +import time +from valet.tests.base import Base +from valet.tests.functional.valet_validator.common.init import COLORS, CONF +from valet.tests.functional.valet_validator.common.resources import TemplateResources +from valet.tests.functional.valet_validator.compute.analyzer import Analyzer +from valet.tests.functional.valet_validator.orchestration.loader import Loader + + +LOG = logging.getLogger(__name__) + + +class FunctionalTestCase(Base): + """Test case base class for all unit tests.""" + + def __init__(self, *args, **kwds): + ''' initializing the FunctionalTestCase - loading the logger, loader and analyzer ''' + super(FunctionalTestCase, self).__init__(*args, **kwds) + + def setUp(self): + super(FunctionalTestCase, self).setUp() + + self.load = Loader() + self.compute = Analyzer() + + LOG.info("%s %s is starting... 
%s" % (COLORS["L_BLUE"], self.get_name(), COLORS["WHITE"])) + + def run_test(self, stack_name, template_path): + ''' scenario - + + deletes all stacks + create new stack + checks if host (or rack) is the same for all instances + ''' + # delete all stacks + self.load.delete_all_stacks() + + # creates new stack + my_resources = TemplateResources(template_path) + + res = self.load.create_stack(stack_name, my_resources) + if "Ostro error" in res.message: + res = self.try_again(res, stack_name, my_resources) + + self.validate(res) + LOG.info("%s stack creation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"])) + time.sleep(self.CONF.valet.DELAY_DURATION) + + # validation + self.validate(self.compute.check(my_resources)) + LOG.info("%s validation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"])) + + def try_again(self, res, stack_name, my_resources): + tries = CONF.valet.TRIES_TO_CREATE + while "Ostro error" in res.message and tries > 0: + LOG.error("Ostro error - try number %d" % (CONF.valet.TRIES_TO_CREATE - tries + 2)) + self.load.delete_all_stacks() + res = self.load.create_stack(stack_name, my_resources) + tries -= 1 + time.sleep(self.CONF.valet.PAUSE) + + return res + + def get_template_path(self, template_name): + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir)) + return os.path.join(possible_topdir, 'tests/templates', template_name + '.yml') + + def init_template(self, test): + self.stack_name = test.STACK_NAME + self.template_path = self.get_template_path(test.TEMPLATE_NAME) diff --git a/valet/tests/functional/valet_validator/tests/sanityCheck b/valet/tests/functional/valet_validator/tests/sanityCheck new file mode 100644 index 0000000..1dc9d1d --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/sanityCheck @@ -0,0 +1,13 @@ + + +heat --os-username admin --os-password qwer4321 --os-project-name demo --os-auth-url http://localhost:5000/v2.0 stack-list + + +heat --os-username admin --os-password qwer4321 --os-project-name demo --os-auth-url http://localhost:5000/v2.0 stack-create -f templates/sanityTemplate test_stack + +sleep 18 + +heat --os-username admin --os-password qwer4321 --os-project-name demo --os-auth-url http://localhost:5000/v2.0 stack-list + + +heat --os-username admin --os-password qwer4321 --os-project-name demo --os-auth-url http://localhost:5000/v2.0 stack-delete test_stack diff --git a/valet/tests/functional/valet_validator/tests/templates/affinity_ 3_Instances.yml b/valet/tests/functional/valet_validator/tests/templates/affinity_ 3_Instances.yml new file mode 100644 index 0000000..173643d --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/templates/affinity_ 3_Instances.yml @@ -0,0 +1,56 @@ +heat_template_version: 2015-04-30 + +description: Create stack – host level affinity – 3 Instances + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + # key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + #networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-3: + type: OS::Nova::Server + properties: + name: test-3 + # key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + test-affinity-group1: + type: 
ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + - {get_resource: my-instance-3} + +outputs: + instance_ip-1: + description: The IP addresses of the deployed instance 1 + value: { get_attr: [my-instance-1, first_address] } + instance_ip-2: + description: The IP addresses of the deployed instance 2 + value: { get_attr: [my-instance-2, first_address] } + instance_ip-3: + description: The IP addresses of the deployed instance 3 + value: { get_attr: [my-instance-3, first_address] } diff --git a/valet/tests/functional/valet_validator/tests/templates/affinity_basic_2_instances.yml b/valet/tests/functional/valet_validator/tests/templates/affinity_basic_2_instances.yml new file mode 100644 index 0000000..e70d46d --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/templates/affinity_basic_2_instances.yml @@ -0,0 +1,41 @@ +heat_template_version: 2015-04-30 + +description: Affinity template - all VMs should be deployed on the same host + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + # key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + #networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + # # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + test-affinity-group3: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } diff --git a/valet/tests/functional/valet_validator/tests/templates/diversity_basic_2_instances.yml b/valet/tests/functional/valet_validator/tests/templates/diversity_basic_2_instances.yml new file mode 100644 index 0000000..9b8aa55 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/templates/diversity_basic_2_instances.yml @@ -0,0 +1,51 @@ +heat_template_version: 2015-04-30 + +description: Diversity template - all VMs should be deployed on different hosts + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + #networks: + #- network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + #networks: + #- network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + # my-instance-3: + # type: OS::Nova::Server + # properties: + # name: test-3 + # key_name: my_key + # image: cirros-0.3.4-x86_64-disk.img + # flavor: m1.small + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + test-diversity-group: + type: ATT::Valet::GroupAssignment + properties: + group_type: diversity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + #- {get_resource: my-instance-3} + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } \ No newline at end of file diff --git a/valet/tests/functional/valet_validator/tests/templates/diversity_between_2_affinity.yml 
b/valet/tests/functional/valet_validator/tests/templates/diversity_between_2_affinity.yml new file mode 100644 index 0000000..21aac96 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/templates/diversity_between_2_affinity.yml @@ -0,0 +1,76 @@ +heat_template_version: 2015-04-30 + +description: Nested affinity and diversity template - Host level diversity between 2 groups of affinity + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + + my-instance-3: + type: OS::Nova::Server + properties: + name: test-3 + # key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + + my-instance-4: + type: OS::Nova::Server + properties: + name: test-4 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + + + test-affinity-group1: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + + test-affinity-group2: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-3} + - {get_resource: my-instance-4} + + test-diversity-group: + type: ATT::Valet::GroupAssignment + properties: + group_type: diversity + level: host + resources: + - {get_resource: test-affinity-group1} + - {get_resource: test-affinity-group2} + + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } + instance_name-3: + value: { get_attr: [my-instance-3, name] } + instance_name-4: + value: { get_attr: [my-instance-4, name] } diff --git a/valet/tests/functional/valet_validator/tests/templates/exclusivity_basic_2_instances.yml b/valet/tests/functional/valet_validator/tests/templates/exclusivity_basic_2_instances.yml new file mode 100644 index 0000000..5152012 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/templates/exclusivity_basic_2_instances.yml @@ -0,0 +1,48 @@ +heat_template_version: 2015-04-30 + +description: Simple template + +resources: + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: my_key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + + # my-instance-3: + # type: OS::Nova::Server + # properties: + # name: test-3 + # #key_name: my_key + # image: cirros-0.3.4-x86_64-disk.img + # flavor: m1.tiny + + test-exclusivity-group: + type: ATT::Valet::GroupAssignment + properties: + group_type: exclusivity + group_name: template_group + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + + + # test-diversity-group: + # type: ATT::CloudQoS::ResourceGroup + # properties: + # relationship: diversity + # level: host + # resources: + # - {get_resource: my-instance-1} + # - {get_resource: my-instance-3} diff --git a/valet/tests/functional/valet_validator/tests/templates/sanityTemplate b/valet/tests/functional/valet_validator/tests/templates/sanityTemplate new file mode 100644 index 0000000..092bcb8 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/templates/sanityTemplate @@ -0,0 +1,42 @@ +heat_template_version: 2015-04-30 + +description: Affinity template - all 
VMs should be deployed on the same host + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + # key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: ssh-key + image: cirros-0.3.4-x86_64 + flavor: m1.tiny + #networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + # # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + test-affinity-group3: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } + diff --git a/valet/tests/functional/valet_validator/tests/test_affinity.py b/valet/tests/functional/valet_validator/tests/test_affinity.py new file mode 100644 index 0000000..53c1851 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/test_affinity.py @@ -0,0 +1,34 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from oslo_config import cfg +from oslo_log import log as logging +from valet.tests.functional.valet_validator.common.init import CONF +from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase + + +opt_test_aff = \ + [ + cfg.StrOpt('STACK_NAME', default="basic_affinity_stack"), + cfg.StrOpt('TEMPLATE_NAME', default="affinity_basic_2_instances"), + ] + +CONF.register_opts(opt_test_aff, group="test_affinity") +LOG = logging.getLogger(__name__) + + +class TestAffinity(FunctionalTestCase): + + def setUp(self): + ''' Adding configuration and logging mechanism ''' + super(TestAffinity, self).setUp() + self.init_template(CONF.test_affinity) + + def test_affinity(self): + self.run_test(self.stack_name, self.template_path) + + def get_name(self): + return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py b/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py new file mode 100644 index 0000000..3a22d94 --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py @@ -0,0 +1,33 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from oslo_config import cfg +from oslo_log import log as logging +from valet.tests.functional.valet_validator.common.init import CONF +from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase + + +opt_test_aff = [ + cfg.StrOpt('STACK_NAME', default="affinity_3_stack"), + cfg.StrOpt('TEMPLATE_NAME', default="affinity_ 3_Instances"), + ] + +CONF.register_opts(opt_test_aff, group="test_affinity_3") +LOG = logging.getLogger(__name__) + + +class TestAffinity_3(FunctionalTestCase): + + def setUp(self): + ''' Adding configuration and logging mechanism ''' + super(TestAffinity_3, self).setUp() + self.init_template(CONF.test_affinity_3) + + def test_affinity(self): + self.run_test(self.stack_name, self.template_path) + + def get_name(self): + return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_diversity.py b/valet/tests/functional/valet_validator/tests/test_diversity.py new file mode 100644 index 0000000..ea5f7aa --- /dev/null +++ b/valet/tests/functional/valet_validator/tests/test_diversity.py @@ -0,0 +1,35 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from oslo_config import cfg +from oslo_log import log 
as logging
+from valet.tests.functional.valet_validator.common.init import CONF
+from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase
+
+
+opt_test_div = \
+    [
+        cfg.StrOpt('STACK_NAME', default="basic_diversity_stack"),
+        cfg.StrOpt('TEMPLATE_NAME', default="diversity_basic_2_instances"),
+    ]
+
+CONF.register_opts(opt_test_div, group="test_diversity")
+LOG = logging.getLogger(__name__)
+
+
+class TestDiversity(FunctionalTestCase):
+
+    def setUp(self):
+        ''' Initiating template '''
+        super(TestDiversity, self).setUp()
+        self.init_template(CONF.test_diversity)
+
+    def test_diversity(self):
+        self.run_test(self.stack_name, self.template_path)
+
+    def get_name(self):
+        return __name__
diff --git a/valet/tests/functional/valet_validator/tests/test_exclusivity.py b/valet/tests/functional/valet_validator/tests/test_exclusivity.py
new file mode 100644
index 0000000..6868518
--- /dev/null
+++ b/valet/tests/functional/valet_validator/tests/test_exclusivity.py
@@ -0,0 +1,34 @@
+'''
+Created on Jun 1, 2016
+
+@author: Yael
+'''
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from valet.tests.functional.valet_validator.common.init import CONF
+from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase
+
+
+opt_test_ex = \
+    [
+        cfg.StrOpt('STACK_NAME', default="basic_exclusivity_stack"),
+        cfg.StrOpt('TEMPLATE_NAME', default="exclusivity_basic_2_instances"),
+    ]
+
+CONF.register_opts(opt_test_ex, group="test_exclusivity")
+LOG = logging.getLogger(__name__)
+
+
+class TestExclusivity(FunctionalTestCase):
+
+    def setUp(self):
+        ''' Initiating template '''
+        super(TestExclusivity, self).setUp()
+        self.init_template(CONF.test_exclusivity)
+
+    def test_exclusivity(self):
+        self.run_test(self.stack_name, self.template_path)
+
+    def get_name(self):
+        return __name__
diff --git a/valet/tests/functional/valet_validator/tests/test_groups.py b/valet/tests/functional/valet_validator/tests/test_groups.py
new file mode 100644
index 0000000..56aca46
--- /dev/null
+++ b/valet/tests/functional/valet_validator/tests/test_groups.py
@@ -0,0 +1,74 @@
+'''
+Created on Jul 4, 2016
+
+@author: Yael
+'''
+
+
+from valet.tests.functional.valet_validator.common.auth import Auth
+from valet.tests.functional.valet_validator.common import GeneralLogger
+from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup
+from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase
+
+
+class TestGroups(FunctionalTestCase):
+
+    def setUp(self):
+        ''' Adding configuration and logging mechanism '''
+        super(TestGroups, self).setUp()
+        self.groups = ValetGroup()
+        self.group_name = "test_group"
+        self.group_type = "exclusivity"
+
+    def test_groups(self):
+        GeneralLogger.log_group("Delete all stacks")
+        self.load.delete_all_stacks()
+
+        GeneralLogger.log_group("Delete all members and groups")
+
+        response_code = self.groups.delete_all_groups()
+        self.assertEqual(204, response_code, "delete_all_groups failed with code %s" % response_code)
+
+        self.assertEqual([], self.groups.get_list_groups(), "delete_all_groups failed")
+
+        GeneralLogger.log_group("Try to delete a non-existing group")
+        response = self.groups.delete_group("d68f62b1-4758-4ea5-a93a-8f9d9c0ae912")
+        self.assertEqual(404, response.status_code, "delete_group failed with code %s" % response.status_code)
+
+        GeneralLogger.log_group("Create test_group")
+        group_info = self.groups.create_group(self.group_name, self.group_type)
+        self.assertEqual(201, group_info.status_code, "create_group failed with code %s" % group_info.status_code)
+
+        grp_id = group_info.json()["id"]
+
+        GeneralLogger.log_group("Return list of groups")
+        GeneralLogger.log_group(str(self.groups.get_list_groups()))
+
+        GeneralLogger.log_group("Create test member (NOT tenant ID)")
+        member_response = self.groups.update_group_members(grp_id, members="test_member")
+        self.assertEqual(409, member_response.status_code, "update_group_members failed with code %s" % member_response.status_code)
+
+        GeneralLogger.log_group("Add description to group")
+        desc_response = self.groups.update_group(grp_id, "new_description")
+        self.assertEqual(201, desc_response.status_code, "update_group failed with code %s" % desc_response.status_code)
+
+        GeneralLogger.log_group("Create member (tenant ID)")
+        member_response = self.groups.update_group_members(grp_id)
+        self.assertEqual(201, member_response.status_code, "update_group_members failed with code %s" % member_response.status_code)
+
+        GeneralLogger.log_group("Return list of groups")
+        GeneralLogger.log_group(self.groups.get_group_details(grp_id).json())
+
+        GeneralLogger.log_group("Delete test member (NOT tenant ID)")
+        member_response = self.groups.delete_group_member(grp_id, "test_member")
+        self.assertEqual(404, member_response.status_code, "delete_group_member failed with code %s" % member_response.status_code)
+
+        GeneralLogger.log_group("Delete member (tenant ID)")
+        member_response = self.groups.delete_group_member(grp_id, Auth.get_project_id())
+        self.assertEqual(204, member_response.status_code, "delete_group_member failed with code %s" % member_response.status_code)
+
+        GeneralLogger.log_group("Return list of groups")
+        GeneralLogger.log_group(self.groups.get_group_details(grp_id).json())
+
+    def get_name(self):
+        return __name__
diff --git a/valet/tests/functional/valet_validator/tests/test_nested.py b/valet/tests/functional/valet_validator/tests/test_nested.py
new file mode 100644
index 0000000..647961d
--- /dev/null
+++ b/valet/tests/functional/valet_validator/tests/test_nested.py
@@ -0,0 +1,34 @@
+'''
+Created on May 18, 2016
+
+@author: root
+'''
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from valet.tests.functional.valet_validator.common.init import CONF
+from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase
+
+
+opt_test_aff = \
+    [
+        cfg.StrOpt('STACK_NAME', default="nest_stack"),
+        cfg.StrOpt('TEMPLATE_NAME', default="diversity_between_2_affinity"),
+    ]
+
+CONF.register_opts(opt_test_aff, group="test_nested")
+LOG = logging.getLogger(__name__)
+
+
+class TestNested(FunctionalTestCase):
+
+    def setUp(self):
+        ''' Adding configuration and logging mechanism '''
+        super(TestNested, self).setUp()
+        self.init_template(CONF.test_nested)
+
+    def test_nested(self):
+        self.run_test(self.stack_name, self.template_path)
+
+    def get_name(self):
+        return __name__
diff --git a/valet/tests/tempest/README.rst b/valet/tests/tempest/README.rst
new file mode 100644
index 0000000..70d4a4d
--- /dev/null
+++ b/valet/tests/tempest/README.rst
@@ -0,0 +1,70 @@
+===============================================
+Tempest Integration of valet
+===============================================
+
+Tempest has an external test plugin interface which enables anyone to integrate an
+external test suite as part of a tempest run. This lets any project leverage
+being run with the rest of the tempest suite while not requiring that the tests live in
+the tempest tree.
+http://docs.openstack.org/developer/tempest/plugin.html
+
+Dealing with configuration options
+----------------------------------
+
+Historically, Tempest did not provide external guarantees on its configuration options.
+However, with the introduction of the plugin interface this is no longer the case. An
+external plugin can rely on using any configuration option coming from Tempest; there
+will be at least a full deprecation cycle for any option before it is removed. However,
+the options provided by Tempest alone may not be sufficient for the plugin. If you need
+to add any plugin-specific configuration options, you should use the register_opts and
+get_opt_lists methods to pass them to Tempest when the plugin is loaded. When adding
+configuration options, the register_opts method gets passed the CONF object from tempest.
+This enables the plugin to add options to existing sections and also to create new
+configuration sections for new options. (A sketch of this mechanism appears below.)
+
+Using Plugins
+-------------
+Tempest will automatically discover any installed plugins when it is run, so just
+installing the python packages which contain your plugin is enough to use them with
+tempest; nothing else is really required.
+
+However, you should take care when installing plugins. By their very nature, there are
+no guarantees about the quality of a plugin when running tempest with plugins enabled.
+Additionally, while there is no limitation on running with multiple plugins, it is worth
+noting that poorly written plugins might not properly isolate their tests, which could
+cause unexpected cross interactions between plugins.
+
+Notes for using plugins with virtualenvs
+----------------------------------------
+
+When using tempest inside a virtualenv (like when running under tox) you have to ensure
+that the package that contains your plugin is either installed in the venv too or that
+you have system site-packages enabled. The virtualenv will isolate the tempest install
+from the rest of your system, so just installing the plugin package on your system and
+then running tempest inside a venv will not work.
+
+Tempest also exposes a tox job, all-plugin, which will set up a tox virtualenv with
+system site-packages enabled. This will let you leverage tox without requiring you to
+manually install plugins in the tox venv before running tests.
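+
+Valet wires into this mechanism through a ``tempest.test_plugins`` entry point
+(typically declared in setup.cfg) that points at the plugin class in
+valet/tests/tempest/plugin.py. The following is a minimal, illustrative sketch
+of the shape such a plugin class takes; the option and class names here are
+assumptions for illustration, not a copy of Valet's actual plugin::
+
+    import os
+
+    from oslo_config import cfg
+    from tempest import config
+    from tempest.test_discover import plugins
+
+    # Hypothetical option: advertise the service in the service_available
+    # group, which the test base class can consult to skip tests when the
+    # service is absent (cf. CONF.service_available.valet in api/base.py).
+    service_option = cfg.BoolOpt('valet',
+                                 default=True,
+                                 help="Whether or not valet is expected to be "
+                                      "available")
+
+
+    class ValetTempestPlugin(plugins.TempestPlugin):
+        def load_tests(self):
+            # Tell tempest where this plugin's tests live on disk.
+            base_path = os.path.split(os.path.dirname(
+                os.path.abspath(__file__)))[0]
+            test_dir = "valet/tests/tempest"
+            full_test_dir = os.path.join(base_path, test_dir)
+            return full_test_dir, base_path
+
+        def register_opts(self, conf):
+            # Called with tempest's CONF object when the plugin is loaded.
+            config.register_opt_group(conf,
+                                      config.service_available_group,
+                                      [service_option])
+
+        def get_opt_lists(self):
+            # Lets tempest enumerate the plugin's options, e.g. for
+            # sample-config generation.
+            return [(config.service_available_group.name, [service_option])]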
+
+Commands to Run the plugin
+--------------------------
+
+To list all Valet tempest cases, go to the tempest directory, then run:
+
+$ testr list-tests valet
+
+To run only these tests in tempest, go to the tempest directory, then run:
+
+$ ./run_tempest.sh -N -- valet
+
+$ tox -eall-plugin valet
+
+And, to run a specific test:
+
+$ tox -eall-plugin valet.tests.tempest.api.test_groups.ValetGroupsTest.test_list_groups
+
+To run the tests from the valet folder itself (make sure /etc/tempest/tempest.conf exists):
+
+$ python -m subunit.run discover | subunit-trace
+
diff --git a/valet/tests/tempest/__init__.py b/valet/tests/tempest/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/tests/tempest/api/__init__.py b/valet/tests/tempest/api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/tests/tempest/api/base.py b/valet/tests/tempest/api/base.py
new file mode 100644
index 0000000..cd8b819
--- /dev/null
+++ b/valet/tests/tempest/api/base.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest import config
+from tempest import test
+
+from valet.tests.tempest.services import client
+
+CONF = config.CONF
+
+
+class BaseValetTest(test.BaseTestCase):
+
+    credentials = ['primary']
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseValetTest, cls).skip_checks()
+        if not CONF.service_available.valet:
+            skip_msg = ("%s skipped as valet is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseValetTest, cls).setup_clients()
+        cls.valet_client = client.ValetClient(
+            cls.os.auth_provider,
+            CONF.placement.catalog_type,
+            CONF.identity.region,
+            **cls.os.default_params_with_timeout_values)
+
+    @classmethod
+    def resource_setup(cls):
+        super(BaseValetTest, cls).resource_setup()
+        cls.catalog_type = CONF.placement.catalog_type
+
+    @classmethod
+    def resource_cleanup(cls):
+        super(BaseValetTest, cls).resource_cleanup()
diff --git a/valet/tests/tempest/api/disabled_test_plan.py b/valet/tests/tempest/api/disabled_test_plan.py
new file mode 100644
index 0000000..948e50b
--- /dev/null
+++ b/valet/tests/tempest/api/disabled_test_plan.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from tempest import test +from tempest_lib.common.utils import data_utils +from valet.tests.tempest.api import base + + +class ValetPlanTest(base.BaseValetTest): + + @classmethod + def setup_clients(cls): + super(ValetPlanTest, cls).setup_clients() + cls.client = cls.valet_client + + def _get_plan_name_stack_id(self): + return data_utils.rand_uuid() + + def _get_resource_name_id(self): + resource_info = {} + resource_info['name'] = data_utils.rand_name(name='resource') + resource_info['id'] = data_utils.rand_uuid() + return resource_info + + def _create_excluded_hosts(self): + return data_utils.rand_name(name='qos') + + def _get_resource_property(self): + properties = {} + # TODO(kr336r): Use tempest to get/create flavour, image, networks + # Is it required really ??? + properties['flavor'] = "m1.small" + properties['image'] = "ubuntu_1204" + properties['networks'] = [{"network": "demo-net"}] + return properties + + def _create_resource(self): + resources = {} + _resource_data = {} + _resource_name_id = self._get_resource_name_id() + _resource_property = self._get_resource_property() + _resource_data['properties'] = _resource_property + _resource_data['type'] = "OS::Nova::Server" + _resource_data['name'] = _resource_name_id['name'] + resources = { + _resource_name_id['id']: _resource_data + } + return resources + + def _delete_plan(self, plan_id): + self.client.delete_plan(plan_id) + + def _get_stack_and_plan_id(self): + stack_and_plan = {} + _plan_name_stack_id = self._get_plan_name_stack_id() + _resources = self._create_resource() + resp = self.client.create_plan(_plan_name_stack_id, + _resources, + _plan_name_stack_id) + stack_id = resp['plan']['stack_id'] + plan_id = resp['plan']['id'] + plan_name = resp['plan']['name'] + for key, value in resp['plan']['placements'].iteritems(): + stack_and_plan['resource_id'] = key + location = resp['plan']['placements'][stack_and_plan['resource_id']]['location'] + stack_and_plan['stack_id'] = stack_id + stack_and_plan['plan_id'] = plan_id + stack_and_plan['name'] = plan_name + stack_and_plan['location'] = location + return stack_and_plan + + @test.idempotent_id('f25ea766-c91e-40ca-b96c-dff42129803d') + def test_create_plan(self): + stack_and_plan = self._get_stack_and_plan_id() + stack_id = stack_and_plan['stack_id'] + plan_id = stack_and_plan['plan_id'] + plan_name = stack_and_plan['name'] + self.assertEqual(stack_id, plan_name) + self.addCleanup(self._delete_plan, plan_id) + + @test.idempotent_id('973635f4-b5c9-4b78-81e7-d273e1782afc') + def test_update_plan_action_migrate(self): + stack_and_plan = self._get_stack_and_plan_id() + stack_id = stack_and_plan['stack_id'] + plan_id = stack_and_plan['plan_id'] + plan_name = stack_and_plan['name'] + resource_id = stack_and_plan['resource_id'] + resources = [] + resources.append(resource_id) + excluded_hosts = [] + excluded_hosts.append(stack_and_plan['location']) + action = "migrate" + body = self.client.update_plan(plan_id, + action, + excluded_hosts, + resources) + self.assertIn('id', body['plan']) + self.assertEqual(stack_id, plan_name) + self.addCleanup(self._delete_plan, plan_id) diff --git a/valet/tests/tempest/api/test_groups.py b/valet/tests/tempest/api/test_groups.py new file mode 100644 index 0000000..4ea3e1d --- /dev/null +++ b/valet/tests/tempest/api/test_groups.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + + +from tempest.common.utils import data_utils +from tempest import test + +from valet.tests.tempest.api import base + + +class ValetGroupsTest(base.BaseValetTest): + """Here we test the basic group operations of Valet Groups""" + + @classmethod + def setup_clients(cls): + super(ValetGroupsTest, cls).setup_clients() + cls.client = cls.valet_client + + @test.idempotent_id('b2655098-5a0d-11e6-9efd-525400af9658') + def test_list_groups(self): + group_ids = list() + fetched_ids = list() + + for _ in range(3): + group_name = data_utils.rand_name('group') + description = data_utils.rand_name('Description') + group = self.client.create_group( + name=group_name, group_type='exclusivity', + description=description) + self.addCleanup(self.client.delete_group, group['id']) + group_ids.append(group['id']) + + # List and Verify Groups + body = self.client.list_groups()['groups'] + + for group in body: + fetched_ids.append(group['id']) + missing_groups = [g for g in group_ids if g not in fetched_ids] + + self.assertEqual([], missing_groups) + + @test.idempotent_id('2ab0337e-6472-11e6-b6c6-080027824017') + def test_create_group(self): + group_name = data_utils.rand_name('group') + description = data_utils.rand_name('Description') + group = self.client.create_group( + name=group_name, group_type='exclusivity', + description=description) + self.addCleanup(self.client.delete_group, group['id']) + + self.assertIn('id', group) + self.assertIn('name', group) + self.assertEqual(group_name, group['name']) + self.assertIn('type', group) + self.assertIn('description', group) + + @test.idempotent_id('35f0aa20-6472-11e6-b6c6-080027824017') + def test_delete_group(self): + # Create group + group_name = data_utils.rand_name('group') + description = data_utils.rand_name('Description') + body = self.client.create_group( + name=group_name, group_type='exclusivity', + description=description) + + group_id = body.get('id') + + # Delete Group + self.client.delete_group(group_id) + + # List and verify group doesn't exist + groups = self.client.list_groups()['groups'] + groups_id = [group['id'] for group in groups] + + self.assertNotIn(group_id, groups_id) + + @test.attr(type='smoke') + @test.idempotent_id('460d86e4-6472-11e6-b6c6-080027824017') + def test_update_group(self): + # Create group + group_name = data_utils.rand_name('group') + description = data_utils.rand_name('Description') + group = self.client.create_group( + name=group_name, group_type='exclusivity', + description=description) + + self.addCleanup(self.client.delete_group, group['id']) + + group_id = group.get('id') + + new_desc = data_utils.rand_name('UpdateDescription') + updated_group = self.client.update_group( + group_id, new_desc) + + self.assertEqual(updated_group['description'], new_desc) + + @test.idempotent_id('4f660e50-6472-11e6-b6c6-080027824017') + def test_show_group(self): + # Create group + group_name = data_utils.rand_name('group') + description = data_utils.rand_name('Description') + group = self.client.create_group( + name=group_name, group_type='exclusivity', + description=description) + + 
self.addCleanup(self.client.delete_group, group['id']) + + group_id = group.get('id') + + group_details = self.client.show_group(group_id)['group'] + + self.assertIn('id', group_details) + self.assertIn('name', group_details) + self.assertEqual(group_name, group_details['name']) + self.assertIn('type', group_details) + self.assertIn('description', group_details) + self.assertIn('members', group_details) diff --git a/valet/tests/tempest/api/test_members.py b/valet/tests/tempest/api/test_members.py new file mode 100644 index 0000000..d3889bf --- /dev/null +++ b/valet/tests/tempest/api/test_members.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +from tempest import test +from tempest_lib.common.utils import data_utils +from valet.tests.tempest.api import base + + +class ValetGroupsMembersTest(base.BaseValetTest): + + @classmethod + def setup_clients(cls): + super(ValetGroupsMembersTest, cls).setup_clients() + cls.client = cls.valet_client + cls.TenantsClient = getattr(cls.os, "tenants_client", cls.os.identity_client) + + def _create_group(self): + group_name = data_utils.rand_name('membergroup') + resp = self.client.create_group(name=group_name, + group_type='exclusivity', + description='Test Member Group') + group_id = resp['id'] + self.addCleanup(self._delete_group, group_id) + return group_id + + def _delete_group(self, group_id): + self.client.delete_all_members(group_id) + self.client.delete_group(group_id) + + def _create_tenant(self): + tenant_name = data_utils.rand_name(name='tenant') + tenant_desc = data_utils.rand_name(name='desc') + body = self.TenantsClient.create_tenant(name=tenant_name, description=tenant_desc) + tenant_id = body['tenant']['id'] + self.addCleanup(self.TenantsClient.delete_tenant, tenant_id) + return tenant_id + + @test.idempotent_id('5aeec320-65d5-11e6-8b77-86f30ca893d3') + def test_add_single_member_to_a_group(self): + # Create a tenant + tenants = [] + tenant_id = self._create_tenant() + tenants.append(tenant_id) + # Create a group + group_id = self._create_group() + # Add the newly created tenant to the group + resp = self.client.add_members(group_id, tenants) + # Retrieve the relevant response information + members = resp['members'] + groupid = resp['id'] + self.assertEqual(members[0], tenants[0]) + self.assertEqual(group_id, groupid) + self.assertIn('description', resp) + self.assertIn('type', resp) + self.assertIn('name', resp) + + @test.idempotent_id('5aeec6f4-65d5-11e6-8b77-86f30ca893d3') + def test_add_multiple_members_to_a_group(self): + # Create multiple tenants + tenants = [] + for count in range(0, 4): + tenant_id = self._create_tenant() + tenants.append(tenant_id) + # Create a group + group_id = self._create_group() + # Add the newly created tenant to the group + resp = self.client.add_members(group_id, tenants) + # Retrieve the relevant response information + groupid = resp['id'] + members = resp['members'] + 
self.assertItemsEqual(members, tenants) + self.assertEqual(group_id, groupid) + self.assertIn('description', resp) + self.assertIn('type', resp) + self.assertIn('name', resp) + self.assertIn('members', resp) + + @test.idempotent_id('5aeec8b6-65d5-11e6-8b77-86f30ca893d3') + def test_add_single_member_to_a_group_and_verify_membership(self): + # Create a tenant + tenants = [] + tenant_id = self._create_tenant() + tenants.append(tenant_id) + # Create a group + group_id = self._create_group() + # Add the newly created tenant to the group + self.client.add_members(group_id, tenants) + # Verify membership + resp = self.client.verify_membership(group_id, tenant_id) + status = int(resp.response['status']) + self.assertEqual(204, status) + + @test.idempotent_id('5aeec99c-65d5-11e6-8b77-86f30ca893d3') + def test_delete_member_from_group(self): + # Create multiple tenants + tenants = [] + for count in range(0, 4): + tenant_id = self._create_tenant() + tenants.append(tenant_id) + # Create a group + group_id = self._create_group() + # Add the newly created tenant to the group + resp = self.client.add_members(group_id, tenants) + groupid = resp['id'] + resp = self.client.delete_member(groupid, tenants[2]) + status = int(resp.response['status']) + self.assertEqual(204, status) + + @test.idempotent_id('5aeecb68-65d5-11e6-8b77-86f30ca893d3') + def test_delete_all_members_from_group(self): + # Create multiple tenants + tenants = [] + for count in range(0, 4): + tenant_id = self._create_tenant() + tenants.append(tenant_id) + # Create a group + group_id = self._create_group() + # Add the newly created tenant to the group + resp = self.client.add_members(group_id, tenants) + groupid = resp['id'] + resp = self.client.delete_all_members(groupid) + status = int(resp.response['status']) + self.assertEqual(204, status) diff --git a/valet/tests/tempest/config.py b/valet/tests/tempest/config.py new file mode 100644 index 0000000..aaf6172 --- /dev/null +++ b/valet/tests/tempest/config.py @@ -0,0 +1,48 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from oslo_config import cfg
+
+service_available_group = cfg.OptGroup(name="service_available",
+                                       title="Available OpenStack Services")
+
+ServiceAvailableGroup = [
+    cfg.BoolOpt("valet",
+                default=True,
+                help="Whether or not valet is expected to be available"),
+]
+
+placement_group = cfg.OptGroup(name="placement",
+                               title="Valet Service option")
+PlacementGroup = [
+    cfg.StrOpt('catalog_type',
+               default='placement',
+               help="Catalog type of the placement service."),
+    cfg.StrOpt("endpoint_type",
+               default="publicURL",
+               choices=["publicURL", "adminURL", "internalURL"],
+               help="The endpoint type for valet service."),
+]
+
+valet_group = cfg.OptGroup(name="valet", title="Valet basic")
+
+opt_valet = \
+    [
+        cfg.IntOpt('TRIES_TO_CREATE', default=5),
+        cfg.IntOpt('PAUSE', default=5),
+    ]
diff --git a/valet/tests/tempest/plugin.py b/valet/tests/tempest/plugin.py
new file mode 100644
index 0000000..470439d
--- /dev/null
+++ b/valet/tests/tempest/plugin.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from tempest import config
+from tempest.test_discover import plugins
+from valet.tests.tempest import config as project_config
+
+import valet
+
+
+class ValetTempestPlugin(plugins.TempestPlugin):
+    def load_tests(self):
+        base_path = os.path.split(os.path.dirname(
+            os.path.abspath(valet.__file__)))[0]
+        test_dir = "valet/tests/tempest"
+        full_test_dir = os.path.join(base_path, test_dir)
+        return full_test_dir, base_path
+
+    def register_opts(self, conf):
+        config.register_opt_group(conf, project_config.service_available_group, project_config.ServiceAvailableGroup)
+
+        config.register_opt_group(conf, project_config.placement_group, project_config.PlacementGroup)
+
+        config.register_opt_group(conf, project_config.valet_group, project_config.opt_valet)
+
+    def get_opt_lists(self):
+        return [(project_config.service_available_group.name, project_config.ServiceAvailableGroup),
+                (project_config.placement_group.name, project_config.PlacementGroup),
+                (project_config.valet_group.name, project_config.opt_valet)]
diff --git a/valet/tests/tempest/scenario/__init__.py b/valet/tests/tempest/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/tests/tempest/scenario/analyzer.py b/valet/tests/tempest/scenario/analyzer.py
new file mode 100644
index 0000000..8f88b06
--- /dev/null
+++ b/valet/tests/tempest/scenario/analyzer.py
@@ -0,0 +1,206 @@
+'''
+Created on Nov 8, 2016
+
+@author: Yael
+'''
+
+from collections import defaultdict
+import os
+from tempest import config
+import traceback
+
+CONF = config.CONF
+
+
+class Analyzer(object):
+
+    def __init__(self, logger, stack_id, heat, nova):
+        ''' initializing the analyzer - connecting to nova '''
+        self.heat_client = heat
+        self.nova_client = nova
+        self.possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))
+        self.stack_identifier = stack_id
+        self.log = logger
+        self.resource_name = {}
+        self.instance_on_server = {}
+        self.group_instance_name = {}
+
+    def check(self, resources):
+        ''' Checking if all instances are on the appropriate hosts and racks '''
+        self.log.log_info("Starting to check instances location")
instances location") + result = True + + self.init_servers_list() + self.init_resources(resources) + ins_group = self.init_instances_for_group(resources) + + try: + for group_type in ins_group: + for group_resource in ins_group[group_type]: + instances = group_resource[:2] + level = group_resource[2] + + fn = \ + { + "affinity": self.are_the_same, + "diversity": self.are_different, + "exclusivity": self.are_we_alone + }[group_type] + + result = result and fn(instances, level) + + except Exception as ex: + self.log.log_error("Exception at method check: %s" % ex, traceback.format_exc()) + result = False + + return result + + def init_instances_for_group(self, resources): + ins_group = defaultdict(list) + + for grp in resources.groups.keys(): + self.group_instance_name[grp] = resources.groups[grp].group_resources + resources.groups[grp].group_resources.append(resources.groups[grp].level) + ins_group[resources.groups[grp].group_type].append(resources.groups[grp].group_resources) + + # replacing group for it's instances + ins_group = self.organize(ins_group) + + return ins_group + + def init_resources(self, resources): + for ins in resources.instances: + self.resource_name[ins.resource_name] = ins.name + + def init_servers_list(self): + servers_list = self.nova_client.list_servers() + + for i in range(len(servers_list["servers"])): + server = self.nova_client.show_server(servers_list["servers"][i]["id"]) + self.instance_on_server[servers_list["servers"][i]["name"]] = server["server"]["OS-EXT-SRV-ATTR:host"] + + def get_instance_name(self, res_name): + return self.resource_name[res_name] + + def get_instance_host(self, res_name): + hosts = [] + + if len(self.instance_on_server) == 0: + self.init_servers_list() + self.log.log_info("instance_on_server: %s" % self.instance_on_server) + + for res in res_name: + name = self.get_instance_name(res) + hosts.append(self.instance_on_server[name]) + + return hosts + + def are_the_same(self, res_name, level): + self.log.log_info("are_the_same") + hosts_list = self.get_instance_host(res_name) + self.log.log_info(hosts_list) + + try: + for h in hosts_list: + if self.compare_host(self.get_host_or_rack(level, h), self.get_host_or_rack(level, hosts_list[0])) is False: + return False + return True + + except Exception as ex: + self.log.log_error("Exception at method are_the_same: %s" % ex, traceback.format_exc()) + return False + + def are_different(self, res_name, level): + ''' Checking if all hosts (and racks) are different for all instances ''' + self.log.log_info("are_different") + diction = {} + hosts_list = self.get_instance_host(res_name) + self.log.log_info(hosts_list) + + try: + for h in hosts_list: + if self.is_already_exists(diction, self.get_host_or_rack(level, h)): + return False + return True + + except Exception as ex: + self.log.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc()) + return False + + def are_we_alone(self, ins_for_group, level): + self.log.log_info("are_we_alone ") + self.log.log_info(ins_for_group) + + instances = self.instance_on_server.keys() + if level == "rack": + instances = self.get_rack_instances(set(self.instance_on_server.values())) + + # instance_on_server should be all the instances on the rack + if len(instances) < 1: + return False + + for instance in ins_for_group: + if self.resource_name[instance] in instances: + instances.remove(self.resource_name[instance]) + + return not instances + + def organize(self, ins_group): + internal_ins = [] + for x in ins_group: + for y in 
ins_group[x]: + if y[0] in self.group_instance_name.keys(): + internal_ins.append(self.group_instance_name[y[0]][0]) + internal_ins.append(self.group_instance_name[y[1]][0]) + internal_ins.append(y[2]) + ins_group.pop(x) + ins_group[x].append(internal_ins) + return ins_group + + def get_group_instances(self, resources, group_ins): + ''' gets the instance object according to the group_ins + + group_ins - the group_resources name of the instances belong to this group (['my-instance-1', 'my-instance-2']) + ''' + ins_for_group = [] + try: + for instance in resources.instances: + if instance.resource_name in group_ins: + ins_for_group.append(instance) + return ins_for_group + + except Exception as ex: + self.log.log_error("Exception at method get_group_instances: %s" % ex, traceback.format_exc()) + return None + + def get_rack_instances(self, hosts): + racks = [] + for host in hosts: + racks.append(self.get_rack(host)) + + instances = [] + for x in self.instance_on_server: + if self.get_rack(self.instance_on_server[x]) in racks: + instances.append(x) + return instances + + def is_already_exists(self, diction, item): + if item in diction: + return True + + diction[item] = 1 + return False + + def compare_rack(self, current_host, first_host): + self.log.log_debug(current_host) + return self.get_rack(current_host) == self.get_rack(first_host) + + def compare_host(self, current_host, first_host): + self.log.log_debug(current_host) + return current_host == first_host + + def get_rack(self, host): + return (host.split("r")[1])[:2] + + def get_host_or_rack(self, level, host): + return host if level == "host" else self.get_rack(host) diff --git a/valet/tests/tempest/scenario/general_logger.py b/valet/tests/tempest/scenario/general_logger.py new file mode 100644 index 0000000..270193b --- /dev/null +++ b/valet/tests/tempest/scenario/general_logger.py @@ -0,0 +1,41 @@ +''' +Created on Nov 10, 2016 + +@author: Yael +''' + +from oslo_log import log as logging +from tempest import config + +CONF = config.CONF +LOG = logging.getLogger(__name__) + +COLORS = \ + { + "WHITE": '\033[0;37m', + "L_RED": '\033[1;31m', + "L_PURPLE": '\033[1;35m', + "L_GREEN": '\033[0;32m', + "L_BLUE": '\033[1;34m', + "Yellow": '\033[0;33m' + } + + +class GeneralLogger(object): + + def __init__(self, name): + self.test_name = name + + def log_info(self, msg): + LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_GREEN"], msg, COLORS["WHITE"])) + + def log_error(self, msg, trc_back=None): + LOG.error("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_RED"], msg, COLORS["WHITE"])) + if trc_back: + LOG.error("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_RED"], trc_back, COLORS["WHITE"])) + + def log_debug(self, msg): + LOG.debug("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_BLUE"], msg, COLORS["WHITE"])) + + def log_group(self, msg): + LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["Yellow"], msg, COLORS["WHITE"])) diff --git a/valet/tests/tempest/scenario/resources.py b/valet/tests/tempest/scenario/resources.py new file mode 100644 index 0000000..dd8f5bf --- /dev/null +++ b/valet/tests/tempest/scenario/resources.py @@ -0,0 +1,88 @@ +''' +Created on May 23, 2016 + +@author: Yael +''' + +from oslo_log import log as logging +import traceback +import yaml + +LOG = logging.getLogger(__name__) + +TEMPLATE_RES = "resources" + + +class TemplateResources(object): + ''' Heat template parser ''' + def __init__(self, template): + 
self.instances = []
+        self.groups = {}
+        self.template_data = None
+
+        try:
+            with open(template, "r") as f:
+                self.template_data = f.read()
+            doc = yaml.safe_load(self.template_data)
+
+            for resource in doc[TEMPLATE_RES]:
+                resource_type = str(doc[TEMPLATE_RES][resource]["type"])
+                if resource_type == "OS::Nova::Server":
+                    self.instances.append(Instance(doc, resource))
+                elif resource_type == "ATT::Valet::GroupAssignment":
+                    self.groups[resource] = Group(doc, resource)
+
+        except Exception:
+            LOG.error("Failed to initialize TemplateResources")
+            LOG.error(traceback.format_exc())
+
+
+class Instance(object):
+    def __init__(self, doc, instance_name):
+        self.resource_name = instance_name
+        self.name = None
+        self.image = None
+        self.flavor = None
+        self.key = None
+
+        self.fill(doc, instance_name)
+
+    def fill(self, doc, instance_name):
+        try:
+            template_property = doc[TEMPLATE_RES][instance_name]["properties"]
+
+            self.name = template_property["name"]
+            self.image = template_property["image"]
+            self.flavor = template_property["flavor"]
+
+        except Exception:
+            LOG.error("Failed to initialize Instance")
+            LOG.error(traceback.format_exc())
+
+    def get_ins(self):
+        return("name: %s, image: %s, flavor: %s, resource_name: %s "
+               % (self.name, self.image, self.flavor, self.resource_name))
+
+
+class Group(object):
+    def __init__(self, doc, group_name):
+        self.group_type = None
+        self.group_name = None
+        self.level = None
+        self.group_resources = []
+
+        self.fill(doc, group_name)
+
+    def fill(self, doc, group_name):
+        try:
+            template_property = doc[TEMPLATE_RES][group_name]["properties"]
+
+            self.group_type = template_property["group_type"]
+            self.group_name = template_property["group_name"] if "group_name" in template_property else None
+            self.level = template_property["level"]
+            for res in template_property[TEMPLATE_RES]:
+                self.group_resources.append(res["get_resource"])
+
+        except Exception:
+            LOG.error("Failed to initialize Group")
+            LOG.error(traceback.format_exc())
diff --git a/valet/tests/tempest/scenario/scenario_base.py b/valet/tests/tempest/scenario/scenario_base.py
new file mode 100644
index 0000000..3fd004b
--- /dev/null
+++ b/valet/tests/tempest/scenario/scenario_base.py
@@ -0,0 +1,154 @@
+'''
+Created on Nov 6, 2016
+
+@author: Yael
+'''
+
+import os
+from tempest import config
+from tempest import exceptions
+from tempest import test
+from tempest_lib.common.utils import data_utils
+import time
+import traceback
+from valet.tests.tempest.scenario.analyzer import Analyzer
+from valet.tests.tempest.scenario.resources import TemplateResources
+from valet.tests.tempest.services.client import ValetClient
+
+CONF = config.CONF
+
+
+class ScenarioTestCase(test.BaseTestCase):
+    credentials = ['primary']
+
+    @classmethod
+    def skip_checks(cls):
+        super(ScenarioTestCase, cls).skip_checks()
+        if not CONF.service_available.valet:
+            skip_msg = ("%s skipped as valet is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(ScenarioTestCase, cls).resource_setup()
+        cls.catalog_type = CONF.placement.catalog_type
+
+    @classmethod
+    def resource_cleanup(cls):
+        super(ScenarioTestCase, cls).resource_cleanup()
+
+    @classmethod
+    def setup_clients(cls):
+        super(ScenarioTestCase, cls).setup_clients()
+        cls.heat_client = cls.os.orchestration_client
+        cls.nova_client = cls.os.servers_client
+        cls.tenants_client = cls.os.identity_client
+        cls.valet_client = ValetClient(cls.os.auth_provider,
+                                       CONF.placement.catalog_type,
+
CONF.identity.region, + **cls.os.default_params_with_timeout_values) + + cls.possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)) + cls.stack_identifier = None + cls.tries = CONF.valet.TRIES_TO_CREATE + + def run_test(self, logger, stack_name, template_path): + ''' scenario - + + create new stack + checks if host (or rack) is the same for all instances + ''' + self.log = logger + self.log.log_info(" ******** Running Test ******** ") + tmplt_url = self.possible_topdir + template_path + template = TemplateResources(tmplt_url) + + env_data = self.get_env_file(tmplt_url) + + self.log.log_info(" ******** Creating Stack ******** ") + name = data_utils.rand_name(name=stack_name) + self.assertEqual(True, self.create_stack(name, env_data, template)) + + self.log.log_info(" ******** Analyzing Stack ******** ") + analyzer = Analyzer(self.log, self.stack_identifier, self.heat_client, self.nova_client) + self.assertEqual(True, analyzer.check(template)) + + self.log.log_info(" ********** THE END ****************") + + def create_stack(self, stack_name, env_data, template_resources): + try: + groups = template_resources.groups + + for key in groups: + if groups[key].group_type == "exclusivity": + self.log.log_info(" creating group ") + grp_name = data_utils.rand_name(name=groups[key].group_name) + template_resources.template_data = template_resources.template_data.replace(groups[key].group_name, grp_name) + self.create_valet_group(grp_name) + + for instance in template_resources.instances: + generated_name = data_utils.rand_name(instance.name) + template_resources.template_data = template_resources.template_data.replace(instance.name, generated_name) + instance.name = generated_name + + self.wait_for_stack(stack_name, env_data, template_resources) + self.addCleanup(self.delete_stack) + + except Exception: + self.log.log_error("Failed to create stack", traceback.format_exc()) + return False + return True + + def create_valet_group(self, group_name): + try: + v_group = self.valet_client.create_group(name=group_name, group_type='exclusivity', description="description") + group_id = v_group['id'] + tenant_id = self.tenants_client.tenant_id + self.addCleanup(self._delete_group, group_id) + + self.valet_client.add_members(group_id, [tenant_id]) + + except Exception: + self.log.log_error("Failed to create valet group", traceback.format_exc()) + + def get_env_file(self, template): + env_url = template.replace(".yml", ".env") + + if os.path.exists(env_url): + with open(env_url, "r") as f: + return f.read() + else: + return None + + def _delete_group(self, group_id): + self.valet_client.delete_all_members(group_id) + self.valet_client.delete_group(group_id) + + def delete_stack(self): + self.heat_client.delete_stack(self.stack_identifier) + self.heat_client.wait_for_stack_status(self.stack_identifier, "DELETE_COMPLETE", failure_pattern='^.*DELETE_FAILED$') + + def show_stack(self, stack_id): + return self.heat_client.show_stack(stack_id) + + def wait_for_stack(self, stack_name, env_data, template_resources): + try: + self.log.log_info("Trying to create stack") + new_stack = self.heat_client.create_stack(stack_name, environment=env_data, template=template_resources.template_data) + stack_id = new_stack["stack"]["id"] + self.stack_identifier = stack_name + "/" + stack_id + + self.heat_client.wait_for_stack_status(self.stack_identifier, "CREATE_COMPLETE", failure_pattern='^.*CREATE_FAILED$') + + except exceptions.StackBuildErrorException as ex: + if "Ostro error" in str(ex): + if 
self.tries > 0: + self.log.log_error("Ostro error - try number %d" % (CONF.valet.TRIES_TO_CREATE - self.tries + 2)) + self.tries -= 1 + self.delete_stack() + time.sleep(CONF.valet.PAUSE) + self.wait_for_stack(stack_name, env_data, template_resources) + else: + raise + else: + self.log.log_error("Failed to create stack", traceback.format_exc()) diff --git a/valet/tests/tempest/scenario/templates/__init__.py b/valet/tests/tempest/scenario/templates/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/tempest/scenario/templates/affinity_basic_2_instances.env b/valet/tests/tempest/scenario/templates/affinity_basic_2_instances.env new file mode 100644 index 0000000..c29a0c8 --- /dev/null +++ b/valet/tests/tempest/scenario/templates/affinity_basic_2_instances.env @@ -0,0 +1,3 @@ +parameters: + instance_image: cirros-0.3.4-x86_64 + instance_flavor: m1.tiny diff --git a/valet/tests/tempest/scenario/templates/affinity_basic_2_instances.yml b/valet/tests/tempest/scenario/templates/affinity_basic_2_instances.yml new file mode 100644 index 0000000..bea779c --- /dev/null +++ b/valet/tests/tempest/scenario/templates/affinity_basic_2_instances.yml @@ -0,0 +1,48 @@ +heat_template_version: 2015-04-30 + +description: Affinity template - all VMs should be deployed on the same host + +parameters: + instance_image: + type: string + + instance_flavor: + type: string + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: affinity-1 + # key_name: ssh-key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-2: + type: OS::Nova::Server + properties: + name: affinity-2 + #key_name: ssh-key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + #networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + # # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + test-affinity-group3: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } diff --git a/valet/tests/tempest/scenario/templates/diversity_basic_2_instances.env b/valet/tests/tempest/scenario/templates/diversity_basic_2_instances.env new file mode 100644 index 0000000..5694c26 --- /dev/null +++ b/valet/tests/tempest/scenario/templates/diversity_basic_2_instances.env @@ -0,0 +1,3 @@ +parameters: + instance_image: cirros-0.3.4-x86_64 + instance_flavor: m1.tiny \ No newline at end of file diff --git a/valet/tests/tempest/scenario/templates/diversity_basic_2_instances.yml b/valet/tests/tempest/scenario/templates/diversity_basic_2_instances.yml new file mode 100644 index 0000000..62156fa --- /dev/null +++ b/valet/tests/tempest/scenario/templates/diversity_basic_2_instances.yml @@ -0,0 +1,58 @@ +heat_template_version: 2015-04-30 + +description: Diversity template - all VMs should be deployed on different hosts + +parameters: + instance_image: + type: string + + instance_flavor: + type: string + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test diversity -1 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + #networks: + #- network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + my-instance-2: + type: OS::Nova::Server + properties: + name: test diversity 
-2 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + #networks: + #- network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + # my-instance-3: + # type: OS::Nova::Server + # properties: + # name: test-3 + # key_name: my_key + # image: cirros-0.3.4-x86_64-disk.img + # flavor: m1.small + # networks: + # - network: 0c405776-0c37-4212-a814-e8aa539b32b5 + + test-diversity-group: + type: ATT::Valet::GroupAssignment + properties: + group_type: diversity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + #- {get_resource: my-instance-3} + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } \ No newline at end of file diff --git a/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.env b/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.env new file mode 100644 index 0000000..5694c26 --- /dev/null +++ b/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.env @@ -0,0 +1,3 @@ +parameters: + instance_image: cirros-0.3.4-x86_64 + instance_flavor: m1.tiny \ No newline at end of file diff --git a/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml b/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml new file mode 100644 index 0000000..28d5994 --- /dev/null +++ b/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml @@ -0,0 +1,83 @@ +heat_template_version: 2015-04-30 + +description: Nested affinity and diversity template - Host level diversity between 2 groups of affinity + +parameters: + instance_image: + type: string + + instance_flavor: + type: string + +resources: + + my-instance-1: + type: OS::Nova::Server + properties: + name: test-1 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + + my-instance-2: + type: OS::Nova::Server + properties: + name: test-2 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + + my-instance-3: + type: OS::Nova::Server + properties: + name: test-3 + # key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + + my-instance-4: + type: OS::Nova::Server + properties: + name: test-4 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + + + test-affinity-group1: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + + test-affinity-group2: + type: ATT::Valet::GroupAssignment + properties: + group_type: affinity + level: host + resources: + - {get_resource: my-instance-3} + - {get_resource: my-instance-4} + + test-diversity-group: + type: ATT::Valet::GroupAssignment + properties: + group_type: diversity + level: host + resources: + - {get_resource: test-affinity-group1} + - {get_resource: test-affinity-group2} + + +outputs: + instance_name-1: + value: { get_attr: [my-instance-1, name] } + instance_name-2: + value: { get_attr: [my-instance-2, name] } + instance_name-3: + value: { get_attr: [my-instance-3, name] } + instance_name-4: + value: { get_attr: [my-instance-4, name] } diff --git a/valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.env b/valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.env new file mode 100644 index 0000000..5694c26 --- /dev/null +++ 
b/valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.env @@ -0,0 +1,3 @@ +parameters: + instance_image: cirros-0.3.4-x86_64 + instance_flavor: m1.tiny \ No newline at end of file diff --git a/valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.yml b/valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.yml new file mode 100644 index 0000000..16e2c83 --- /dev/null +++ b/valet/tests/tempest/scenario/templates/exclusivity_basic_2_instances.yml @@ -0,0 +1,55 @@ +heat_template_version: 2015-04-30 + +description: Simple template + +parameters: + instance_image: + type: string + + instance_flavor: + type: string + +resources: + my-instance-1: + type: OS::Nova::Server + properties: + name: test exclusivity-1 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + + my-instance-2: + type: OS::Nova::Server + properties: + name: test exclusivity-2 + #key_name: my_key + image: { get_param: instance_image } + flavor: { get_param: instance_flavor } + + # my-instance-3: + # type: OS::Nova::Server + # properties: + # name: test-3 + # #key_name: my_key + # image: cirros-0.3.4-x86_64-disk.img + # flavor: m1.tiny + + test-exclusivity-group: + type: ATT::Valet::GroupAssignment + properties: + group_type: exclusivity + group_name: template_group + level: host + resources: + - {get_resource: my-instance-1} + - {get_resource: my-instance-2} + + + # test-diversity-group: + # type: ATT::CloudQoS::ResourceGroup + # properties: + # relationship: diversity + # level: host + # resources: + # - {get_resource: my-instance-1} + # - {get_resource: my-instance-3} diff --git a/valet/tests/tempest/scenario/tests/__init__.py b/valet/tests/tempest/scenario/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/tempest/scenario/tests/test_affinity.py b/valet/tests/tempest/scenario/tests/test_affinity.py new file mode 100644 index 0000000..3b507b9 --- /dev/null +++ b/valet/tests/tempest/scenario/tests/test_affinity.py @@ -0,0 +1,15 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from valet.tests.tempest.scenario.general_logger import GeneralLogger +from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase + + +class TestAffinity(ScenarioTestCase): + + def test_affinity(self): + logger = GeneralLogger("test_affinity") + self.run_test(logger, "affinity", "/templates/affinity_basic_2_instances.yml") diff --git a/valet/tests/tempest/scenario/tests/test_diversity.py b/valet/tests/tempest/scenario/tests/test_diversity.py new file mode 100644 index 0000000..655d317 --- /dev/null +++ b/valet/tests/tempest/scenario/tests/test_diversity.py @@ -0,0 +1,15 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from valet.tests.tempest.scenario.general_logger import GeneralLogger +from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase + + +class TestDiversity(ScenarioTestCase): + + def test_diversity(self): + logger = GeneralLogger("test_diversity") + self.run_test(logger, "diversity", "/templates/diversity_basic_2_instances.yml") diff --git a/valet/tests/tempest/scenario/tests/test_exclusivity.py b/valet/tests/tempest/scenario/tests/test_exclusivity.py new file mode 100644 index 0000000..67e4201 --- /dev/null +++ b/valet/tests/tempest/scenario/tests/test_exclusivity.py @@ -0,0 +1,15 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from valet.tests.tempest.scenario.general_logger import GeneralLogger +from valet.tests.tempest.scenario.scenario_base import 
ScenarioTestCase + + +class TestExclusivity(ScenarioTestCase): + + def test_exclusivity(self): + logger = GeneralLogger("test_exclusivity") + self.run_test(logger, "exclusivity", "/templates/exclusivity_basic_2_instances.yml") diff --git a/valet/tests/tempest/scenario/tests/test_nested.py b/valet/tests/tempest/scenario/tests/test_nested.py new file mode 100644 index 0000000..fb1b4a9 --- /dev/null +++ b/valet/tests/tempest/scenario/tests/test_nested.py @@ -0,0 +1,15 @@ +''' +Created on May 4, 2016 + +@author: Yael +''' + +from valet.tests.tempest.scenario.general_logger import GeneralLogger +from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase + + +class TestNested(ScenarioTestCase): + + def test_nested(self): + logger = GeneralLogger("test_nested") + self.run_test(logger, "affinity_diversity", "/templates/diversity_between_2_affinity.yml") diff --git a/valet/tests/tempest/scenario/valet_group.py b/valet/tests/tempest/scenario/valet_group.py new file mode 100644 index 0000000..bc2e34b --- /dev/null +++ b/valet/tests/tempest/scenario/valet_group.py @@ -0,0 +1,105 @@ +''' +Created on Jul 3, 2016 + +@author: Yael +''' + +import json +import requests +import traceback +from valet.tests.functional.valet_validator.common.auth import Auth +from valet.tests.functional.valet_validator.common import GeneralLogger +from valet.tests.functional.valet_validator.common.init import CONF + + +class ValetGroup(object): + + def __init__(self): + self.groups_url = "%s/groups" % CONF.valet.HOST + + self.headers = {"X-Auth-Token": Auth.get_auth_token(), + "Content-Type": "application/json"} + + def create_group(self, group_name, group_type): + grp_data = {"name": group_name, "type": group_type} + return requests.post(self.groups_url, data=json.dumps(grp_data), headers=self.headers) + + def get_list_groups(self): + list_response = requests.get(self.groups_url, headers=self.headers) + return list_response.json()["groups"] + + def get_group_details(self, group_id): + url = self.groups_url + "/" + group_id + return requests.get(url, headers=self.headers) + + def update_group_members(self, group_id, members=None): + add_member_url = self.groups_url + "/%s/members" % group_id + data = json.dumps({"members": [members or Auth.get_project_id()]}) + + return requests.put(add_member_url, data=data, headers=self.headers) + + def update_group(self, group_id, new_description): + url = self.groups_url + "/" + group_id + new_data = json.dumps({"description": new_description}) + + return requests.put(url, new_data, headers=self.headers) + + def delete_group_member(self, group_id, member_id): + url = self.groups_url + "/%s/members/%s" % (group_id, member_id) + return requests.delete(url, headers=self.headers) + + def delete_all_group_member(self, group_id): + url = self.groups_url + "/%s/members" % group_id + return requests.delete(url, headers=self.headers) + + def delete_group(self, group_id): + url = self.groups_url + "/%s" % group_id + return requests.delete(url, headers=self.headers) + + def get_group_id_and_members(self, group_name, group_type="exclusivity"): + ''' Checks if group name exists, if not - creates it + + returns group's id and members list + ''' + group_details = self.check_group_exists(group_name) + + try: + if group_details is None: + GeneralLogger.log_info("Creating group") + create_response = self.create_group(group_name, group_type) + return create_response.json()["id"], create_response.json()["members"] + else: + GeneralLogger.log_info("Group exists") + + return group_details + except 
Exception: + GeneralLogger.log_error(traceback.format_exc()) + + def add_group_member(self, group_details): + ''' Checks if member exists in group, if not - adds it ''' + # group_details - group id, group members + try: + if Auth.get_project_id() not in group_details[1]: + GeneralLogger.log_info("Adding member to group") + self.update_group_members(group_details[0]) + except Exception: + GeneralLogger.log_error("Failed to add group member", traceback.format_exc()) + + def check_group_exists(self, group_name): + ''' Checks if group exists in group list, if not returns None ''' + for grp in self.get_list_groups(): + if grp["name"] == group_name: + return grp["id"], grp["members"] + + return None + + def delete_all_groups(self): + DELETED = 204 + for group in self.get_list_groups(): + codes = [self.delete_all_group_member(group["id"]).status_code, self.delete_group(group["id"]).status_code] + + res = filter(lambda a: a != DELETED, codes) + if res: + return res[0] + + return DELETED diff --git a/valet/tests/tempest/services/__init__.py b/valet/tests/tempest/services/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/tempest/services/client.py b/valet/tests/tempest/services/client.py new file mode 100644 index 0000000..5726b70 --- /dev/null +++ b/valet/tests/tempest/services/client.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from tempest_lib.common import rest_client + + +class ValetClient(rest_client.RestClient): + + """Tempest REST client for Valet. + + Implements + 1. create, delete, update, list and show groups + 2. add, verify, delete and delete all members + 3. 
create, update and delete plan + """ + + def _resp_helper(self, resp, body=None): + if body: + body = json.loads(body) + return rest_client.ResponseBody(resp, body) + + def list_groups(self): + resp, body = self.get('/groups') + self.expected_success(200, resp.status) + return self._resp_helper(resp, body) + + def create_group(self, name, group_type, description): + params = { + "name": name, + "type": group_type, + "description": description, + } + req_body = json.dumps(params) + resp, body = self.post('/groups', req_body) + self.expected_success(201, resp.status) + return self._resp_helper(resp, body) + + def delete_group(self, group_id): + resp, body = self.delete('/groups/%s' % str(group_id)) + self.expected_success(204, resp.status) + return self._resp_helper(resp, body) + + def update_group(self, group_id, description): + params = { + 'description': description + } + req_body = json.dumps(params) + resp, body = self.put('/groups/%s' % group_id, req_body) + self.expected_success(201, resp.status) + return self._resp_helper(resp, body) + + def show_group(self, group_id): + resp, body = self.get('/groups/%s' % group_id) + self.expected_success(200, resp.status) + return self._resp_helper(resp, body) + + def add_members(self, group_id, members): + params = { + "members": members + } + data = json.dumps(params) + resp, body = self.put('/groups/%s/members' % (str(group_id)), data) + self.expected_success(201, resp.status) + return self._resp_helper(resp, body) + + def verify_membership(self, group_id, member_id): + resp, body = self.get('/groups/%s/members/%s' % (str(group_id), + str(member_id))) + self.expected_success(204, resp.status) + return self._resp_helper(resp, body) + + def delete_member(self, group_id, member_id): + resp, body = self.delete('/groups/%s/members/%s' % (str(group_id), + str(member_id))) + self.expected_success(204, resp.status) + return self._resp_helper(resp, body) + + def delete_all_members(self, group_id): + resp, body = self.delete('/groups/%s/members' % (str(group_id))) + self.expected_success(204, resp.status) + return self._resp_helper(resp, body) + + def create_plan(self, plan_name, resources, stack_id): + params = { + "plan_name": plan_name, + "stack_id": stack_id, + "resources": resources + } + data = json.dumps(params) + resp, body = self.post('/plans', data) + self.expected_success(201, resp.status) + return self._resp_helper(resp, body) + + def update_plan(self, plan_id, action, excluded_hosts, resources): + params = { + "action": action, + "excluded_hosts": excluded_hosts, + "resources": resources + } + data = json.dumps(params) + resp, body = self.put('/plans/%s' % (str(plan_id)), data) + self.expected_success(201, resp.status) + return self._resp_helper(resp, body) + + def delete_plan(self, plan_id): + resp, body = self.delete('/plans/%s' % (str(plan_id))) + self.expected_success(204, resp.status) + return self._resp_helper(resp, body) diff --git a/valet/tests/unit/__init__.py b/valet/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/unit/api/__init__.py b/valet/tests/unit/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/unit/api/common/__init__.py b/valet/tests/unit/api/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/unit/api/common/test_hooks.py b/valet/tests/unit/api/common/test_hooks.py new file mode 100644 index 0000000..aa56bd7 --- /dev/null +++ b/valet/tests/unit/api/common/test_hooks.py @@ -0,0 +1,76 @@ +''' +Created on Sep 
29, 2016 + +@author: stack +''' + +import mock +import valet.api.common.hooks as hooks +from valet.api.common.hooks import MessageNotificationHook +from valet.tests.unit.api.v1.api_base import ApiBase + + +class TestHooks(ApiBase): + + def setUp(self): + super(TestHooks, self).setUp() + + self.message_notification_hook = MessageNotificationHook() + + @mock.patch.object(hooks, 'threading') + @mock.patch.object(hooks, 'conf') + @mock.patch.object(hooks, 'webob') + def test_after_ok(self, mock_bob, mock_conf, mock_threading): + mock_bob.exc.status_map = {"test_status_code": State} + mock_bob.exc.HTTPOk = State + mock_conf.messaging.notifier.return_value = "notifier" + mock_conf.messaging.timeout = 1 + + self.message_notification_hook.after(State) + # print (dir(mock_conf)) + # self.validate_test(mock_conf.messaging.notifier.info.called) + + self.validate_test(mock_threading.Thread.called) + mock_threading.Thread.assert_called_once_with(target=mock_conf.messaging.notifier.info, args=( + {}, + 'api', {'response': {'body': State.response.body, 'status_code': State.response.status_code}, + 'context': State.request.context, + 'request': {'path': 'test_path', 'method': 'test_method', 'body': None}} + ), ) + + @mock.patch.object(hooks, 'threading') + @mock.patch.object(hooks, 'conf') + @mock.patch.object(hooks, 'webob') + def test_after_with_error(self, mock_bob, mock_conf, mock_threading): + mock_bob.exc.status_map = {"test_status_code": State} + mock_conf.messaging.notifier.return_value = "notifier" + mock_conf.messaging.timeout = 1 + + mock_bob.exc.HTTPOk = ApiBase + self.message_notification_hook.after(State) + + # self.validate_test(mock_conf.messaging.notifier.error.called) + self.validate_test(mock_threading.Thread.called) + + mock_threading.Thread.assert_called_once_with(target=mock_conf.messaging.notifier.error, args=( + {}, + 'api', {'response': {'body': State.response.body, 'status_code': State.response.status_code}, + 'context': State.request.context, + 'request': {'path': 'test_path', 'method': 'test_method', 'body': None}} + ), ) + + +class State(object): + class response(object): + status_code = "test_status_code" + body = "test_body" + + class request(object): + path = "test_path" + method = "test_method" + body = "test_req_body" + context = {'tenant_id': 'test_tenant_id', 'user_id': 'test_user_id'} + + @classmethod + def path_info_pop(cls): + return None diff --git a/valet/tests/unit/api/common/test_identity.py b/valet/tests/unit/api/common/test_identity.py new file mode 100644 index 0000000..3189d2b --- /dev/null +++ b/valet/tests/unit/api/common/test_identity.py @@ -0,0 +1,53 @@ +''' +Created on Sep 29, 2016 + +@author: stack +''' + +import mock +from valet.api.common.identity import Identity +from valet.tests.unit.api.v1.api_base import ApiBase + + +class TestIdentity(ApiBase): + + def setUp(self): + super(TestIdentity, self).setUp() + + kwargs = {'username': 'admin', 'tenant_name': 'demo', 'password': 'qwer4321', 'auth_url': 'http://controller:5000/v2.0'} + + self.identity = Identity(**kwargs) + + def test_is_token_admin(self): + self.validate_test(self.identity.is_token_admin(TokenT)) + self.validate_test(not self.identity.is_token_admin(TokenF)) + + def test_tenant_from_token(self): + self.validate_test(self.identity.tenant_from_token(TokenT) == "cb9c9997fc6e41cc87186de92aa0a099") + + def test_user_from_token(self): + self.validate_test(self.identity.user_from_token(TokenT) == "cb9c9997fc6e41cc87186de92aa0a099") + + def test_client(self): + with 
mock.patch('valet.api.common.identity.client'): + self.identity.client() + + def test_validate_token(self): + self.validate_test(self.identity.validate_token("auth_token") is None) + + with mock.patch('valet.api.common.identity.client'): + self.validate_test(self.identity.validate_token("auth_token") is not None) + + def test_is_tenant_list_validself(self): + with mock.patch('valet.api.common.identity.client'): + self.validate_test(self.identity.is_tenant_list_valid(["a", "b"]) is False) + + +class TokenT(object): + user = {'roles': [{'name': 'user'}, {'name': 'heat_stack_owner'}, {'name': 'admin'}], 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} + tenant = {'description': 'Demo Project', 'enabled': True, 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} + + +class TokenF(object): + user = {'roles': []} + tenant = {'description': 'Demo Project', 'enabled': True, 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} diff --git a/valet/tests/unit/api/common/test_messaging.py b/valet/tests/unit/api/common/test_messaging.py new file mode 100644 index 0000000..5f58d87 --- /dev/null +++ b/valet/tests/unit/api/common/test_messaging.py @@ -0,0 +1,27 @@ +''' +Created on Sep 28, 2016 + +@author: stack +''' + +import mock +import valet.api.common.messaging as messaging +from valet.tests.unit.api.v1.api_base import ApiBase + + +class TestMessaging(ApiBase): + + def setUp(self): + super(TestMessaging, self).setUp() + + @mock.patch.object(messaging, 'cfg') + @mock.patch.object(messaging, 'conf') + @mock.patch.object(messaging, 'messaging') + def test_messaging(self, mock_msg, mock_conf, mock_cfg): + mock_conf.messaging.config = {"transport_url": "test_transport_url"} + mock_msg.get_transport.return_value = "get_transport_method" + mock_msg.Notifier.return_value = "Notifier" + + messaging.init_messaging() + + self.validate_test("Notifier" in mock_conf.messaging.notifier) diff --git a/valet/tests/unit/api/common/test_ostro_helper.py b/valet/tests/unit/api/common/test_ostro_helper.py new file mode 100644 index 0000000..15bf8c7 --- /dev/null +++ b/valet/tests/unit/api/common/test_ostro_helper.py @@ -0,0 +1,140 @@ +''' +Created on Sep 27, 2016 + +@author: stack +''' + +import mock +import valet.api.common.ostro_helper as helper +from valet.api.common.ostro_helper import Ostro +import valet.api.db.models as models +from valet.tests.unit.api.v1.api_base import ApiBase + + +class TestOstroHelper(ApiBase): + + def setUp(self): + super(TestOstroHelper, self).setUp() + + self.ostro = self.init_Ostro() + + @mock.patch.object(helper, 'conf') + def init_Ostro(self, mock_conf): + mock_conf.ostro = {} + mock_conf.ostro["tries"] = 10 + mock_conf.ostro["interval"] = 1 + + return Ostro() + + def test_build_request(self): + kwargs = {'tenant_id': 'test_tenant_id', + 'args': {'stack_id': 'test_stack_id', + 'plan_name': 'test_plan_name', + 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', + 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], + 'group_type': 'affinity', + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} + self.validate_test(self.ostro.build_request(**kwargs)) + + kwargs = {'tenant_id': 'test_tenant_id', + 'args': {'stack_id': 'test_stack_id', + 'plan_name': 'test_plan_name', + 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', + 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], + 'group_type': 'affinity', + 'group_name': "test_group_name", + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} + self.validate_test(not self.ostro.build_request(**kwargs)) + 
self.validate_test("conflict" in self.ostro.error_uri) + + kwargs = {'tenant_id': 'test_tenant_id', + 'args': {'stack_id': 'test_stack_id', + 'plan_name': 'test_plan_name', + 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', + 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], + 'group_type': 'exclusivity', + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} + self.validate_test(not self.ostro.build_request(**kwargs)) + self.validate_test("invalid" in self.ostro.error_uri) + + kwargs = {'tenant_id': 'test_tenant_id', + 'args': {'stack_id': 'test_stack_id', + 'plan_name': 'test_plan_name', + 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', + 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], + 'group_type': 'exclusivity', + 'group_name': "test_group_name", + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} + self.validate_test(not self.ostro.build_request(**kwargs)) + self.validate_test("not_found" in self.ostro.error_uri) + + kwargs = {'tenant_id': 'test_tenant_id', + 'args': {'stack_id': 'test_stack_id', + 'plan_name': 'test_plan_name', + 'timeout': '60 sec', + 'resources': {'ca039d18-1976-4e13-b083-edb12b806e25': {'Type': 'ATT::Valet::GroupAssignment', + 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], + 'group_type': 'non_type', + 'group_name': "test_group_name", + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} + self.validate_test(not self.ostro.build_request(**kwargs)) + self.validate_test("invalid" in self.ostro.error_uri) + + @mock.patch.object(helper, 'uuid') + def test_ping(self, mock_uuid): + mock_uuid.uuid4.return_value = "test_stack_id" + self.ostro.ping() + + self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") + + def test_is_request_serviceable(self): + self.ostro.request = {'resources': {"bla": {'type': "OS::Nova::Server"}}} + self.validate_test(self.ostro.is_request_serviceable()) + + self.ostro.request = {} + self.validate_test(not self.ostro.is_request_serviceable()) + + def test_replan(self): + kwargs = {'args': {'stack_id': 'test_stack_id', + 'locations': 'test_locations', + 'orchestration_id': 'test_orchestration_id', + 'exclusions': 'test_exclusions'}} + self.ostro.replan(**kwargs) + + self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") + self.validate_test(self.ostro.request['locations'] == "test_locations") + self.validate_test(self.ostro.request['orchestration_id'] == "test_orchestration_id") + self.validate_test(self.ostro.request['exclusions'] == "test_exclusions") + + def test_migrate(self): + kwargs = {'args': {'stack_id': 'test_stack_id', + 'excluded_hosts': 'test_excluded_hosts', + 'orchestration_id': 'test_orchestration_id'}} + self.ostro.migrate(**kwargs) + + self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") + self.validate_test(self.ostro.request['excluded_hosts'] == "test_excluded_hosts") + self.validate_test(self.ostro.request['orchestration_id'] == "test_orchestration_id") + + @mock.patch.object(helper, 'uuid') + def test_query(self, mock_uuid): + mock_uuid.uuid4.return_value = "test_stack_id" + kwargs = {'args': {'type': 'test_type', + 'parameters': 'test_parameters'}} + self.ostro.query(**kwargs) + + self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") + self.validate_test(self.ostro.request['type'] == "test_type") + self.validate_test(self.ostro.request['parameters'] == "test_parameters") + + @mock.patch.object(models, 'PlacementRequest', mock.MagicMock) + 
@mock.patch.object(models, 'Query', mock.MagicMock)
+    def test_send(self):
+        self.ostro.args = {'stack_id': 'test_stack_id'}
+        self.ostro.send()
+        self.validate_test("server_error" in self.ostro.error_uri)
diff --git a/valet/tests/unit/api/db/__init__.py b/valet/tests/unit/api/db/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/tests/unit/api/db/test_groups.py b/valet/tests/unit/api/db/test_groups.py
new file mode 100644
index 0000000..17e1cbb
--- /dev/null
+++ b/valet/tests/unit/api/db/test_groups.py
@@ -0,0 +1,48 @@
+'''
+Created on Sep 26, 2016
+
+@author: stack
+'''
+
+import mock
+from valet.api.db.models.music import Base
+from valet.api.db.models.music.groups import Group
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestGroups(ApiBase):
+    '''Unit tests for valet.api.db.models.music.groups '''
+
+    def setUp(self):
+        super(TestGroups, self).setUp()
+
+        self.group = self.init_group()
+
+    @mock.patch.object(Base, 'insert')
+    def init_group(self, mock_insert):
+        mock_insert.return_value = None
+        members = ["me", "you"]
+        return Group("test_name", "test_description", "test_type", members)
+
+    def test__repr__(self):
+        self.validate_test("test_name" in self.group.__repr__())
+
+    def test__json__(self):
+        json = self.group.__json__()
+
+        self.validate_test(json["name"] == "test_name")
+        self.validate_test(json["type"] == "test_type")
+        self.validate_test(json["description"] == "test_description")
+
+    def test_pk_name(self):
+        self.validate_test(self.group.pk_name() == "id")
+
+    def test_pk_value(self):
+        self.validate_test(self.group.pk_value() is None)
+
+    def test_values(self):
+        val = self.group.values()
+
+        self.validate_test(val["name"] == "test_name")
+        self.validate_test(val["type"] == "test_type")
+        self.validate_test(val["description"] == "test_description")
diff --git a/valet/tests/unit/api/db/test_ostro.py b/valet/tests/unit/api/db/test_ostro.py
new file mode 100644
index 0000000..a8498f7
--- /dev/null
+++ b/valet/tests/unit/api/db/test_ostro.py
@@ -0,0 +1,99 @@
+'''
+Created on Sep 26, 2016
+
+@author: stack
+'''
+
+from valet.api.db.models.music.ostro import PlacementRequest, PlacementResult, Event
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestOstro(ApiBase):
+    '''Unit tests for valet.api.db.models.music.ostro '''
+
+    def setUp(self):
+        super(TestOstro, self).setUp()
+
+        self.placement_request = self.init_PlacementRequest()
+
+        self.placement_result = self.init_PlacementResult()
+
+        self.event = self.init_Event()
+
+    def init_PlacementRequest(self):
+        return PlacementRequest("test_request", "test_stack_id", False)
+
+    def init_PlacementResult(self):
+        return PlacementResult("test_placement", "test_stack_id", False)
+
+    def init_Event(self):
+        return Event("test_event", "test_event_id", False)
+
+    def test__repr__(self):
+        self.validate_test("test_stack_id" in self.placement_request.__repr__())
+
+        self.validate_test("test_stack_id" in self.placement_result.__repr__())
+
+        self.validate_test("test_event_id" in self.event.__repr__())
+
+    def test__json__(self):
+        request_json = self.placement_request.__json__()
+
+        self.validate_test(request_json["request"] == "test_request")
+        self.validate_test(request_json["stack_id"] == "test_stack_id")
+
+        result_json = self.placement_result.__json__()
+
+        self.validate_test(result_json["placement"] == "test_placement")
+        self.validate_test(result_json["stack_id"] == "test_stack_id")
+
+        event_json = self.event.__json__()
+
+        self.validate_test(event_json["event_id"]
== "test_event_id") + self.validate_test(event_json["event"] == "test_event") + + def test_pk_name(self): + self.validate_test(self.placement_request.pk_name() == "stack_id") + + self.validate_test(self.placement_result.pk_name() == "stack_id") + + self.validate_test(self.event.pk_name() == "event_id") + + def test_pk_value(self): + self.validate_test(self.placement_request.pk_value() == "test_stack_id") + + self.validate_test(self.placement_result.pk_value() == "test_stack_id") + + self.validate_test(self.event.pk_value() == "test_event_id") + + def test_values(self): + request_val = self.placement_request.values() + + self.validate_test(request_val["request"] == "test_request") + self.validate_test(request_val["stack_id"] == "test_stack_id") + + result_val = self.placement_result.values() + + self.validate_test(result_val["placement"] == "test_placement") + self.validate_test(result_val["stack_id"] == "test_stack_id") + + event_val = self.event.values() + + self.validate_test(event_val["event"] == "test_event") + self.validate_test(event_val["event_id"] == "test_event_id") + + def test_schema(self): + request_schema = self.placement_request.schema() + + self.validate_test(request_schema["request"] == "text") + self.validate_test(request_schema["stack_id"] == "text") + + result_schema = self.placement_result.schema() + + self.validate_test(result_schema["placement"] == "text") + self.validate_test(result_schema["stack_id"] == "text") + + event_schema = self.event.schema() + + self.validate_test(event_schema["event"] == "text") + self.validate_test(event_schema["event_id"] == "text") diff --git a/valet/tests/unit/api/db/test_placements.py b/valet/tests/unit/api/db/test_placements.py new file mode 100644 index 0000000..c259ea9 --- /dev/null +++ b/valet/tests/unit/api/db/test_placements.py @@ -0,0 +1,47 @@ +''' +Created on Sep 26, 2016 + +@author: stack +''' + +import mock +from valet.api.db.models.music import Base +from valet.api.db.models import Placement, Plan +from valet.tests.unit.api.v1.api_base import ApiBase + + +class TestPlacement(ApiBase): + '''Unit tests for valet.api.v1.controllers.placements ''' + + def setUp(self): + super(TestPlacement, self).setUp() + + self.placement = self.init_Placement() + + @mock.patch.object(Base, 'insert') + def init_Placement(self, mock_insert): + mock_insert.return_value = None + return Placement("test_name", "test_orchestration_id", plan=Plan("plan_name", "stack_id", _insert=False), location="test_location", _insert=False) + + def test__repr__(self): + self.validate_test("test_name" in self.placement.__repr__()) + + def test__json__(self): + json = self.placement.__json__() + + self.validate_test(json["name"] == "test_name") + self.validate_test(json["location"] == "test_location") + self.validate_test(json["orchestration_id"] == "test_orchestration_id") + + def test_pk_name(self): + self.validate_test(self.placement.pk_name() == "id") + + def test_pk_value(self): + self.validate_test(self.placement.pk_value() is None) + + def test_values(self): + val = self.placement.values() + + self.validate_test(val["name"] == "test_name") + self.validate_test(val["location"] == "test_location") + self.validate_test(val["orchestration_id"] == "test_orchestration_id") diff --git a/valet/tests/unit/api/db/test_plans.py b/valet/tests/unit/api/db/test_plans.py new file mode 100644 index 0000000..9949339 --- /dev/null +++ b/valet/tests/unit/api/db/test_plans.py @@ -0,0 +1,41 @@ +''' +Created on Sep 26, 2016 + +@author: stack +''' + +from valet.api.db.models import 
Plan
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestPlans(ApiBase):
+    '''Unit tests for valet.api.db.models.music.plans '''
+
+    def setUp(self):
+        super(TestPlans, self).setUp()
+
+        self.plan = self.init_Plan()
+
+    def init_Plan(self):
+        return Plan("test_name", "test_stack_id", _insert=False)
+
+    def test__repr__(self):
+        self.validate_test("test_name" in self.plan.__repr__())
+
+    def test__json__(self):
+        json = self.plan.__json__()
+
+        self.validate_test(json["name"] == "test_name")
+        self.validate_test(json["stack_id"] == "test_stack_id")
+
+    def test_pk_name(self):
+        self.validate_test(self.plan.pk_name() == "id")
+
+    def test_pk_value(self):
+        self.validate_test(self.plan.pk_value() is None)
+
+    def test_values(self):
+        val = self.plan.values()
+
+        self.validate_test(val["name"] == "test_name")
+        self.validate_test(val["stack_id"] == "test_stack_id")
diff --git a/valet/tests/unit/api/v1/__init__.py b/valet/tests/unit/api/v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet/tests/unit/api/v1/api_base.py b/valet/tests/unit/api/v1/api_base.py
new file mode 100644
index 0000000..50b155a
--- /dev/null
+++ b/valet/tests/unit/api/v1/api_base.py
@@ -0,0 +1,23 @@
+'''
+Created on Sep 25, 2016
+
+@author: stack
+'''
+
+import mock
+import pecan
+from valet.tests.base import Base
+
+
+class ApiBase(Base):
+
+    def setUp(self):
+        super(ApiBase, self).setUp()
+        pecan.conf.identity = mock.MagicMock()
+        pecan.conf.music = mock.MagicMock()
+        self.response = None
+        pecan.core.state = mock.MagicMock()
+
+    @classmethod
+    def mock_error(cls, url, msg=None, **kwargs):
+        cls.response = msg
diff --git a/valet/tests/unit/api/v1/test_groups.py b/valet/tests/unit/api/v1/test_groups.py
new file mode 100644
index 0000000..a9175ad
--- /dev/null
+++ b/valet/tests/unit/api/v1/test_groups.py
@@ -0,0 +1,231 @@
+
+import mock
+import pecan
+from valet.api.db.models.music.groups import Group
+from valet.api.db.models.music import Query, Results
+import valet.api.v1.controllers.groups as groups
+from valet.api.v1.controllers.groups import GroupsController, MembersController, GroupsItemController, MembersItemController
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestGroups(ApiBase):
+    ''' Unit tests for valet.api.v1.controllers.groups '''
+
+    def setUp(self):
+        super(TestGroups, self).setUp()
+        self.tenant_id = "testprojectid"
+
+        # core.state = mock.MagicMock()
+
+        # Testing class GroupsController
+        self.groups_controller = GroupsController()
+
+        # Testing class MembersController
+        self.members_controller = MembersController()
+
+        # Testing class GroupsItemController
+        self.groups_item_controller = self.init_GroupsItemController()
+
+        # Testing class MembersItemController
+        self.members_item_controller = self.init_MembersItemController()
+
+    @mock.patch.object(groups, 'request')
+    @mock.patch.object(Query, 'filter_by')
+    def init_GroupsItemController(self, mock_filter, mock_request):
+        mock_request.context = {}
+        mock_filter.return_value = Results([Group("test_name", "test_description", "test_type", None)])
+        contrler = GroupsItemController("group_id")
+
+        self.validate_test("test_name" == groups.request.context['group'].name)
+        return contrler
+
+    @mock.patch.object(groups, 'error', ApiBase.mock_error)
+    def test_init_GroupsItemController_unhappy(self):
+        GroupsItemController("group_id")
+        self.validate_test("Group not found" in TestGroups.response)
+
+    @mock.patch.object(groups, 'error', ApiBase.mock_error)
+    @mock.patch.object(groups, 'request')
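+    # Annotation: the helper below constructs MembersItemController twice: once
+    # with a member_id missing from the group (expecting "Member not found in
+    # group" via the mocked error()) and once with a member that exists.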
+ def init_MembersItemController(self, mock_request): + grp = Group("test_member_item_name", "test_description", "test_type", None) + grp.members = ["demo members"] + mock_request.context = {'group': grp} + + MembersItemController("member_id") + self.validate_test("Member not found in group" in TestGroups.response) + + contrler = MembersItemController("demo members") + self.validate_test("test_member_item_name" == groups.request.context['group'].name) + return contrler + + def test_allow(self): + self.validate_test(self.groups_controller.allow() == 'GET,POST') + + self.validate_test(self.members_controller.allow() == 'PUT,DELETE') + + self.validate_test(self.groups_item_controller.allow() == "GET,PUT,DELETE") + + self.validate_test(self.members_item_controller.allow() == "GET,DELETE") + + @mock.patch.object(groups, 'error', ApiBase.mock_error) + @mock.patch.object(groups, 'request') + def test_index(self, mock_request): + mock_request.method = "HEAD" + self.groups_controller.index() + self.validate_test("The HEAD method is not allowed" in TestGroups.response) + + mock_request.method = "GET" + self.members_controller.index() + self.validate_test("The GET method is not allowed" in TestGroups.response) + + mock_request.method = "POST" + self.groups_item_controller.index() + self.validate_test("The POST method is not allowed" in TestGroups.response) + + mock_request.method = "PUT" + self.members_item_controller.index() + self.validate_test("The PUT method is not allowed" in TestGroups.response) + + @mock.patch.object(groups, 'request') + def index_put(self, mock_request): + pecan.conf.identity.engine.is_tenant_list_valid.return_value = True + + mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + r = self.members_controller.index_put(members=[self.tenant_id]) + + self.validate_test(groups.response.status == 201) + self.validate_test(r.members[0] == self.tenant_id) + + return r + + @mock.patch.object(groups, 'error', ApiBase.mock_error) + @mock.patch.object(groups, 'request') + def test_index_put_unhappy(self, mock_request): + pecan.conf.identity.engine.is_tenant_list_valid.return_value = False + + mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + self.members_controller.index_put(members=[self.tenant_id]) + + self.validate_test("Member list contains invalid tenant IDs" in TestGroups.response) + + @mock.patch.object(groups, 'tenant_servers_in_group') + @mock.patch.object(groups, 'request') + def test_index_put_delete(self, mock_request, mock_func): + grp_with_member = self.index_put() + + mock_request.context = {'group': grp_with_member} + mock_func.return_value = None + self.members_controller.index_delete() + + self.validate_test(groups.response.status == 204) + self.validate_test(grp_with_member.members == []) + + @mock.patch.object(groups, 'tenant_servers_in_group') + @mock.patch.object(groups, 'request') + def test_index_delete_member_item_controller(self, mock_request, mock_func): + grp = Group("test_name", "test_description", "test_type", None) + grp.members = ["demo members"] + + mock_request.context = {'group': grp, 'member_id': "demo members"} + mock_func.return_value = None + + self.members_item_controller.index_delete() + + self.validate_test(groups.response.status == 204) + self.validate_test(grp.members == []) + + @mock.patch.object(groups, 'error', ApiBase.mock_error) + @mock.patch.object(groups, 'tenant_servers_in_group') + @mock.patch.object(groups, 'request') + def 
test_index_delete_member_item_controller_unhappy(self, mock_request, mock_func): + grp = Group("test_name", "test_description", "test_type", None) + grp.members = ["demo members"] + + mock_request.context = {'group': grp, 'member_id': "demo members"} + mock_func.return_value = None + + self.members_item_controller.index_delete() + + self.validate_test("Member not found in group" in TestGroups.response) + + @mock.patch.object(groups, 'error', ApiBase.mock_error) + @mock.patch.object(groups, 'tenant_servers_in_group') + @mock.patch.object(groups, 'request') + def test_index_delete_unhappy(self, mock_request, mock_func): + grp_with_member = self.index_put() + + mock_request.context = {'group': grp_with_member} + mock_func.return_value = "Servers" + self.members_controller.index_delete() + + self.validate_test("has servers in group" in TestGroups.response) + + @mock.patch.object(groups, 'request') + def test_index_put_groups_item_controller(self, mock_request): + mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + r = self.groups_item_controller.index_put(description="new description") + + self.validate_test(groups.response.status == 201) + self.validate_test(r.description == "new description") + + mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + r = self.groups_item_controller.index_put() + + self.validate_test(groups.response.status == 201) + self.validate_test(r.description == "test_description") + + @mock.patch.object(groups, 'request') + def test_index_delete_groups_item_controller(self, mock_request): + mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + self.groups_item_controller.index_delete() + + self.validate_test(groups.response.status == 204) + + @mock.patch.object(groups, 'error', ApiBase.mock_error) + @mock.patch.object(groups, 'request') + def test_index_delete_groups_item_controller_unhappy(self, mock_request): + grp = Group("test_name", "test_description", "test_type", None) + grp.members = ["demo members"] + mock_request.context = {'group': grp} + self.groups_item_controller.index_delete() + + self.validate_test(groups.response.status == 204) + self.validate_test("Unable to delete a Group with members." 
in TestGroups.response)
+
+    @mock.patch.object(groups, 'request')
+    @mock.patch.object(Query, 'all')
+    def test_index_get(self, mock_all, mock_request):
+        all_groups = ["group1", "group2", "group3"]
+        mock_all.return_value = all_groups
+        response = self.groups_controller.index_get()
+
+        mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)}
+        item_controller_response = self.groups_item_controller.index_get()
+
+        self.members_item_controller.index_get()
+        self.validate_test(groups.response.status == 204)
+
+        self.validate_test("test_name" in item_controller_response["group"].name)
+        self.validate_test(len(response) == 1)
+        self.validate_test(len(response["groups"]) == len(all_groups))
+        self.validate_test(all_groups == response["groups"])
+
+    def test_index_post(self):
+        group = self.groups_controller.index_post(name="testgroup", description="test description", type="testtype")
+
+        self.validate_test(groups.response.status == 201)
+        self.validate_test(group.name == "testgroup")
+
+    @mock.patch.object(groups, 'error', ApiBase.mock_error)
+    def test_index_post_unhappy(self):
+        pecan.conf.music = None
+        self.groups_controller.index_post(name="testgroup", description="test description", type="testtype")
+
+        self.validate_test("Unable to create Group" in TestGroups.response)
+
+    def test_index_options(self):
+        self.groups_item_controller.index_options()
+        self.validate_test(groups.response.status == 204)
+
+        self.members_item_controller.index_options()
+        self.validate_test(groups.response.status == 204)
diff --git a/valet/tests/unit/api/v1/test_placements.py b/valet/tests/unit/api/v1/test_placements.py
new file mode 100644
index 0000000..7ef1e3f
--- /dev/null
+++ b/valet/tests/unit/api/v1/test_placements.py
@@ -0,0 +1,105 @@
+'''
+Created on Sep 19, 2016
+
+@author: stack
+'''
+
+import mock
+import valet.api.v1.controllers.placements as placements
+from valet.api.v1.controllers.placements import PlacementsController, PlacementsItemController
+from valet.api.db.models.music import Query, Results
+from valet.api.db.models import Placement, Plan
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestPlacements(ApiBase):
+    '''Unit tests for valet.api.v1.controllers.placements '''
+
+    def setUp(self):
+        super(TestPlacements, self).setUp()
+
+        self.placements_controller = PlacementsController()
+        self.placements_item_controller = self.init_PlacementsItemController()
+
+    @mock.patch.object(placements, 'error', ApiBase.mock_error)
+    @mock.patch.object(Query, 'filter_by')
+    @mock.patch.object(placements, 'request')
+    def init_PlacementsItemController(self, mock_request, mock_filter):
+        mock_request.context = {}
+        mock_filter.return_value = Results(["", "second"])
+        try:
+            PlacementsItemController("uuid4")
+        except Exception as e:
+            self.validate_test("'str' object has no attribute 'id'" in str(e))
+            self.validate_test("Placement not found" in ApiBase.response)
+
+        mock_filter.return_value = Results([
+            Placement("test_name", "test_orchestration_id", plan=Plan("plan_name", "stack_id", _insert=False), location="test_location", _insert=False)])
+
+        return PlacementsItemController("uuid4")
+
+    def test_allow(self):
+        self.validate_test(self.placements_controller.allow() == 'GET')
+
+        self.validate_test(self.placements_item_controller.allow() == 'GET,POST,DELETE')
+
+    @mock.patch.object(placements, 'error', ApiBase.mock_error)
+    @mock.patch.object(placements, 'request')
+    def test_index(self, mock_request):
+        mock_request.method = "POST"
+
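        # Annotation: index() routes disallowed HTTP verbs to error(); with error()
+        # patched to ApiBase.mock_error, the message lands in ApiBase.response
+        # instead of aborting the request.
+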
self.placements_controller.index()
+        self.validate_test("The POST method is not allowed" in ApiBase.response)
+
+        mock_request.method = "PUT"
+        self.placements_item_controller.index()
+        self.validate_test("The PUT method is not allowed" in ApiBase.response)
+
+    def test_index_options(self):
+        self.placements_controller.index_options()
+        self.validate_test(placements.response.status == 204)
+
+        self.placements_item_controller.index_options()
+        self.validate_test(placements.response.status == 204)
+
+    @mock.patch.object(Query, 'all')
+    def test_index_get(self, mock_all):
+        all_groups = ["group1", "group2", "group3"]
+        mock_all.return_value = all_groups
+        response = self.placements_controller.index_get()
+
+        self.validate_test(len(response) == 1)
+        self.validate_test(len(response["placements"]) == len(all_groups))
+        self.validate_test(all_groups == response["placements"])
+
+        response = self.placements_item_controller.index_get()
+
+        self.validate_test("test_name" in response['placement'].name)
+        self.validate_test("test_orchestration_id" in response['placement'].orchestration_id)
+        self.validate_test("plan_name" in response['placement'].plan.name)
+        self.validate_test("stack_id" in response['placement'].plan.stack_id)
+
+    @mock.patch.object(placements, 'error', ApiBase.mock_error)
+    @mock.patch.object(Query, 'filter_by', mock.MagicMock)
+    @mock.patch.object(placements, 'update_placements')
+    def test_index_post(self, mock_plcment):
+        kwargs = {'resource_id': "resource_id", 'locations': ["test_location"]}
+        self.placements_item_controller.index_post(**kwargs)
+        self.validate_test(placements.response.status == 201)
+
+        with mock.patch('valet.api.v1.controllers.placements.Ostro') as mock_ostro:
+            kwargs = {'resource_id': "resource_id", 'locations': [""]}
+            self.placements_item_controller.index_post(**kwargs)
+            self.validate_test("Ostro error:" in ApiBase.response)
+
+            mock_plcment.return_value = None
+
+            status_type = mock.MagicMock()
+            status_type.response = {"status": {"type": "ok"}, "resources": {"iterkeys": []}}
+            mock_ostro.return_value = status_type
+
+            self.placements_item_controller.index_post(**kwargs)
+            self.validate_test(placements.response.status == 201)
+
+    def test_index_delete(self):
+        self.placements_item_controller.index_delete()
+        self.validate_test(placements.response.status == 204)
diff --git a/valet/tests/unit/api/v1/test_plans.py b/valet/tests/unit/api/v1/test_plans.py
new file mode 100644
index 0000000..fb58433
--- /dev/null
+++ b/valet/tests/unit/api/v1/test_plans.py
@@ -0,0 +1,93 @@
+'''
+Created on Sep 25, 2016
+
+@author: stack
+'''
+
+import mock
+import valet.api.v1.controllers.plans as plans
+from valet.api.v1.controllers.plans import PlansController, PlansItemController
+from valet.api.db.models.music import Query, Results
+from valet.api.db.models import Plan
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestPlans(ApiBase):
+    '''Unit tests for valet.api.v1.controllers.plans '''
+
+    def setUp(self):
+        super(TestPlans, self).setUp()
+
+        self.plans_controller = PlansController()
+        self.plans_item_controller = self.init_PlansItemController()
+
+    @mock.patch.object(plans, 'error', ApiBase.mock_error)
+    @mock.patch.object(Query, 'filter_by')
+    @mock.patch.object(plans, 'request')
+    def init_PlansItemController(self, mock_request, mock_filter):
+        mock_request.context = {}
+        mock_filter.return_value = Results(["", "second"])
+        try:
+            PlansItemController("uuid4")
+        except Exception as e:
+            self.validate_test("'str' object has no attribute 'id'" in
str(e))
+            self.validate_test("Plan not found" in ApiBase.response)
+
+        mock_filter.return_value = Results([Plan("test_name", "stack_id", _insert=False)])
+
+        return PlansItemController("uuid4")
+
+    def test_allow(self):
+        self.validate_test(self.plans_controller.allow() == 'GET,POST')
+
+        self.validate_test(self.plans_item_controller.allow() == 'GET,PUT,DELETE')
+
+    @mock.patch.object(plans, 'error', ApiBase.mock_error)
+    @mock.patch.object(plans, 'request')
+    def test_index(self, mock_request):
+        mock_request.method = "PUT"
+        self.plans_controller.index()
+        self.validate_test("The PUT method is not allowed" in ApiBase.response)
+
+        mock_request.method = "POST"
+        self.plans_item_controller.index()
+        self.validate_test("The POST method is not allowed" in ApiBase.response)
+
+    def test_index_options(self):
+        self.plans_controller.index_options()
+        self.validate_test(plans.response.status == 204)
+
+        self.plans_item_controller.index_options()
+        self.validate_test(plans.response.status == 204)
+
+    @mock.patch.object(Query, 'all')
+    def test_index_get(self, mock_all):
+        all_groups = ["group1", "group2", "group3"]
+        mock_all.return_value = all_groups
+        response = self.plans_controller.index_get()
+
+        self.validate_test(len(response) == 1)
+        self.validate_test(len(response["plans"]) == len(all_groups))
+        self.validate_test(all_groups == response["plans"])
+
+        response = self.plans_item_controller.index_get()
+
+        self.validate_test(len(response) == 1)
+        self.validate_test(response["plan"].name == "test_name")
+
+    @mock.patch.object(plans, 'error', ApiBase.mock_error)
+    def test_index_post(self):
+        with mock.patch('valet.api.v1.controllers.plans.Ostro'):
+            self.plans_controller.index_post()
+            self.validate_test("Ostro error:" in ApiBase.response)
+
+    @mock.patch.object(plans, 'error', ApiBase.mock_error)
+    @mock.patch.object(Query, 'filter_by', mock.MagicMock)
+    def test_index_put(self):
+        kwargs = {'action': "migrate", 'excluded_hosts': [], "resources": ["ggg", "fff"]}
+
+        with mock.patch('valet.api.v1.controllers.plans.Ostro'):
+            self.plans_item_controller.index_put(**kwargs)
+            self.validate_test("Ostro error:" in ApiBase.response)
+
+# TODO(YB): test_index_post and test_index_put need to be rewritten
diff --git a/valet/tests/unit/api/v1/test_root.py b/valet/tests/unit/api/v1/test_root.py
new file mode 100644
index 0000000..145560f
--- /dev/null
+++ b/valet/tests/unit/api/v1/test_root.py
@@ -0,0 +1,41 @@
+'''
+Created on Sep 26, 2016
+
+@author: stack
+'''
+
+import mock
+import valet.api.v1.controllers.root as root
+from valet.api.v1.controllers.root import RootController
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestRoot(ApiBase):
+    '''Unit tests for valet.api.v1.controllers.root '''
+
+    def setUp(self):
+        super(TestRoot, self).setUp()
+
+        self.root_controller = RootController()
+
+    def test_allow(self):
+        self.validate_test(self.root_controller.allow() == 'GET')
+
+    @mock.patch.object(root, 'error', ApiBase.mock_error)
+    @mock.patch.object(root, 'request')
+    def test_index(self, mock_request):
+        mock_request.method = "PUT"
+        self.root_controller.index()
+        self.validate_test("The PUT method is not allowed" in ApiBase.response)
+
+    def test_index_options(self):
+        self.root_controller.index_options()
+        self.validate_test(root.response.status == 204)
+
+    @mock.patch.object(root, 'request')
+    def test_index_get(self, mock_request):
+        mock_request.application_url.return_value = "application_url"
+        response = self.root_controller.index_get()
+
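        # Annotation: index_get() appears to return a {'versions': [...]} document;
+        # the checks below only assert that the first version entry and its links
+        # are present.
+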
self.validate_test(response['versions'][0])
+        self.validate_test(response['versions'][0]['links'])
diff --git a/valet/tests/unit/api/v1/test_status.py b/valet/tests/unit/api/v1/test_status.py
new file mode 100644
index 0000000..af64def
--- /dev/null
+++ b/valet/tests/unit/api/v1/test_status.py
@@ -0,0 +1,43 @@
+'''
+Created on Sep 26, 2016
+
+@author: stack
+'''
+
+import mock
+import valet.api.v1.controllers.status as status
+from valet.api.v1.controllers.status import StatusController
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestStatus(ApiBase):
+    '''Unit tests for valet.api.v1.controllers.status '''
+
+    def setUp(self):
+        super(TestStatus, self).setUp()
+
+        self.status_controller = StatusController()
+
+    def test_allow(self):
+        self.validate_test(self.status_controller.allow() == 'HEAD,GET')
+
+    @mock.patch.object(status, 'error', ApiBase.mock_error)
+    @mock.patch.object(status, 'request')
+    def test_index(self, mock_request):
+        mock_request.method = "PUT"
+        self.status_controller.index()
+        self.validate_test("The PUT method is not allowed" in ApiBase.response)
+
+    def test_index_options(self):
+        self.status_controller.index_options()
+        self.validate_test(status.response.status == 204)
+
+    def test_index_head(self):
+        with mock.patch('valet.api.v1.controllers.status.Ostro'):
+            self.status_controller.index_head()
+            self.validate_test(status.response.status == 204)
+
+    def test_index_get(self):
+        with mock.patch('valet.api.v1.controllers.status.Ostro'):
+            self.status_controller.index_get()
+            self.validate_test(status.response.status == 200)
diff --git a/valet/tests/unit/api/v1/test_v1.py b/valet/tests/unit/api/v1/test_v1.py
new file mode 100644
index 0000000..2b08737
--- /dev/null
+++ b/valet/tests/unit/api/v1/test_v1.py
@@ -0,0 +1,67 @@
+'''
+Created on Sep 22, 2016
+
+@author: stack
+'''
+
+import mock
+import pecan
+import valet.api.v1.controllers.v1 as v1
+from valet.api.v1.controllers.v1 import V1Controller
+from valet.tests.unit.api.v1.api_base import ApiBase
+
+
+class TestV1(ApiBase):
+
+    @mock.patch.object(pecan, 'conf')
+    def setUp(self, mock_conf):
+        super(TestV1, self).setUp()
+
+        mock_conf.identity.engine.validate_token.return_value = True
+        mock_conf.identity.engine.is_token_admin.return_value = True
+        mock_conf.identity.engine.tenant_from_token.return_value = "tenant_id"
+        mock_conf.identity.engine.user_from_token.return_value = "user_id"
+
+        self.v1_controller = V1Controller()
+
+    @mock.patch.object(v1, 'request')
+    def test_check_permissions(self, mock_request):
+        mock_request.headers.get.return_value = "auth_token"
+        mock_request.path.return_value = "bla bla bla"
+        mock_request.json.return_value = {"action": "create"}
+        mock_request.context = {}
+
+        self.validate_test(self.v1_controller.check_permissions() is True)
+
+    @mock.patch.object(v1, 'error', ApiBase.mock_error)
+    @mock.patch.object(v1, 'request')
+    def test_check_permissions_auth_unhappy(self, mock_request):
+        mock_request.headers.get.return_value = None
+        mock_request.path.return_value = "bla bla bla"
+        mock_request.json.return_value = {"action": "create"}
+        mock_request.context = {}
+
+        self.v1_controller.check_permissions()
+        self.validate_test("Unauthorized - No auth token" in ApiBase.response)
+
+    def test_allow(self):
+        self.validate_test(self.v1_controller.allow() == 'GET')
+
+    @mock.patch.object(v1, 'error', ApiBase.mock_error)
+    @mock.patch.object(v1, 'request')
+    def test_index(self, mock_request):
+        mock_request.method = "PUT"
+        self.v1_controller.index()
+
self.validate_test("The PUT method is not allowed" in ApiBase.response) + + def test_index_options(self): + self.v1_controller.index_options() + self.validate_test(v1.response.status == 204) + + @mock.patch.object(v1, 'request') + def test_index_get(self, mock_request): + mock_request.application_url.return_value = "application_url" + response = self.v1_controller.index_get() + + self.validate_test(response['versions'][0]) + self.validate_test(response['versions'][0]['links']) diff --git a/valet/tests/unit/cli/__init__.py b/valet/tests/unit/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/unit/cli/test_groupcli.py b/valet/tests/unit/cli/test_groupcli.py new file mode 100644 index 0000000..d7fd6da --- /dev/null +++ b/valet/tests/unit/cli/test_groupcli.py @@ -0,0 +1,22 @@ +import mock +# from valet.cli.groupcli import cmd_details +import valet.cli.groupcli as grpcli +from valet.tests.base import Base +# from valet.cli.valetcli import Cli + + +class TestGroupcli(Base): + ''' Unit tests for valet.valetcli ''' + + def setUp(self): + super(TestGroupcli, self).setUp() + + @mock.patch.object(grpcli, 'requests') + def test_cmd_details(self, mock_requests): + mock_requests.post = 'post' + + ar = mock.MagicMock() + ar.subcmd = "create" + +# res = grpcli.cmd_details(ar) +# print(res) diff --git a/valet/tests/unit/cli/test_valetcli.py b/valet/tests/unit/cli/test_valetcli.py new file mode 100644 index 0000000..6107ae6 --- /dev/null +++ b/valet/tests/unit/cli/test_valetcli.py @@ -0,0 +1,29 @@ +import mock +from valet.cli.valetcli import Cli +from valet.tests.base import Base + + +class TestValetcli(Base): + ''' Unit tests for valet.valetcli ''' + + def setUp(self): + super(TestValetcli, self).setUp() + + def test_parse(self): + cli = Cli() + cli.create_parser() + argv = ['/path/to/valetcli.py', 'group', 'list'] + cli.parse(argv) + + self.validate_test(cli.args.service == 'group') + + def test_logic(self): + cli = Cli() + cli.submod = mock.MagicMock() + cli.args = mock.MagicMock() + cli.args.service = "group" + cli.logic() + + self.validate_test(len(cli.submod.mock_calls) == 2) + self.validate_test("call.__getitem__('group')" in str(cli.submod.mock_calls[0])) + self.validate_test("call.__getitem__().run" in str(cli.submod.mock_calls[1])) diff --git a/valet/tests/unit/engine/__init__.py b/valet/tests/unit/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/unit/engine/empty.cfg b/valet/tests/unit/engine/empty.cfg new file mode 100644 index 0000000..e69de29 diff --git a/valet/tests/unit/engine/invalid.cfg b/valet/tests/unit/engine/invalid.cfg new file mode 100644 index 0000000..79be42a --- /dev/null +++ b/valet/tests/unit/engine/invalid.cfg @@ -0,0 +1,2 @@ +1+1 +2+2 diff --git a/valet/tests/unit/engine/test_config.py b/valet/tests/unit/engine/test_config.py new file mode 100644 index 0000000..828867e --- /dev/null +++ b/valet/tests/unit/engine/test_config.py @@ -0,0 +1,42 @@ +''' +Created on Aug 17, 2016 + +@author: YB +''' + +import sys +from valet.engine.optimizer.ostro_server.configuration import Config +from valet.tests.base import Base + +from oslo_config import cfg + + +class TestConfig(Base): + + def setUp(self): + super(TestConfig, self).setUp() + sys.argv = [sys.argv[0]] + +# def test_simple_config(self): +# cfg.CONF.clear() +# config = Config() +# config_status = config.configure() +# +# self.validate_test(config_status == "success") + + def test_unhappy_config_io(self): + cfg.CONF.clear() + try: + config = 
Config("unhappy.cfg") + config_status = config.configure() + self.validate_test("I/O error" in config_status) + + except Exception as ex: + self.validate_test(isinstance(ex, cfg.ConfigFilesNotFoundError)) + + def test_config_io(self): + cfg.CONF.clear() + config = Config("etc/valet/valet.conf") + config_status = config.configure() + + self.validate_test(config_status == "success") diff --git a/valet/tests/unit/engine/test_ostro.cfg b/valet/tests/unit/engine/test_ostro.cfg new file mode 100644 index 0000000..80e9a42 --- /dev/null +++ b/valet/tests/unit/engine/test_ostro.cfg @@ -0,0 +1,150 @@ +# __ +# /_\ |__| | +# / \ | | +# + +[server] +host = 0.0.0.0 +port = 8090 + +[messaging] +username = rabbitmq_username +password = rabbitmq_psw +host = rabbitmq_host +port = rabbitmq_port + +[identity] +project_name = project_name +username = project_username +password = project_username_password +auth_url = http://keystone_host:5000/v2.0 +# interface = admin + +# _ _ +# | \ |_\ +# |_/ |_/ +# + +[music] +host = music_host +port = 8080 +keyspace = valet_keyspace +replication_factor = 3 +# tries = 10 +# interval = 1 +# request_table = placement_requests +# response_table = placement_results +# event_table = oslo_messages +# resource_table = resource_status +# app_table = app +# resource_index_table = resource_log_index +# app_index_table = app_log_index +# uuid_table = uuid_map + + +# __ __ __ +# |__ |\ | | | |\ | |__ +# |__ | \| |__T | | \| |__ +# + +[engine] +# Set the location of daemon process id +pid = /var/run/valet/ostro-daemon.pid + +# Set keystone APIs +keystone_tenant_url = http://keystone_host:35357/v2.0/tokens +keystone_project_url = http://keystone_host:35357/v3/projects + +# Set OpenStack Nova APIs +nova_url = http://nova_host:8774/v2/ +# nova_host_resources_api = /os-hypervisors/detail +# nova_host_zones_api = /os-hosts +# nova_host_aggregates_api = /os-aggregates +# nova_flavors_api = /flavors/detail + +# Set IP of this Ostro +# ip = localhost + +# Used for Ostro active/passive selection +priority = 1 + + + +#------------------------------------------------------------------------------------------------------------ +# Logging configuration +#------------------------------------------------------------------------------------------------------------ +# Set logging parameters +# logger_name = test + +# logging level = [debug|info] +# logging_level = debug + +# Set the directory to locate the log file +# logging_dir = /var/log/valet/engine/ + +# Set the maximum size of the main logger as Byte +# max_main_log_size = 5000000 + +# Set the maximum logfile size as Byte for time-series log files +# max_log_size = 1000000 + +# Set the maximum number of time-series log files +# max_num_of_logs = 20 + +#------------------------------------------------------------------------------------------------------------ +# Management configuration +#------------------------------------------------------------------------------------------------------------ +# Inform the name of datacenter (region name), where Valet/Ostro is deployed. +# datacenter_name = bigsite + +# Set the naming convention rules. +# Currently, 3 chars of CLLI + region number + 'r' + rack id number + 1 char of node type + node id number. +# For example, pdk15r05c001 indicates the first KVM compute server (i.e., 'c001') in the fifth rack +# (i.e., 'r05') in the fifteenth DeKalb-Peachtree Airport Region (i.e., 'pdk15'). + +# Set the number of chars that indicates the region code. The above example, 'pdk' is the region code. 
+# num_of_region_chars = 3 + +# Set 1 char of rack indicator. This should be 'r'. +# rack_code_list = r + +# Set all of chars, each of which indicates the node type. +# Currently, 'a' = network, 'c' = KVM compute, 'u' = ESXi compute, 'f' = ?, 'o' = operation, 'p' = power, +# 's' = storage. +# node_code_list = a,c,u,f,o,p,s + +# Set trigger time or frequency for checking compute hosting server status (i.e., call Nova) +# Note that currently, compute (Nova) should be triggered first then trigger topology. +# compute_trigger_time = 01:00 +# compute_trigger_frequency = 3600 + +# Set trigger time or frequency for checking datacenter topology +# topology_trigger_time = 02:00 +# topology_trigger_frequency = 3600 + +# Set default overbooking ratios. Note that each compute node can have its own ratios. +# default_cpu_allocation_ratio = 16 +# default_ram_allocation_ratio = 1.5 +# default_disk_allocation_ratio = 1 + +# Set static unused percentages of resources (i.e., standby) that are set aside for applications's workload spikes. +# static_cpu_standby_ratio = 20 +# static_mem_standby_ratio = 20 +# static_local_disk_standby_ratio = 20 + + +# Set Ostro execution mode +# mode = [live|sim], sim will let Ostro simulate datacenter, while live will let it handle a real datacenter +# mode = live +# Set the location of simulation configuration file (i.e., ostro_sim.cfg). +# This is used only when the simulation mode +# sim_cfg_loc = /etc/valet/engine/ostro_sim.cfg + +# Inform whether network controller (i.e., Tegu) has been deployed. +# If it does, set its API, Otherwise ignore these parameters +# network_control = no +# network_control_api = 29444/tegu/api + +# Set RPC server ip and port if used. Otherwise, ignore these parameters +# rpc_server_ip = localhost +# rpc_server_port = 8002 diff --git a/valet/tests/unit/engine/test_search.py b/valet/tests/unit/engine/test_search.py new file mode 100644 index 0000000..5ad72ac --- /dev/null +++ b/valet/tests/unit/engine/test_search.py @@ -0,0 +1,25 @@ +''' +Created on Sep 26, 2016 + +@author: stack +''' + +import logging +import mock +from valet.engine.optimizer.ostro.search import Search +from valet.tests.base import Base + +LOG = logging.getLogger(__name__) + + +class TestSearch(Base): + + def setUp(self): + super(TestSearch, self).setUp() + + self.search = Search(LOG) + + def test_copy_resource_status(self): + self.search.copy_resource_status(mock.MagicMock()) + +# def test_place_nodes(self): diff --git a/valet/tests/unit/engine/test_topology.py b/valet/tests/unit/engine/test_topology.py new file mode 100644 index 0000000..19c3dd5 --- /dev/null +++ b/valet/tests/unit/engine/test_topology.py @@ -0,0 +1,60 @@ +''' +Created on Aug 17, 2016 + +@author: YB +''' + +from valet.engine.resource_manager.topology import Topology +from valet.tests.base import Base + + +class TestTopology(Base): + + def setUp(self): + super(TestTopology, self).setUp() + self.topo = Topology(Config(), None) + + def test_simple_topology(self): + (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk15r05c001") + + self.validate_test(region == "pdk15") + self.validate_test(rack == "pdk15r05") + self.validate_test(node_type in "a,c,u,f,o,p,s") + self.validate_test(status == "success") + + def test_domain_topology(self): + (region, rack, node_type, status) = self.topo._set_layout_by_name("ihk01r01c001.emea.att.com") + + self.validate_test(region == "ihk01") + self.validate_test(rack == "ihk01r01") + self.validate_test(node_type in "a,c,u,f,o,p,s") + self.validate_test(status == 
"success") + + def test_unhappy_topology_r(self): + (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk1505c001") + self.validate_test(region == "none") + self.validate_test(rack == "none") + self.validate_test(node_type is None) + self.validate_test(status == "invalid number of identification fields = 0") + + def test_unhappy_topology_c(self): + (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk15r05001") + self.validate_test(region == "none") + self.validate_test(rack == "none") + self.validate_test(node_type is None) + self.validate_test(status == "invalid number of identification fields = 1") + +# def test_unhappy_topology_c_domain(self): +# (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk15r05001.emea.att.com") +# self.validate_test(region == "none") +# self.validate_test(rack == "none") +# self.validate_test(node_type is None) +# self.validate_test(status == "invalid number of identification fields = 1") + +# TODO(GY): add validation to topology for region + + +class Config(object): + num_of_region_chars = 3 + rack_code_list = "r" + node_code_list = "a,c,u,f,o,p,s" diff --git a/valet/tests/unit/test_general.py b/valet/tests/unit/test_general.py new file mode 100644 index 0000000..9ea5e77 --- /dev/null +++ b/valet/tests/unit/test_general.py @@ -0,0 +1,11 @@ + +from valet.tests.base import Base + + +class TestGeneral(Base): + + def setUp(self): + super(TestGeneral, self).setUp() + + def test_general(self): + self.validate_test(True) diff --git a/valet_plugins/.coveragerc b/valet_plugins/.coveragerc new file mode 100644 index 0000000..4f8da07 --- /dev/null +++ b/valet_plugins/.coveragerc @@ -0,0 +1,8 @@ +[run] +branch = True +source = valet +omit = valet_plugins/tests/* +cover_pylib = True + +[report] +ignore_errors = True diff --git a/valet_plugins/.gitignore b/valet_plugins/.gitignore new file mode 100644 index 0000000..560590d --- /dev/null +++ b/valet_plugins/.gitignore @@ -0,0 +1,106 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] + +#ignore thumbnails created by windows +Thumbs.db +#Ignore files build by Visual Studio +*.obj +*.exe +*.pdb +*.user +*.aps +*.pch +*.vspscc +*_i.c +*_p.c +*.ncb +*.suo +*.tlb +*.tlh +*.bak +*.cache +*.ilk +*.log +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.eggs/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.cache +nosetests.xml +coverage.xml +.project +.pydevproject +.settings/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +#ignore thumbnails created by windows +Thumbs.db +#Ignore files build by Visual Studio +*.obj +*.exe +*.pdb +*.user +*.aps +*.pch +*.vspscc +*_i.c +*_p.c +*.ncb +*.suo +*.tlb +*.tlh +*.bak +*.cache +*.ilk +[Bb]in +[Dd]ebug*/ +*.lib +*.sbr +obj/ +[Rr]elease*/ +_ReSharper*/ +[Tt]est[Rr]esult* +.idea/* diff --git a/valet_plugins/.testr.conf b/valet_plugins/.testr.conf new file mode 100644 index 0000000..a933ced --- /dev/null +++ b/valet_plugins/.testr.conf @@ -0,0 +1,7 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-1000} \ + ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./valet_plugins/tests/unit} -t . $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/valet_plugins/LICENSE b/valet_plugins/LICENSE new file mode 100644 index 0000000..68c771a --- /dev/null +++ b/valet_plugins/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
diff --git a/valet_plugins/README b/valet_plugins/README
new file mode 100644
index 0000000..838f0a6
--- /dev/null
+++ b/valet_plugins/README
@@ -0,0 +1,57 @@
+Ostro version 2.0.2 Installation and Usage Guide
+
+Author: Gueyoung Jung
+Contact: gjung@research.att.com
+
+
+INSTALLATION
+
+You can download the latest Ostro python code from the repository (GitHub).
+
+USAGE
+
+1. Configure Ostro
+
+Set authentication in the "ostro_server/ostro.auth" file. The user must have permission to access OpenStack Nova so that Ostro can extract the underlying cloud infrastructure information.
+
+You must check “ostro_server/ostro.cfg” to run Ostro correctly. Here, we explain the configuration parameters found in “ostro.cfg”.
+
+Configuration consists of 1) system, 2) logging, and 3) management parts.
+
+1.1 System configuration
+- First, define the base directory where Ostro is installed in “root_loc”.
+- “mode” can be either “live” or “sim”. “live” means Ostro runs over the real OpenStack site, while “sim” means Ostro can be tested over a simulated datacenter. To configure the simulated datacenter, you should check “ostro_server/ostro_sim.cfg”.
+- “control_loc” sets the URL where the OpenStack controller is deployed. From this URL, Ostro will get some data from Nova and Keystone (Cinder will be added in the next version).
+- Currently, Ostro communicates with Keystone and Nova via REST APIs; those APIs are set in “keystone_*” and “nova_*”.
+- “db_*” sets the parameters used for handling the Music database, such as the Cassandra keyspace and table names. "replication_factor" sets how many Music instances run, and "db_hosts" lists the IPs where the Music instances run.
+- “ip” indicates the IP address of the VM where this Ostro instance runs. If Ostro instances are installed in multiple VMs, set “ip” in each configuration.
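+
+For a quick check that a configuration file parses, the engine's oslo-based Config loader can be driven the same way the unit tests drive it. The following is an illustrative sketch only (the file path and calls mirror valet/tests/unit/engine/test_config.py, not a supported CLI):
+
+    import sys
+    from oslo_config import cfg
+    from valet.engine.optimizer.ostro_server.configuration import Config
+
+    sys.argv = [sys.argv[0]]                 # keep oslo.config from parsing stray args
+    cfg.CONF.clear()                         # start from a clean oslo.config state
+    config = Config("etc/valet/valet.conf")  # path taken from the unit tests
+    print(config.configure())                # "success" on a clean parse, else an error string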
+
+1.2 Logging configuration
+You can set up the logging configuration, including the logger name, logging level, and directory. If you set the logging level to “debug”, Ostro leaves detailed records. Ostro also records two time-series data sets as text files (i.e., resource_log and app_log). Because these logs can grow large, the number of log files and the maximum size of each file are bounded by “max_num_of_logs” and “max_log_size”. For example, when “max_num_of_logs” is 20 and the 21st log file is about to be written, Ostro overwrites the 1st file (i.e., the logging rotates).
+"max_main_log_size" is the maximum size of the main Ostro log, whose name is defined in "logger_name" and whose location is "logging_loc".
+
+1.3 Management configuration
+- “datacenter_name” is the name of the site (region) that Ostro manages. It is used as the key value when getting site topology data from AIC Formation.
+- “num_of_region_chars”, “rack_code_list”, and “node_code_list” define the machine naming convention. In the current version, Ostro parses each hosting server's machine name to determine the region code and the rack where the machine is located, based on the current naming convention document. The convention is as follows:
+3 chars of CLLI + region number + 'r' + rack id number + 1 char of node type + node id number. For example, “pdk15r05c001” indicates the first KVM compute server (i.e., 'c001') in the fifth rack (i.e., 'r05') in the fifteenth DeKalb-Peachtree Airport Region (i.e., 'pdk15').
+In “num_of_region_chars”, set the number of chars that indicate the region code. In the above example, 'pdk' is the region code.
+In “rack_code_list”, set the single rack-indicator char. This should be 'r'.
+In “node_code_list”, set all chars, each of which indicates a node type. Currently, 'a': network, 'c': KVM compute, 'u': ESXi compute, 'f': ?, 'o': operation, 'p': power, 's': storage.
+- “compute_trigger_time” and “compute_trigger_frequency” set when Nova is called to gather the information used for decision making, such as the list of hosting servers and their resource capacities, host aggregates, and availability zones. The value of “compute_trigger_time” uses the 24-hour clock (e.g., “13:00” means 1pm). The value of “compute_trigger_frequency” is in seconds (e.g., “3” means every 3 seconds). Ostro checks “compute_trigger_frequency” first; if its value is “0”, it uses “compute_trigger_time” instead.
+- “topology_trigger_time” and “topology_trigger_frequency” are similar to the above, but they set when the site layout/topology is gathered. Note that currently Nova must be called first and topology second, so “compute_trigger_time” must be earlier than “topology_trigger_time”.
+- The “default_*” parameters set the default overcommit ratios.
+- The “static_*” parameters set the standby resource amount as a percentage. Standby means Ostro sets aside a certain amount of resources (CPU, memory, and disk) as unused, to absorb load spikes of tenant applications. This will become more dynamic in a future version.
+- “auth_loc” is the directory of the authentication file. The admin must have permission to access OpenStack Nova so that Ostro can extract the underlying cloud infrastructure information.
+
+
+2. Start/Stop Ostro daemon
+
+Ostro runs as a daemon process. Go to the “ostro_server” directory, then start the Ostro daemon as follows:
+
+   python ostro_daemon.py start
+
+To stop this daemon process:
+
+   python ostro_daemon.py stop
+
+
diff --git a/valet_plugins/RELEASE b/valet_plugins/RELEASE
new file mode 100644
index 0000000..f8ce116
--- /dev/null
+++ b/valet_plugins/RELEASE
@@ -0,0 +1,26 @@
+Valet1.0/Ostro features
+
+- Optimal VM placement
+  Using its placement algorithm, Ostro computes the holistic optimal placement for all VMs of a given application.
Support for NUMAAffinityFilter and PCIPassthroughFilter is in progress.
+
+- Constraint solving (filtering)
+  Current constraints (i.e., filters) include CoreFilter, RamFilter, DiskFilter, host aggregates, and availability zones. Ostro's decision making can differ from Nova's when a tenant attempts to use other filters available in Nova. In that case, Ostro yields its decision to the Nova scheduler unless the tenant uses affinity, diversity, or exclusivity in the Heat template. However, we will restrict some Nova filters if they affect cloud security, reliability, or efficiency.
+
+- Affinity, Diversity, and Exclusivity at either the host or rack level
+  In addition to the above filters, Ostro supports affinity, diversity (anti-affinity), and exclusivity. The rack level is supported when the topology/layout information of the site is available; currently, the host machine naming convention is used. If a site does not follow the naming convention, rack-level requests will be rejected. Note that these special filters can be mixed with the basic filters.
+
+- Resource standby
+  When allocating resources (CPU, memory, disk, and later network bandwidth), Ostro intentionally leaves a certain percentage of resources unused, to absorb load spikes of tenant applications. A more dynamic mechanism will be deployed in a future version of Ostro.
+
+- High availability
+  Ostro replicas run in an active-passive way. When the active Ostro fails, the passive one is activated automatically via HAValet. All data is updated in the MUSIC database at runtime whenever it changes, so when the passive Ostro is activated, it initializes its state from MUSIC rather than from OpenStack. Ostro also responds to ping messages to show whether it is alive.
+
+- Runtime update via the Oslo message bus or RO
+  Work in progress.
+
+- Migration tip
+  Work in progress.
+
+
+
diff --git a/valet_plugins/requirements.txt b/valet_plugins/requirements.txt
new file mode 100644
index 0000000..fb4b42f
--- /dev/null
+++ b/valet_plugins/requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+ +pip +simplejson \ No newline at end of file diff --git a/valet_plugins/setup.cfg b/valet_plugins/setup.cfg new file mode 100644 index 0000000..64667d7 --- /dev/null +++ b/valet_plugins/setup.cfg @@ -0,0 +1,34 @@ +[metadata] +name = valet_plugins +version = 1.0 +summary = Valet Orchestration Plugins for OpenStack +description-file = README.md +author = AT&T +author-email = jdandrea@research.att.com +homepage = https://github.com/att-comdev/valet +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[files] +packages = + valet_plugins +data_files = + valet_plugins/heat = valet_plugins/heat/* + +[entry_points] +#cinder.scheduler.filters = +# ValetFilter = valet_os.cinder.valet_filter:ValetFilter +heat.stack_lifecycle_plugins = + valet.lifecycle_plugin = valet_plugins.plugins.heat.plugins:ValetLifecyclePlugin + diff --git a/valet_plugins/setup.py b/valet_plugins/setup.py new file mode 100644 index 0000000..a68a83c --- /dev/null +++ b/valet_plugins/setup.py @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing permissions and +# limitations under the License. + + +'''Setup''' + +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa # pylint: disable=W0611,C0411 +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/valet_plugins/test-requirements.txt b/valet_plugins/test-requirements.txt new file mode 100644 index 0000000..cc3675c --- /dev/null +++ b/valet_plugins/test-requirements.txt @@ -0,0 +1,25 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ + +hacking<0.11,>=0.10.0 +os-testr<=0.7.0 +markupsafe<=0.23 +pecan<=0.8.2 +notario<=0.0.11 +coverage>=3.6 +python-subunit>=0.0.18 +mock>=1.2 +oslotest>=1.10.0 # Apache-2.0 +oslo.config>=1.9.0 +testrepository>=0.0.18 +sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 +testscenarios>=0.4 +testtools>=1.4.0 +oslo.i18n<=3.8.0 +oslo.log>=1.0.0 +pytz +python-keystoneclient<=3.4.0 +python-novaclient<=4.0.0 +python-heatclient<=1.2.0 diff --git a/valet_plugins/tox.ini b/valet_plugins/tox.ini new file mode 100644 index 0000000..48c46db --- /dev/null +++ b/valet_plugins/tox.ini @@ -0,0 +1,56 @@ +[tox] +#minversion = 2.0 +envlist = pep8 +#py27 +#py27-constraints, pep8-constraints +#py34-constraints,py27-constraints,pypy-constraints,pep8-constraints +#skipsdist = True + + +[testenv] +usedevelop = True +install_command = + pip install -U {opts} {packages} + +setenv = VIRTUAL_ENV={envdir} + OS_TEST_PATH=valet_plugins/tests/unit/ + + +commands = + find . -type f -name "*.pyc" -delete + ostestr --slowest '{posargs}' +deps = -r{toxinidir}/test-requirements.txt + +whitelist_externals = + bash + find + + +[testenv:pep8] +commands = flake8 +#commands = flake8 {posargs} + + +[testenv:venv] +commands = {posargs} + + +[testenv:cover] +commands = + coverage erase + python setup.py test --slowest --coverage --coverage-package-name 'valet_plugins' --testr-args='{posargs}' + coverage report + + +[testenv:docs] +commands = python setup.py build_sphinx + + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. + +show-source = True +ignore = E123,E125,E501,H401,H501,H301 +builtins = _ +exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*egg-info + diff --git a/valet_plugins/valet_plugins/PKG-INFO b/valet_plugins/valet_plugins/PKG-INFO new file mode 100644 index 0000000..84496b9 --- /dev/null +++ b/valet_plugins/valet_plugins/PKG-INFO @@ -0,0 +1,4 @@ +Metadata-Version: 1.2 +Name: valet_plugins +Version: 0.1.0 +Author-email: jdandrea@research.att.com diff --git a/valet_plugins/valet_plugins/__init__.py b/valet_plugins/valet_plugins/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet_plugins/valet_plugins/common/__init__.py b/valet_plugins/valet_plugins/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet_plugins/valet_plugins/common/valet_api.py b/valet_plugins/valet_plugins/common/valet_api.py new file mode 100644 index 0000000..905aab0 --- /dev/null +++ b/valet_plugins/valet_plugins/common/valet_api.py @@ -0,0 +1,172 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2014-2016 AT&T +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing permissions and +# limitations under the License. 
+
+'''Valet API Wrapper'''
+
+from heat.common.i18n import _
+import json
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+import requests
+import sys
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def _exception(exc, exc_info, req):
+    '''Handle an exception'''
+    if req is None:
+        # The request never completed (e.g., a connection error or timeout
+        # before a response was received), so there is no body to inspect.
+        LOG.error("Exception is: %s (no response received)" % exc)
+        return
+    response = None
+    try:
+        response = json.loads(req.text)
+    except Exception as e:
+        LOG.error("Exception is: %s, body is: %s" % (e, req.text))
+        return
+
+    if 'error' in response:
+        error = response.get('error')
+        msg = "%(explanation)s (valet-api: %(message)s)" % {
+            'explanation': response.get('explanation', _('No remediation available')),
+            'message': error.get('message', _('Unknown error'))
+        }
+        raise ValetAPIError(msg)
+    else:
+        # TODO(JD): Re-evaluate if this clause is necessary.
+        exc_class, exc, traceback = exc_info  # pylint: disable=W0612
+        msg = _("%(exc)s for %(method)s %(url)s with body %(body)s") % {'exc': exc, 'method': exc.request.method, 'url': exc.request.url, 'body': exc.request.body}
+        my_exc = ValetAPIError(msg)
+        # traceback can be added to the end of the raise
+        raise my_exc.__class__, my_exc
+
+
+# TODO(JD): Improve exception reporting back up to heat
+class ValetAPIError(Exception):
+    '''Valet API Error'''
+    pass
+
+
+class ValetAPIWrapper(object):
+    '''Valet API Wrapper'''
+
+    def __init__(self):
+        '''Initializer'''
+        self.headers = {'Content-Type': 'application/json'}
+        self.opt_group_str = 'valet'
+        self.opt_name_str = 'url'
+        self.opt_conn_timeout = 'connect_timeout'
+        self.opt_read_timeout = 'read_timeout'
+        self._register_opts()
+
+    def _api_endpoint(self):
+        '''Returns the API endpoint'''
+        try:
+            opt = getattr(cfg.CONF, self.opt_group_str)
+            endpoint = opt[self.opt_name_str]
+            if endpoint:
+                return endpoint
+            else:
+                # FIXME: Possibly not wanted (misplaced-bare-raise)
+                raise  # pylint: disable=E0704
+        except Exception:
+            raise  # exception.Error(_('API Endpoint not defined.'))
+
+    def _get_timeout(self):
+        '''Returns the Valet plugin API request timeout tuple (conn_timeout, read_timeout)'''
+        conn_timeout = 3
+        read_timeout = 5
+        try:
+            opt = getattr(cfg.CONF, self.opt_group_str)
+            conn_timeout = opt[self.opt_conn_timeout]
+            read_timeout = opt[self.opt_read_timeout]
+        except Exception:
+            pass
+        return conn_timeout, read_timeout
+
+    def _register_opts(self):
+        '''Register options'''
+        opts = []
+        option = cfg.StrOpt(self.opt_name_str, default=None, help=_('Valet API endpoint'))
+        opts.append(option)
+        option = cfg.IntOpt(self.opt_conn_timeout, default=3, help=_('Valet Plugin Connect Timeout'))
+        opts.append(option)
+        option = cfg.IntOpt(self.opt_read_timeout, default=5, help=_('Valet Plugin Read Timeout'))
+        opts.append(option)
+
+        opt_group = cfg.OptGroup(self.opt_group_str)
+        cfg.CONF.register_group(opt_group)
+        cfg.CONF.register_opts(opts, group=opt_group)
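+
+    # For illustration only (assumed deployment wiring, not part of this
+    # patch): the options registered above are read from a [valet] section
+    # in the consuming service's configuration file (e.g., heat.conf), such
+    # as:
+    #
+    #   [valet]
+    #   url = http://127.0.0.1:8090/v1
+    #   connect_timeout = 3
+    #   read_timeout = 5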
+
+    # TODO(JD): Keep stack param for now. We may need it again.
+    def plans_create(self, stack, plan, auth_token=None):  # pylint: disable=W0613
+        '''Create a plan'''
+        response = None
+        # Initialize req so the exception handler below can tell whether the
+        # request ever completed.
+        req = None
+        try:
+            timeout = self._get_timeout()
+            url = self._api_endpoint() + '/plans/'
+            payload = json.dumps(plan)
+            self.headers['X-Auth-Token'] = auth_token
+            req = requests.post(url, data=payload, headers=self.headers, timeout=timeout)
+            req.raise_for_status()
+            response = json.loads(req.text)
+        except (requests.exceptions.HTTPError, requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError)\
+                as exc:
+            _exception(exc, sys.exc_info(), req)
+        except Exception as e:
+            LOG.error("Exception (at plans_create) is: %s" % e)
+        return response
+
+    # TODO(JD): Keep stack param for now. We may need it again.
+    def plans_delete(self, stack, auth_token=None):  # pylint: disable=W0613
+        '''Delete a plan'''
+        req = None
+        try:
+            timeout = self._get_timeout()
+            url = self._api_endpoint() + '/plans/' + stack.id
+            self.headers['X-Auth-Token'] = auth_token
+            req = requests.delete(url, headers=self.headers, timeout=timeout)
+        except (requests.exceptions.HTTPError, requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError)\
+                as exc:
+            _exception(exc, sys.exc_info(), req)
+        except Exception as e:
+            LOG.error("Exception (plans_delete) is: %s" % e)
+        # Delete does not return a response body.
+
+    def placement(self, orch_id, res_id, hosts=None, auth_token=None):
+        '''Reserve a previously made placement.'''
+        try:
+            timeout = self._get_timeout()
+            url = self._api_endpoint() + '/placements/' + orch_id
+            self.headers['X-Auth-Token'] = auth_token
+            if hosts:
+                kwargs = {
+                    "locations": hosts,
+                    "resource_id": res_id
+                }
+                payload = json.dumps(kwargs)
+                req = requests.post(url, data=payload, headers=self.headers, timeout=timeout)
+            else:
+                req = requests.get(url, headers=self.headers, timeout=timeout)
+
+            # TODO(JD): Raise an exception IFF the scheduler can handle it
+            # req.raise_for_status()
+
+            response = json.loads(req.text)
+        except Exception:  # pylint: disable=W0702
+            # FIXME: Find which exceptions we should really handle here.
+            response = None
+
+        return response
diff --git a/valet_plugins/valet_plugins/heat/GroupAssignment.py b/valet_plugins/valet_plugins/heat/GroupAssignment.py
new file mode 100644
index 0000000..2f4cf88
--- /dev/null
+++ b/valet_plugins/valet_plugins/heat/GroupAssignment.py
@@ -0,0 +1,105 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''GroupAssignment Heat Resource Plugin'''
+
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class GroupAssignment(resource.Resource):
+    ''' A Group Assignment describes one or more resources assigned to a particular type of group.
+
+    Assignments can reference other assignments, so long as there are no circular references.
+    There are three types of groups: affinity, diversity, and exclusivity.
+ Exclusivity groups have a unique name, assigned through Valet. + + This resource is purely informational in nature and makes no changes to heat, nova, or cinder. + The Valet Heat Lifecycle Plugin passes this information to the optimizer. + ''' + + _RELATIONSHIP_TYPES = ( + AFFINITY, DIVERSITY, EXCLUSIVITY, + ) = ( + "affinity", "diversity", "exclusivity", + ) + + PROPERTIES = ( + GROUP_NAME, GROUP_TYPE, LEVEL, RESOURCES, + ) = ( + 'group_name', 'group_type', 'level', 'resources', + ) + + properties_schema = { + GROUP_NAME: properties.Schema( + properties.Schema.STRING, + _('Group name. Required for exclusivity groups.'), + # TODO(JD): Add a custom constraint + # Constraint must ensure a valid and allowed name + # when an exclusivity group is in use. + # This is presently enforced by valet-api and can also + # be pro-actively enforced here, so as to avoid unnecessary + # orchestration. + update_allowed=True + ), + GROUP_TYPE: properties.Schema( + properties.Schema.STRING, + _('Type of group.'), + constraints=[ + constraints.AllowedValues([AFFINITY, DIVERSITY, EXCLUSIVITY]) + ], + required=True, + update_allowed=True + ), + LEVEL: properties.Schema( + properties.Schema.STRING, + _('Level of relationship between resources.'), + constraints=[ + constraints.AllowedValues(['host', 'rack']), + ], + required=True, + update_allowed=True + ), + RESOURCES: properties.Schema( + properties.Schema.LIST, + _('List of one or more resource IDs.'), + required=True, + update_allowed=True + ), + } + + def handle_create(self): + '''Create resource''' + self.resource_id_set(self.physical_resource_name()) + + def handle_update(self, json_snippet, templ_diff, prop_diff): # pylint: disable=W0613 + '''Update resource''' + self.resource_id_set(self.physical_resource_name()) + + def handle_delete(self): + '''Delete resource''' + self.resource_id_set(None) + + +def resource_mapping(): + '''Map names to resources.''' + return {'ATT::Valet::GroupAssignment': GroupAssignment, } diff --git a/valet_plugins/valet_plugins/heat/README.md b/valet_plugins/valet_plugins/heat/README.md new file mode 100644 index 0000000..559338a --- /dev/null +++ b/valet_plugins/valet_plugins/heat/README.md @@ -0,0 +1,188 @@ +# OpenStack Heat Resource Plugins + +[Valet](https://github.com/att-comdev/valet/blob/master/README.md) works with OpenStack Heat through the use of Resource Plugins. This document explains what they are and how they work. As new plugins become formally introduced, they will be added here. + +The following is current as of Valet Release 1.0. + +## ATT::Valet::GroupAssignment + +*Formerly ATT::Valet::ResourceGroup* + +A Group Assignment describes one or more resources assigned to a particular type of group. Assignments can reference other assignments, so long as there are no circular references. + +There are three types of groups: affinity, diversity, and exclusivity. Exclusivity groups have a unique name, assigned through Valet. + +This resource is purely informational in nature and makes no changes to heat, nova, or cinder. The Valet Heat Lifecycle Plugin passes this information to the optimizer. + +### Properties + +``group_name`` (String) + +* Name of group. Required for exclusivity groups. NOT permitted for affinity and diversity groups at this time. +* Can be updated without replacement. + +``group_type`` (String) + +* Type of group. +* Allowed values: affinity, diversity, exclusivity +* Can be updated without replacement. +* Required property. + +``level`` (String) + +* Level of relationship between resources. 
+* See list below for allowed values.
+* Can be updated without replacement.
+* Required property.
+
+``resources`` (List)
+
+* List of associated resource IDs.
+* Can be updated without replacement.
+* Required property.
+
+#### Levels
+
+* ``rack``: Across racks, one resource per host.
+* ``host``: All resources on a single host.
+
+### Attributes
+
+None. (There is a ``show`` attribute, but it is not intended for production use.)
+
+### Example
+
+Given a Heat template with two server resources, declare an affinity between them at the rack level:
+
+```yaml
+  resources:
+    server_affinity:
+      type: ATT::Valet::GroupAssignment
+      properties:
+        group_type: affinity
+        level: rack
+        resources:
+        - {get_resource: server1}
+        - {get_resource: server2}
+```
+
+### Plugin Schema
+
+Use the OpenStack Heat CLI command `heat resource-type-show ATT::Valet::GroupAssignment` to view the schema.
+
+```json
+{
+  "support_status": {
+    "status": "SUPPORTED",
+    "message": null,
+    "version": null,
+    "previous_status": null
+  },
+  "attributes": {
+    "show": {
+      "type": "map",
+      "description": "Detailed information about resource."
+    }
+  },
+  "properties": {
+    "level": {
+      "description": "Level of relationship between resources.",
+      "required": true,
+      "update_allowed": true,
+      "type": "string",
+      "immutable": false,
+      "constraints": [
+        {
+          "allowed_values": [
+            "host",
+            "rack"
+          ]
+        }
+      ]
+    },
+    "resources": {
+      "type": "list",
+      "required": true,
+      "update_allowed": true,
+      "description": "List of one or more resource IDs.",
+      "immutable": false
+    },
+    "group_type": {
+      "description": "Type of group.",
+      "required": true,
+      "update_allowed": true,
+      "type": "string",
+      "immutable": false,
+      "constraints": [
+        {
+          "allowed_values": [
+            "affinity",
+            "diversity",
+            "exclusivity"
+          ]
+        }
+      ]
+    },
+    "group_name": {
+      "type": "string",
+      "required": false,
+      "update_allowed": true,
+      "description": "Group name. Required for exclusivity groups.",
+      "immutable": false
+    }
+  },
+  "resource_type": "ATT::Valet::GroupAssignment"
+}
+```
+
+### Future Work
+
+The following sections are proposals and *not* implemented. They are provided to aid in ongoing open discussion.
+
+#### Resource Namespace Changes
+
+The resource namespace may change to ``OS::Valet`` in future releases.
+
+#### Resource Properties
+
+Resource property characteristics are under ongoing review and subject to revision.
+
+#### Volume Resource Support
+
+Future placement support will formally include block storage services (e.g., Cinder).
+
+#### Additional Scheduling Levels
+
+Future levels could include:
+
+* ``cluster``: Across a cluster, one resource per cluster.
+* ``any``: Any level.
+
+#### Proposed Notation for 'diverse-affinity'
+
+Suppose we are given a set of server/volume pairs, and we'd like to treat each pair as an affinity group, and then treat all affinity groups diversely. The following notation makes this diverse-affinity pattern easier to describe, with no name repetition.
+
+```yaml
+  resources:
+    my_group_assignment:
+      type: ATT::Valet::GroupAssignment
+      properties:
+        group_name: my_even_awesomer_group
+        group_type: diverse-affinity
+        level: host
+        resources:
+        - - {get_resource: server1}
+          - {get_resource: volume1}
+        - - {get_resource: server2}
+          - {get_resource: volume2}
+        - - {get_resource: server3}
+          - {get_resource: volume3}
+```
+
+In this example, ``server1``/``volume1``, ``server2``/``volume2``, and ``server3``/``volume3`` are each treated as their own affinity group.
Then, each of these affinity groups is treated as a diversity group. The dash notation is specific to YAML (a superset of JSON and the markup language used by Heat).
+
+Given a hypothetical example of a Ceph deployment with three monitors, twelve OSDs, and one client, each paired with a volume, we would need to specify only three Heat resources instead of eighteen.
+
+## Contact
+
+Joe D'Andrea
diff --git a/valet_plugins/valet_plugins/heat/__init__.py b/valet_plugins/valet_plugins/heat/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/plugins/__init__.py b/valet_plugins/valet_plugins/plugins/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/plugins/heat/__init__.py b/valet_plugins/valet_plugins/plugins/heat/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/plugins/heat/plugins.py b/valet_plugins/valet_plugins/plugins/heat/plugins.py
new file mode 100644
index 0000000..d7c37c1
--- /dev/null
+++ b/valet_plugins/valet_plugins/plugins/heat/plugins.py
@@ -0,0 +1,158 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Valet Plugins for Heat'''
+
+from heat.engine import lifecycle_plugin
+
+from valet_plugins.common import valet_api
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+import string
+import uuid
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def validate_uuid4(uuid_string):
+    ''' Validate that a UUID string is in fact a valid uuid4.
+
+    Happily, the uuid module does the actual checking for us.
+    It is vital that the 'version' kwarg be passed to the
+    UUID() call, otherwise any 32-character hex string
+    is considered valid.
+    '''
+    try:
+        val = uuid.UUID(uuid_string, version=4)
+    except ValueError:
+        # If it's a value error, then the string
+        # is not a valid hex code for a UUID.
+        return False
+
+    # If the uuid_string is a valid hex code, but an invalid uuid4,
+    # UUID.__init__ will convert it to a valid uuid4.
+    # This is bad for validation purposes.
+
+    # uuid_string will sometimes have separators.
+    return string.replace(val.hex, '-', '') == \
+        string.replace(uuid_string, '-', '')
+
+
+class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin):
+    ''' Base class for pre-op and post-op work on a stack.
+
+    Implementations should extend this class and override the methods.
+    '''
+    def __init__(self):
+        self.api = valet_api.ValetAPIWrapper()
+        self.hints_enabled = False
+
+        # This plugin can only work if stack_scheduler_hints is true
+        cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
+        self.hints_enabled = cfg.CONF.stack_scheduler_hints
+
+    def _parse_stack_preview(self, dest, preview):
+        ''' Walk the preview list (possibly nested),
+
+        extracting parsed template dicts and storing modified versions in a flat dict.
+        '''
+        # The preview is either a list of resources or a single resource.
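+        # For illustration only (the dict shape is assumed from the code
+        # below, not from Valet documentation): each resource lands in
+        # 'dest' keyed by an orchestration UUID, roughly as
+        #   dest['ad7d1b45-...'] = {'type': 'OS::Nova::Server',
+        #                           'properties': {...},
+        #                           'name': 'server1'}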
+ if not isinstance(preview, list): + # Heat does not assign orchestration UUIDs to + # all resources, so we must make our own sometimes. + # This also means nested templates can't be supported yet. + + # FIXME: Either propose uniform use of UUIDs within + # Heat (related to Heat bug 1516807), or store + # resource UUIDs within the parsed template and + # use only Valet-originating UUIDs as keys. + if hasattr(preview, 'uuid') and \ + preview.uuid and validate_uuid4(preview.uuid): + key = preview.uuid + else: + # TODO(JD): Heat should be authoritative for UUID assignments. + # This will require a change to heat-engine. + # Looks like it may be: heat/db/sqlalchemy/models.py#L279 + # It could be that nested stacks aren't added to the DB yet. + key = str(uuid.uuid4()) + parsed = preview.parsed_template() + parsed['name'] = preview.name + # TODO(JD): Replace resource referenced names with their UUIDs. + dest[key] = parsed + else: + for item in preview: + self._parse_stack_preview(dest, item) + + def do_pre_op(self, cnxt, stack, current_stack=None, action=None): + ''' Method to be run by heat before stack operations. ''' + if not self.hints_enabled or stack.status != 'IN_PROGRESS': + return + + if action == 'DELETE': + self.api.plans_delete(stack, auth_token=cnxt.auth_token) + elif action == 'CREATE': + resources = dict() + specifications = dict() + reservations = dict() + + stack_preview = stack.preview_resources() + self._parse_stack_preview(resources, stack_preview) + + timeout = 60 + plan = { + 'plan_name': stack.id, + 'stack_id': stack.id, + 'timeout': '%d sec' % timeout, + } + if resources and len(resources) > 0: + plan['resources'] = resources + else: + return + if specifications: + plan['specifications'] = specifications + if reservations: + plan['reservations'] = reservations + + self.api.plans_create(stack, plan, auth_token=cnxt.auth_token) + + def do_post_op(self, cnxt, stack, current_stack=None, action=None, # pylint: disable=R0913 + is_stack_failure=False): + ''' Method to be run by heat after stack operations, including failures. + + On failure to execute all the registered pre_ops, this method will be + called if and only if the corresponding pre_op was successfully called. + On failures of the actual stack operation, this method will + be called if all the pre operations were successfully called. + ''' + pass + + def get_ordinal(self): + ''' An ordinal used to order class instances for pre and post operation execution. + + The values returned by get_ordinal are used to create a partial order + for pre and post operation method invocations. The default ordinal + value of 100 may be overridden. + If class1inst.ordinal() < class2inst.ordinal(), then the method on + class1inst will be executed before the method on class2inst. + If class1inst.ordinal() > class2inst.ordinal(), then the method on + class1inst will be executed after the method on class2inst. + If class1inst.ordinal() == class2inst.ordinal(), then the order of + method invocation is indeterminate. 
+        '''
+        return 100
diff --git a/valet_plugins/valet_plugins/plugins/nova/__init__.py b/valet_plugins/valet_plugins/plugins/nova/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/plugins/nova/valet_filter.py b/valet_plugins/valet_plugins/plugins/nova/valet_filter.py
new file mode 100644
index 0000000..488113f
--- /dev/null
+++ b/valet_plugins/valet_plugins/plugins/nova/valet_filter.py
@@ -0,0 +1,222 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Valet Nova Scheduler Filter'''
+
+from keystoneclient.v2_0 import client
+
+from nova.i18n import _
+from nova.i18n import _LI, _LW, _LE
+from nova.scheduler import filters
+
+from valet_plugins.common import valet_api
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class ValetFilter(filters.BaseHostFilter):
+    '''Filter on Valet assignment.'''
+
+    # Host state does not change within a request
+    run_filter_once_per_request = True
+
+    # Used to authenticate request. Update via _authorize()
+    _auth_token = None
+
+    def __init__(self):
+        '''Initializer'''
+        self.api = valet_api.ValetAPIWrapper()
+        self.opt_group_str = 'valet'
+        self.opt_failure_mode_str = 'failure_mode'
+        self.opt_project_name_str = 'admin_tenant_name'
+        self.opt_username_str = 'admin_username'
+        self.opt_password_str = 'admin_password'
+        self.opt_auth_uri_str = 'admin_auth_url'
+        self._register_opts()
+
+    def _authorize(self):
+        '''Keystone AuthN'''
+        opt = getattr(cfg.CONF, self.opt_group_str)
+        project_name = opt[self.opt_project_name_str]
+        username = opt[self.opt_username_str]
+        password = opt[self.opt_password_str]
+        auth_uri = opt[self.opt_auth_uri_str]
+
+        kwargs = {
+            'username': username,
+            'password': password,
+            'tenant_name': project_name,
+            'auth_url': auth_uri
+        }
+        keystone_client = client.Client(**kwargs)
+        self._auth_token = keystone_client.auth_token
+
+    def _is_same_host(self, host, location):  # pylint: disable=R0201
+        '''Returns True if host matches location'''
+        return host == location
+
+    def _register_opts(self):
+        '''Register Options'''
+        opts = []
+        option = cfg.StrOpt(self.opt_failure_mode_str, choices=['reject', 'yield'], default='reject',
+                            help=_('Mode to operate in if Valet planning fails for any reason.'))
+        opts.append(option)
+        option = cfg.StrOpt(self.opt_project_name_str, default=None, help=_('Valet Project Name'))
+        opts.append(option)
+        option = cfg.StrOpt(self.opt_username_str, default=None, help=_('Valet Username'))
+        opts.append(option)
+        option = cfg.StrOpt(self.opt_password_str, default=None, help=_('Valet Password'))
+        opts.append(option)
+        option = cfg.StrOpt(self.opt_auth_uri_str, default=None, help=_('Keystone Authorization API Endpoint'))
+        opts.append(option)
+
+        opt_group = cfg.OptGroup(self.opt_group_str)
+        cfg.CONF.register_group(opt_group)
+        cfg.CONF.register_opts(opts, group=opt_group)
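+
+    # Illustrative only (assumed deployment wiring, not part of this patch):
+    # the options registered above correspond to a [valet] section in
+    # nova.conf, and the filter itself is enabled through Nova's scheduler
+    # filter list, e.g.:
+    #
+    #   [valet]
+    #   failure_mode = yield
+    #   admin_tenant_name = service
+    #   admin_username = nova
+    #   admin_password = ***
+    #   admin_auth_url = http://127.0.0.1:5000/v2.0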
+
+    # TODO(JD): Factor out common code between this and the cinder filter
+    def filter_all(self, filter_obj_list, filter_properties):
+        '''Filter all hosts in one swell foop'''
+
+        hints_key = 'scheduler_hints'
+        orch_id_key = 'heat_resource_uuid'
+
+        ad_hoc = False
+        yield_all = False
+        location = None
+        res_id = None
+
+        opt = getattr(cfg.CONF, self.opt_group_str)
+        failure_mode = opt[self.opt_failure_mode_str]
+
+        # Get the resource_id (physical id)
+        request_spec = filter_properties.get('request_spec')
+        instance_properties = request_spec.get('instance_properties')
+        res_id = instance_properties.get('uuid')
+
+        # TODO(JD): If we can't reach Valet at all, we may opt to fail
+        # TODO(JD): all hosts depending on a TBD config flag.
+
+        if orch_id_key not in filter_properties.get(hints_key, {}):
+            self._authorize()
+            LOG.warn(_LW("Valet: Heat Stack Lifecycle Scheduler Hints not found. Performing ad-hoc placement."))
+            ad_hoc = True
+
+            # We'll need the flavor.
+            instance_type = filter_properties.get('instance_type')
+            flavor = instance_type.get('name')
+
+            # Because this wasn't orchestrated, there's no stack.
+            # We're going to compose a resource as if there was one.
+            # In this particular case we use the physical
+            # resource id as both the orchestration and stack id.
+            resources = {
+                res_id: {
+                    "properties": {
+                        "flavor": flavor,
+                    },
+                    "type": "OS::Nova::Server",
+                    "name": "ad_hoc_instance"
+                }
+            }
+
+            # Only add the AZ if it was expressly defined
+            res_properties = resources[res_id]["properties"]
+            a_zone = instance_properties.get('availability_zone')
+            if a_zone:
+                res_properties["availability_zone"] = a_zone
+
+            timeout = 60
+            plan = {
+                'plan_name': res_id,
+                'stack_id': res_id,
+                'timeout': '%d sec' % timeout,
+                'resources': resources
+            }
+            try:
+                response = self.api.plans_create(None, plan, auth_token=self._auth_token)
+            except Exception:
+                # TODO(JD): Get context from exception
+                LOG.error(_LE("Valet did not respond to ad hoc placement request."))
+                response = None
+
+            if response and response.get('plan'):
+                plan = response['plan']
+                if plan and plan.get('placements'):
+                    placements = plan['placements']
+                    if placements.get(res_id):
+                        placement = placements.get(res_id)
+                        location = placement['location']
+
+            if not location:
+                LOG.error(_LE("Valet ad-hoc placement unknown for resource id %s.") % res_id)
+                if failure_mode == 'yield':
+                    LOG.warn(_LW("Valet will yield to Nova for placement decisions."))
+                    yield_all = True
+                else:
+                    yield_all = False
+        else:
+            orch_id = filter_properties[hints_key][orch_id_key]
+            self._authorize()
+            hosts = [obj.host for obj in filter_obj_list]
+
+            try:
+                response = self.api.placement(orch_id, res_id, hosts=hosts, auth_token=self._auth_token)
+            except Exception:
+                LOG.error(_LE("Valet did not respond to placement request."))
+                response = None
+
+            if response and response.get('placement'):
+                placement = response['placement']
+                if placement.get('location'):
+                    location = placement['location']
+
+            if not location:
+                # TODO(JD): Get context from exception
+                LOG.error(_LE("Valet placement unknown for resource id {0}, orchestration id {1}.").format(res_id, orch_id))
+                if failure_mode == 'yield':
+                    LOG.warn(_LW("Valet will yield to Nova for placement decisions."))
+                    yield_all = True
+                else:
+                    yield_all = False
+
+        # Yield the hosts that pass.
+        # Like the Highlander, there can (should) be only one.
+        # It's possible there could be none if Valet can't solve it.
+        for obj in filter_obj_list:
+            if location:
+                match = self._is_same_host(obj.host, location)
+                if match:
+                    if ad_hoc:
+                        LOG.info(_LI("Valet ad-hoc placement for resource id {0}: {1}.").format(res_id, obj.host))
+                    else:
+                        LOG.info(_LI("Valet placement for resource id {0}, orchestration id {1}: {2}.").format(res_id, orch_id, obj.host))
+            else:
+                match = None
+            if yield_all or match:
+                yield obj
+
+    def host_passes(self, host_state, filter_properties):  # pylint: disable=W0613,R0201
+        '''Individual host pass check'''
+        # Intentionally let filter_all() handle in one swell foop.
+        return False
diff --git a/valet_plugins/valet_plugins/tests/__init__.py b/valet_plugins/valet_plugins/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/tests/base.py b/valet_plugins/valet_plugins/tests/base.py
new file mode 100644
index 0000000..26665b8
--- /dev/null
+++ b/valet_plugins/valet_plugins/tests/base.py
@@ -0,0 +1,49 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2016 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo_config import fixture as fixture_config
+from oslo_log import log as logging
+from oslotest.base import BaseTestCase
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Base(BaseTestCase):
+    """Test case base class for all unit tests."""
+
+    def __init__(self, *args, **kwds):
+        ''' '''
+        super(Base, self).__init__(*args, **kwds)
+
+        self.CONF = self.useFixture(fixture_config.Config()).conf
+
+    def setUp(self):
+        super(Base, self).setUp()
+
+    def run_test(self, stack_name, template_path):
+        ''' main function '''
+        pass
+
+    def validate(self, result):
+        self.assertEqual(True, result.ok, result.message)
+
+    def validate_test(self, result):
+        self.assertTrue(result)
+
+    def get_name(self):
+        pass
diff --git a/valet_plugins/valet_plugins/tests/unit/__init__.py b/valet_plugins/valet_plugins/tests/unit/__init__.py
new file mode 100644
index 0000000..8b0772b
--- /dev/null
+++ b/valet_plugins/valet_plugins/tests/unit/__init__.py
@@ -0,0 +1,4 @@
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(__file__) + "/mocks")
diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/__init__.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/__init__.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py
new file mode 100644
index 0000000..56d5d53
--- /dev/null
+++ b/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py
@@ -0,0 +1,7 @@
+'''
+Created on Sep 14, 2016
+
+@author: stack
+'''
+
+_ = None
diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/__init__.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/__init__.py
new file mode 100644
index
0000000..e69de29 diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py new file mode 100644 index 0000000..8044105 --- /dev/null +++ b/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py @@ -0,0 +1,25 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +Created on Sep 14, 2016 + +@author: stack +''' + + +class LifecyclePlugin(object): + ''' classdocs ''' + + def __init__(self, params): + ''' Constructor ''' diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/nova/__init__.py b/valet_plugins/valet_plugins/tests/unit/mocks/nova/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py b/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py new file mode 100644 index 0000000..373df7f --- /dev/null +++ b/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py @@ -0,0 +1,33 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +''' +Created on Sep 14, 2016 + +@author: stack +''' + + +def _(string): + pass + + +def _LI(string): + pass + + +def _LW(string): + pass + + +def _LE(string): + return string diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/__init__.py b/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py b/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py new file mode 100644 index 0000000..ad629e1 --- /dev/null +++ b/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py @@ -0,0 +1,24 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+''' +Created on Sep 15, 2016 + +@author: stack +''' + + +class BaseHostFilter(object): + ''' classdocs ''' + + def __init__(self, params): + ''' Constructor ''' diff --git a/valet_plugins/valet_plugins/tests/unit/test_plugins.py b/valet_plugins/valet_plugins/tests/unit/test_plugins.py new file mode 100644 index 0000000..bb9d8db --- /dev/null +++ b/valet_plugins/valet_plugins/tests/unit/test_plugins.py @@ -0,0 +1,57 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from valet_plugins.plugins.heat.plugins import ValetLifecyclePlugin +from valet_plugins.tests.base import Base + + +class TestPlugins(Base): + + def setUp(self): + super(TestPlugins, self).setUp() + + self.valet_life_cycle_plugin = self.init_ValetLifecyclePlugin() + + @mock.patch('valet_plugins.common.valet_api.ValetAPIWrapper') + def init_ValetLifecyclePlugin(self, mock_class): + with mock.patch('oslo_config.cfg.CONF'): + return ValetLifecyclePlugin() + + def test_do_pre_op(self): + stack = mock.MagicMock() + stack.status = "IN_PROGRESS" + + cnxt = mock.MagicMock() + cnxt.auth_token = "test_auth_token" + + # returns due to hints_enabled + self.valet_life_cycle_plugin.hints_enabled = False + self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="DELETE") + self.validate_test(self.valet_life_cycle_plugin.api.method_calls == []) + + # returns due to stack.status + self.valet_life_cycle_plugin.hints_enabled = True + stack.status = "NOT_IN_PROGRESS" + self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="DELETE") + self.validate_test(self.valet_life_cycle_plugin.api.method_calls == []) + + # action delete + self.valet_life_cycle_plugin.hints_enabled = True + stack.status = "IN_PROGRESS" + self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="DELETE") + self.validate_test("plans_delete" in self.valet_life_cycle_plugin.api.method_calls[0]) + + # action create + self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="CREATE") + self.validate_test("plans_create" in self.valet_life_cycle_plugin.api.method_calls[1]) diff --git a/valet_plugins/valet_plugins/tests/unit/test_valet_api.py b/valet_plugins/valet_plugins/tests/unit/test_valet_api.py new file mode 100644 index 0000000..cd1383b --- /dev/null +++ b/valet_plugins/valet_plugins/tests/unit/test_valet_api.py @@ -0,0 +1,32 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from valet_plugins.tests.base import Base +from valet_plugins.common.valet_api import ValetAPIWrapper, requests + + +class TestValetApi(Base): + + def setUp(self): + super(TestValetApi, self).setUp() + self.valet_api_wrapper = self.init_ValetAPIWrapper() + + @mock.patch.object(ValetAPIWrapper, "_register_opts") + def init_ValetAPIWrapper(self, mock_api): + mock_api.return_value = None + return ValetAPIWrapper() + + @mock.patch.object(requests, 'request') + def test_plans_create(self, mock_request): + mock_request.post.return_value = None diff --git a/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py b/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py new file mode 100644 index 0000000..7219265 --- /dev/null +++ b/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py @@ -0,0 +1,71 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneclient.v2_0 import client +import mock +from valet_plugins.common import valet_api +from valet_plugins.plugins.nova.valet_filter import ValetFilter +from valet_plugins.tests.base import Base + + +class TestResources(object): + def __init__(self, host_name): + self.host = host_name + + +class TestValetFilter(Base): + + def setUp(self): + super(TestValetFilter, self).setUp() + + client.Client = mock.MagicMock() + self.valet_filter = self.init_ValetFilter() + + @mock.patch.object(valet_api.ValetAPIWrapper, '_register_opts') + @mock.patch.object(ValetFilter, '_register_opts') + def init_ValetFilter(self, mock_opt, mock_init): + mock_init.return_value = None + mock_opt.return_value = None + return ValetFilter() + + @mock.patch.object(valet_api.ValetAPIWrapper, 'plans_create') + @mock.patch.object(valet_api.ValetAPIWrapper, 'placement') + def test_filter_all(self, mock_placement, mock_create): + mock_placement.return_value = None + mock_create.return_value = None + + with mock.patch('oslo_config.cfg.CONF') as config: + setattr(config, "valet", {self.valet_filter.opt_failure_mode_str: "yield", + self.valet_filter.opt_project_name_str: "test_admin_tenant_name", + self.valet_filter.opt_username_str: "test_admin_username", + self.valet_filter.opt_password_str: "test_admin_password", + self.valet_filter.opt_auth_uri_str: "test_admin_auth_url"}) + + filter_properties = {'request_spec': {'instance_properties': {'uuid': ""}}, + 'scheduler_hints': {'heat_resource_uuid': "123456"}, + 'instance_type': {'name': "instance_name"}} + + resources = self.valet_filter.filter_all([TestResources("first_host"), TestResources("second_host")], filter_properties) + + for resource in resources: + self.validate_test(resource.host in "first_host, second_host") + self.validate_test(mock_placement.called) + + filter_properties = {'request_spec': {'instance_properties': {'uuid': ""}}, + 'scheduler_hints': "scheduler_hints", + 'instance_type': {'name': "instance_name"}} + + resources = self.valet_filter.filter_all([TestResources("first_host"), TestResources("second_host")], filter_properties) + + for _ in resources: + 
self.validate_test(mock_create.called)
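
As a closing illustration (assumed standard Heat configuration, not taken from this patch): for the lifecycle plugin above to act on stack operations, Heat must run with stack scheduler hints enabled, e.g. in heat.conf:

   [DEFAULT]
   stack_scheduler_hints = True

The plugin checks this flag at startup (see ValetLifecyclePlugin.__init__) and does nothing when it is disabled, in which case the Nova filter falls back to ad-hoc placement as described in valet_filter.py.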