From c7082fa72ac73b23b48ce63fc82aa7da2d3e5d6a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 17 Oct 2015 16:03:27 -0400 Subject: [PATCH] Retire stackforge/libra --- .gitignore | 19 - .gitreview | 4 - .mailmap | 1 - .testr.conf | 4 - MANIFEST.in | 9 - README | 47 - README.rst | 7 + bin/client.py | 88 - bin/mnbtest.py | 54 - bin/upgrade_worker_V1toV2.sh | 216 -- bin/upgrade_worker_V2toV3.sh | 114 - build_pdf.sh | 6 - doc/admin_api/about.rst | 22 - doc/admin_api/api.rst | 775 ----- doc/admin_api/config.rst | 230 -- doc/admin_api/index.rst | 14 - doc/admin_api/schedulers.rst | 93 - doc/admin_api/stats-drivers.rst | 67 - doc/admin_api/v1api.rst | 378 --- doc/api/about.rst | 16 - doc/api/api.rst | 635 ---- doc/api/config.rst | 126 - doc/api/index.rst | 11 - doc/api/rest/algorithms.rst | 116 - doc/api/rest/health-monitor.rst | 360 -- doc/api/rest/limits.rst | 117 - doc/api/rest/load-balancer.rst | 817 ----- doc/api/rest/logs.rst | 70 - doc/api/rest/node.rst | 566 ---- doc/api/rest/protocols.rst | 107 - doc/api/rest/vip.rst | 101 - doc/architecture/index.rst | 10 - doc/architecture/logical.rst | 32 - doc/architecture/production.rst | 36 - doc/conf.py | 234 -- doc/config.rst | 236 -- doc/glossary.rst | 36 - doc/img/libralayout.png | Bin 65055 -> 0 bytes doc/img/production.png | Bin 47987 -> 0 bytes doc/index.rst | 14 - doc/install/development.rst | 283 -- doc/install/diskimage-builder.rst | 113 - doc/install/index.rst | 13 - doc/install/openstack.rst | 298 -- doc/install/ppa.rst | 32 - doc/install/production.rst | 21 - doc/install/verify.rst | 11 - doc/pool_mgm/about.rst | 19 - doc/pool_mgm/commands.rst | 154 - doc/pool_mgm/config.rst | 135 - doc/pool_mgm/index.rst | 11 - doc/sources/libralayout.odg | Bin 16493 -> 0 bytes doc/worker/about.rst | 86 - doc/worker/code.rst | 111 - doc/worker/config.rst | 40 - doc/worker/driver.rst | 71 - doc/worker/drivers/haproxy.rst | 64 - doc/worker/index.rst | 14 - doc/worker/messages.rst | 394 --- etc/libra.cfg | 210 -- etc/logging.conf | 35 - etc/mnb.cfg | 39 - libra/__init__.py | 18 - libra/admin_api/__init__.py | 144 - libra/admin_api/acl.py | 95 - libra/admin_api/app.py | 199 -- libra/admin_api/config.py | 26 - libra/admin_api/controllers/__init__.py | 13 - libra/admin_api/controllers/root.py | 49 - libra/admin_api/controllers/v1/__init__.py | 13 - libra/admin_api/controllers/v1/devices.py | 335 -- libra/admin_api/controllers/v1/v1.py | 36 - libra/admin_api/controllers/v2/__init__.py | 13 - libra/admin_api/controllers/v2/devices.py | 250 -- .../admin_api/controllers/v2/loadbalancers.py | 193 -- libra/admin_api/controllers/v2/status.py | 260 -- libra/admin_api/controllers/v2/user.py | 191 -- libra/admin_api/controllers/v2/v2_0.py | 42 - libra/admin_api/device_pool/__init__.py | 13 - libra/admin_api/device_pool/manage_pool.py | 397 --- libra/admin_api/expunge/__init__.py | 13 - libra/admin_api/expunge/expunge.py | 74 - libra/admin_api/library/__init__.py | 13 - libra/admin_api/library/rebuild.py | 99 - libra/admin_api/model/__init__.py | 27 - libra/admin_api/model/responses.py | 70 - libra/admin_api/model/validators.py | 49 - libra/admin_api/stats/__init__.py | 13 - libra/admin_api/stats/billing_sched.py | 192 -- libra/admin_api/stats/drivers/__init__.py | 13 - libra/admin_api/stats/drivers/base.py | 29 - .../stats/drivers/database/__init__.py | 13 - .../stats/drivers/database/driver.py | 76 - .../stats/drivers/datadog/__init__.py | 13 - .../admin_api/stats/drivers/datadog/driver.py | 55 - .../admin_api/stats/drivers/dummy/__init__.py | 13 - 
libra/admin_api/stats/drivers/dummy/driver.py | 29 - libra/admin_api/stats/offline_sched.py | 164 - libra/admin_api/stats/ping_sched.py | 246 -- libra/admin_api/stats/stats_gearman.py | 224 -- libra/admin_api/stats/stats_sched.py | 216 -- libra/api/__init__.py | 59 - libra/api/acl.py | 72 - libra/api/app.py | 159 - libra/api/config.py | 26 - libra/api/controllers/__init__.py | 13 - libra/api/controllers/connection_throttle.py | 61 - libra/api/controllers/health_monitor.py | 306 -- libra/api/controllers/limits.py | 47 - libra/api/controllers/load_balancers.py | 808 ----- libra/api/controllers/logs.py | 93 - libra/api/controllers/nodes.py | 440 --- libra/api/controllers/protocols.py | 37 - libra/api/controllers/root.py | 46 - libra/api/controllers/session_persistence.py | 61 - libra/api/controllers/v1.py | 46 - libra/api/controllers/virtualips.py | 73 - libra/api/library/__init__.py | 0 libra/api/library/exp.py | 68 - libra/api/library/ip_filter.py | 29 - libra/api/model/__init__.py | 27 - libra/api/model/responses.py | 72 - libra/api/model/validators.py | 126 - libra/api/templates/error.html | 12 - libra/api/wsme_overrides.py | 159 - libra/common/__init__.py | 13 - libra/common/api/__init__.py | 13 - libra/common/api/gearman_client.py | 554 ---- libra/common/api/lbaas.py | 302 -- libra/common/api/lbaas.sql | 162 - libra/common/api/mnb.py | 368 --- libra/common/api/server.py | 24 - libra/common/exc.py | 39 - libra/common/faults.py | 61 - libra/common/json_gearman.py | 40 - libra/common/log.py | 99 - libra/common/options.py | 93 - libra/gear/__init__.py | 2918 ----------------- libra/gear/acl.py | 289 -- libra/gear/constants.py | 83 - libra/mgm/__init__.py | 92 - libra/mgm/controllers/__init__.py | 13 - libra/mgm/controllers/build.py | 181 - libra/mgm/controllers/delete.py | 55 - libra/mgm/controllers/root.py | 79 - libra/mgm/controllers/vip.py | 222 -- libra/mgm/gearman_worker.py | 75 - libra/mgm/mgm.py | 93 - libra/mgm/nova.py | 268 -- libra/openstack/__init__.py | 0 libra/openstack/common/__init__.py | 0 libra/openstack/common/context.py | 86 - libra/openstack/common/crypto/__init__.py | 0 libra/openstack/common/crypto/utils.py | 179 - libra/openstack/common/eventlet_backdoor.py | 146 - libra/openstack/common/excutils.py | 101 - libra/openstack/common/fileutils.py | 139 - libra/openstack/common/fixture/__init__.py | 0 libra/openstack/common/fixture/config.py | 46 - libra/openstack/common/fixture/lockutils.py | 53 - libra/openstack/common/fixture/mockpatch.py | 51 - libra/openstack/common/fixture/moxstubout.py | 34 - libra/openstack/common/gettextutils.py | 373 --- libra/openstack/common/importutils.py | 68 - libra/openstack/common/jsonutils.py | 180 - libra/openstack/common/local.py | 47 - libra/openstack/common/lockutils.py | 278 -- libra/openstack/common/log.py | 625 ---- libra/openstack/common/loopingcall.py | 147 - libra/openstack/common/network_utils.py | 81 - libra/openstack/common/notifier/__init__.py | 14 - libra/openstack/common/notifier/api.py | 173 - .../openstack/common/notifier/log_notifier.py | 37 - .../common/notifier/no_op_notifier.py | 19 - .../openstack/common/notifier/rpc_notifier.py | 47 - .../common/notifier/rpc_notifier2.py | 53 - .../common/notifier/test_notifier.py | 22 - libra/openstack/common/rpc/__init__.py | 306 -- libra/openstack/common/rpc/amqp.py | 636 ---- libra/openstack/common/rpc/common.py | 506 --- libra/openstack/common/rpc/dispatcher.py | 178 - libra/openstack/common/rpc/impl_fake.py | 195 -- libra/openstack/common/rpc/impl_kombu.py | 856 ----- 
libra/openstack/common/rpc/impl_qpid.py | 833 ----- libra/openstack/common/rpc/impl_zmq.py | 818 ----- libra/openstack/common/rpc/matchmaker.py | 324 -- .../openstack/common/rpc/matchmaker_redis.py | 145 - libra/openstack/common/rpc/matchmaker_ring.py | 108 - libra/openstack/common/rpc/proxy.py | 226 -- libra/openstack/common/rpc/securemessage.py | 521 --- libra/openstack/common/rpc/serializer.py | 53 - libra/openstack/common/rpc/service.py | 78 - libra/openstack/common/rpc/zmq_receiver.py | 40 - libra/openstack/common/service.py | 462 --- libra/openstack/common/sslutils.py | 100 - libra/openstack/common/test.py | 53 - libra/openstack/common/threadgroup.py | 121 - libra/openstack/common/timeutils.py | 197 -- libra/openstack/common/uuidutils.py | 39 - libra/openstack/common/versionutils.py | 45 - libra/openstack/common/xmlutils.py | 74 - libra/tests/__init__.py | 13 - libra/tests/admin_api/__init__.py | 13 - libra/tests/api/__init__.py | 13 - libra/tests/api/v1_1/__init__.py | 38 - libra/tests/api_base.py | 37 - libra/tests/base.py | 179 - libra/tests/fake_body.json | 2 - libra/tests/mgm/___init__.py | 13 - libra/tests/mock_objects.py | 89 - libra/tests/worker/__init__.py | 13 - libra/tests/worker/test_controller.py | 484 --- libra/tests/worker/test_driver_haproxy.py | 141 - libra/tests/worker/test_stats.py | 90 - libra/worker/__init__.py | 34 - libra/worker/controller.py | 566 ---- libra/worker/drivers/__init__.py | 13 - libra/worker/drivers/base.py | 131 - libra/worker/drivers/haproxy/__init__.py | 36 - libra/worker/drivers/haproxy/driver.py | 435 --- libra/worker/drivers/haproxy/query.py | 135 - libra/worker/drivers/haproxy/services_base.py | 71 - libra/worker/drivers/haproxy/stats.py | 167 - .../worker/drivers/haproxy/ubuntu_services.py | 298 -- libra/worker/main.py | 152 - libra/worker/worker.py | 84 - openstack-common.conf | 14 - requirements.txt | 19 - setup.cfg | 31 - setup.py | 21 - test-requirements.txt | 9 - tox.ini | 30 - 232 files changed, 7 insertions(+), 34123 deletions(-) delete mode 100644 .gitignore delete mode 100644 .gitreview delete mode 100644 .mailmap delete mode 100644 .testr.conf delete mode 100644 MANIFEST.in delete mode 100644 README create mode 100644 README.rst delete mode 100755 bin/client.py delete mode 100755 bin/mnbtest.py delete mode 100755 bin/upgrade_worker_V1toV2.sh delete mode 100755 bin/upgrade_worker_V2toV3.sh delete mode 100755 build_pdf.sh delete mode 100644 doc/admin_api/about.rst delete mode 100644 doc/admin_api/api.rst delete mode 100644 doc/admin_api/config.rst delete mode 100644 doc/admin_api/index.rst delete mode 100644 doc/admin_api/schedulers.rst delete mode 100644 doc/admin_api/stats-drivers.rst delete mode 100644 doc/admin_api/v1api.rst delete mode 100644 doc/api/about.rst delete mode 100644 doc/api/api.rst delete mode 100644 doc/api/config.rst delete mode 100644 doc/api/index.rst delete mode 100644 doc/api/rest/algorithms.rst delete mode 100644 doc/api/rest/health-monitor.rst delete mode 100644 doc/api/rest/limits.rst delete mode 100644 doc/api/rest/load-balancer.rst delete mode 100644 doc/api/rest/logs.rst delete mode 100644 doc/api/rest/node.rst delete mode 100644 doc/api/rest/protocols.rst delete mode 100644 doc/api/rest/vip.rst delete mode 100644 doc/architecture/index.rst delete mode 100644 doc/architecture/logical.rst delete mode 100644 doc/architecture/production.rst delete mode 100644 doc/conf.py delete mode 100644 doc/config.rst delete mode 100644 doc/glossary.rst delete mode 100644 doc/img/libralayout.png delete mode 100644 
doc/img/production.png delete mode 100644 doc/index.rst delete mode 100644 doc/install/development.rst delete mode 100644 doc/install/diskimage-builder.rst delete mode 100644 doc/install/index.rst delete mode 100644 doc/install/openstack.rst delete mode 100644 doc/install/ppa.rst delete mode 100644 doc/install/production.rst delete mode 100644 doc/install/verify.rst delete mode 100644 doc/pool_mgm/about.rst delete mode 100644 doc/pool_mgm/commands.rst delete mode 100644 doc/pool_mgm/config.rst delete mode 100644 doc/pool_mgm/index.rst delete mode 100644 doc/sources/libralayout.odg delete mode 100644 doc/worker/about.rst delete mode 100644 doc/worker/code.rst delete mode 100644 doc/worker/config.rst delete mode 100644 doc/worker/driver.rst delete mode 100644 doc/worker/drivers/haproxy.rst delete mode 100644 doc/worker/index.rst delete mode 100644 doc/worker/messages.rst delete mode 100644 etc/libra.cfg delete mode 100644 etc/logging.conf delete mode 100644 etc/mnb.cfg delete mode 100644 libra/__init__.py delete mode 100644 libra/admin_api/__init__.py delete mode 100644 libra/admin_api/acl.py delete mode 100644 libra/admin_api/app.py delete mode 100644 libra/admin_api/config.py delete mode 100644 libra/admin_api/controllers/__init__.py delete mode 100644 libra/admin_api/controllers/root.py delete mode 100644 libra/admin_api/controllers/v1/__init__.py delete mode 100644 libra/admin_api/controllers/v1/devices.py delete mode 100644 libra/admin_api/controllers/v1/v1.py delete mode 100644 libra/admin_api/controllers/v2/__init__.py delete mode 100644 libra/admin_api/controllers/v2/devices.py delete mode 100644 libra/admin_api/controllers/v2/loadbalancers.py delete mode 100644 libra/admin_api/controllers/v2/status.py delete mode 100644 libra/admin_api/controllers/v2/user.py delete mode 100644 libra/admin_api/controllers/v2/v2_0.py delete mode 100644 libra/admin_api/device_pool/__init__.py delete mode 100644 libra/admin_api/device_pool/manage_pool.py delete mode 100644 libra/admin_api/expunge/__init__.py delete mode 100644 libra/admin_api/expunge/expunge.py delete mode 100644 libra/admin_api/library/__init__.py delete mode 100644 libra/admin_api/library/rebuild.py delete mode 100644 libra/admin_api/model/__init__.py delete mode 100644 libra/admin_api/model/responses.py delete mode 100644 libra/admin_api/model/validators.py delete mode 100644 libra/admin_api/stats/__init__.py delete mode 100644 libra/admin_api/stats/billing_sched.py delete mode 100644 libra/admin_api/stats/drivers/__init__.py delete mode 100644 libra/admin_api/stats/drivers/base.py delete mode 100644 libra/admin_api/stats/drivers/database/__init__.py delete mode 100644 libra/admin_api/stats/drivers/database/driver.py delete mode 100644 libra/admin_api/stats/drivers/datadog/__init__.py delete mode 100644 libra/admin_api/stats/drivers/datadog/driver.py delete mode 100644 libra/admin_api/stats/drivers/dummy/__init__.py delete mode 100644 libra/admin_api/stats/drivers/dummy/driver.py delete mode 100644 libra/admin_api/stats/offline_sched.py delete mode 100644 libra/admin_api/stats/ping_sched.py delete mode 100644 libra/admin_api/stats/stats_gearman.py delete mode 100644 libra/admin_api/stats/stats_sched.py delete mode 100644 libra/api/__init__.py delete mode 100644 libra/api/acl.py delete mode 100644 libra/api/app.py delete mode 100644 libra/api/config.py delete mode 100644 libra/api/controllers/__init__.py delete mode 100644 libra/api/controllers/connection_throttle.py delete mode 100644 libra/api/controllers/health_monitor.py delete 
mode 100644 libra/api/controllers/limits.py delete mode 100644 libra/api/controllers/load_balancers.py delete mode 100644 libra/api/controllers/logs.py delete mode 100644 libra/api/controllers/nodes.py delete mode 100644 libra/api/controllers/protocols.py delete mode 100644 libra/api/controllers/root.py delete mode 100644 libra/api/controllers/session_persistence.py delete mode 100644 libra/api/controllers/v1.py delete mode 100644 libra/api/controllers/virtualips.py delete mode 100644 libra/api/library/__init__.py delete mode 100644 libra/api/library/exp.py delete mode 100644 libra/api/library/ip_filter.py delete mode 100644 libra/api/model/__init__.py delete mode 100644 libra/api/model/responses.py delete mode 100644 libra/api/model/validators.py delete mode 100644 libra/api/templates/error.html delete mode 100644 libra/api/wsme_overrides.py delete mode 100644 libra/common/__init__.py delete mode 100644 libra/common/api/__init__.py delete mode 100644 libra/common/api/gearman_client.py delete mode 100644 libra/common/api/lbaas.py delete mode 100644 libra/common/api/lbaas.sql delete mode 100644 libra/common/api/mnb.py delete mode 100644 libra/common/api/server.py delete mode 100644 libra/common/exc.py delete mode 100644 libra/common/faults.py delete mode 100644 libra/common/json_gearman.py delete mode 100644 libra/common/log.py delete mode 100644 libra/common/options.py delete mode 100644 libra/gear/__init__.py delete mode 100644 libra/gear/acl.py delete mode 100644 libra/gear/constants.py delete mode 100644 libra/mgm/__init__.py delete mode 100644 libra/mgm/controllers/__init__.py delete mode 100644 libra/mgm/controllers/build.py delete mode 100644 libra/mgm/controllers/delete.py delete mode 100644 libra/mgm/controllers/root.py delete mode 100644 libra/mgm/controllers/vip.py delete mode 100644 libra/mgm/gearman_worker.py delete mode 100644 libra/mgm/mgm.py delete mode 100644 libra/mgm/nova.py delete mode 100644 libra/openstack/__init__.py delete mode 100644 libra/openstack/common/__init__.py delete mode 100644 libra/openstack/common/context.py delete mode 100644 libra/openstack/common/crypto/__init__.py delete mode 100644 libra/openstack/common/crypto/utils.py delete mode 100644 libra/openstack/common/eventlet_backdoor.py delete mode 100644 libra/openstack/common/excutils.py delete mode 100644 libra/openstack/common/fileutils.py delete mode 100644 libra/openstack/common/fixture/__init__.py delete mode 100644 libra/openstack/common/fixture/config.py delete mode 100644 libra/openstack/common/fixture/lockutils.py delete mode 100644 libra/openstack/common/fixture/mockpatch.py delete mode 100644 libra/openstack/common/fixture/moxstubout.py delete mode 100644 libra/openstack/common/gettextutils.py delete mode 100644 libra/openstack/common/importutils.py delete mode 100644 libra/openstack/common/jsonutils.py delete mode 100644 libra/openstack/common/local.py delete mode 100644 libra/openstack/common/lockutils.py delete mode 100644 libra/openstack/common/log.py delete mode 100644 libra/openstack/common/loopingcall.py delete mode 100644 libra/openstack/common/network_utils.py delete mode 100644 libra/openstack/common/notifier/__init__.py delete mode 100644 libra/openstack/common/notifier/api.py delete mode 100644 libra/openstack/common/notifier/log_notifier.py delete mode 100644 libra/openstack/common/notifier/no_op_notifier.py delete mode 100644 libra/openstack/common/notifier/rpc_notifier.py delete mode 100644 libra/openstack/common/notifier/rpc_notifier2.py delete mode 100644 
libra/openstack/common/notifier/test_notifier.py delete mode 100644 libra/openstack/common/rpc/__init__.py delete mode 100644 libra/openstack/common/rpc/amqp.py delete mode 100644 libra/openstack/common/rpc/common.py delete mode 100644 libra/openstack/common/rpc/dispatcher.py delete mode 100644 libra/openstack/common/rpc/impl_fake.py delete mode 100644 libra/openstack/common/rpc/impl_kombu.py delete mode 100644 libra/openstack/common/rpc/impl_qpid.py delete mode 100644 libra/openstack/common/rpc/impl_zmq.py delete mode 100644 libra/openstack/common/rpc/matchmaker.py delete mode 100644 libra/openstack/common/rpc/matchmaker_redis.py delete mode 100644 libra/openstack/common/rpc/matchmaker_ring.py delete mode 100644 libra/openstack/common/rpc/proxy.py delete mode 100644 libra/openstack/common/rpc/securemessage.py delete mode 100644 libra/openstack/common/rpc/serializer.py delete mode 100644 libra/openstack/common/rpc/service.py delete mode 100644 libra/openstack/common/rpc/zmq_receiver.py delete mode 100644 libra/openstack/common/service.py delete mode 100644 libra/openstack/common/sslutils.py delete mode 100644 libra/openstack/common/test.py delete mode 100644 libra/openstack/common/threadgroup.py delete mode 100644 libra/openstack/common/timeutils.py delete mode 100644 libra/openstack/common/uuidutils.py delete mode 100644 libra/openstack/common/versionutils.py delete mode 100644 libra/openstack/common/xmlutils.py delete mode 100644 libra/tests/__init__.py delete mode 100644 libra/tests/admin_api/__init__.py delete mode 100644 libra/tests/api/__init__.py delete mode 100644 libra/tests/api/v1_1/__init__.py delete mode 100644 libra/tests/api_base.py delete mode 100644 libra/tests/base.py delete mode 100644 libra/tests/fake_body.json delete mode 100644 libra/tests/mgm/___init__.py delete mode 100644 libra/tests/mock_objects.py delete mode 100644 libra/tests/worker/__init__.py delete mode 100644 libra/tests/worker/test_controller.py delete mode 100644 libra/tests/worker/test_driver_haproxy.py delete mode 100644 libra/tests/worker/test_stats.py delete mode 100644 libra/worker/__init__.py delete mode 100644 libra/worker/controller.py delete mode 100644 libra/worker/drivers/__init__.py delete mode 100644 libra/worker/drivers/base.py delete mode 100644 libra/worker/drivers/haproxy/__init__.py delete mode 100644 libra/worker/drivers/haproxy/driver.py delete mode 100644 libra/worker/drivers/haproxy/query.py delete mode 100644 libra/worker/drivers/haproxy/services_base.py delete mode 100644 libra/worker/drivers/haproxy/stats.py delete mode 100644 libra/worker/drivers/haproxy/ubuntu_services.py delete mode 100644 libra/worker/main.py delete mode 100644 libra/worker/worker.py delete mode 100644 openstack-common.conf delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore deleted file mode 100644 index d29e0084..00000000 --- a/.gitignore +++ /dev/null @@ -1,19 +0,0 @@ -*.egg -*.egg-info -*.pyc -*.swp -.cache -.testrepository -.tox -AUTHORS -build -ChangeLog -debian/files -debian/libra -debian/libra.debhelper.log -debian/libra.postinst.debhelper -debian/libra.preinst.debhelper -debian/libra.prerm.debhelper -debian/libra.substvars -dist -doc/html diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 76a6eee9..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=stackforge/libra.git diff --git 
a/.mailmap b/.mailmap deleted file mode 100644 index 34420bca..00000000 --- a/.mailmap +++ /dev/null @@ -1 +0,0 @@ - <=> diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 60477e87..00000000 --- a/.testr.conf +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} ${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index da06cc67..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,9 +0,0 @@ -include README - -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc - -graft doc -graft etc diff --git a/README b/README deleted file mode 100644 index aaa7c5b1..00000000 --- a/README +++ /dev/null @@ -1,47 +0,0 @@ -Description ------------- - -Libra is a tool set to create and manage load balancers in an OpenStack -environment. - -Tools ------- - - * libra_pool_mgm - - Python daemon that manages a pool of Nova instances. - - * libra_worker - - Python daemon that will receive messages from an API server via - a Gearman job server to create/modify load balancers on the local - machine. - - * libra_api - - Python daemon to act as the client API server. - - * libra_admin_api - - Python daemon providing an administrative API server primarily for - libra_pool_mgm and libra_statsd - -Running Tests -------------- - -Tox is the best way to run the tests. Tox, if unavailable, can be installed -via the Python pip command: - - $ pip install tox - -Once it is installed, run the tests: - - $ tox - -More Documentation ------------------- - -You can build the complete documentation with: - - $ pip install Sphinx - $ python setup.py build_sphinx diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..9006052a --- /dev/null +++ b/README.rst @@ -0,0 +1,7 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git source code +management system. To see the contents of this repository before it reached +its end of life, please check out the previous commit with +"git checkout HEAD^1". + diff --git a/bin/client.py b/bin/client.py deleted file mode 100755 index e526e652..00000000 --- a/bin/client.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -############################################################################## -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# -# See the License for the specific language governing permissions and -# limitations under the License.
-############################################################################## - -import json -import socket -from gearman import GearmanClient, DataEncoder, JOB_UNKNOWN - - -class JSONDataEncoder(DataEncoder): - @classmethod - def encode(cls, encodable_object): - s = json.dumps(encodable_object) - print("Encoding JSON object to string: %s" % s) - return s - - @classmethod - def decode(cls, decodable_string): - s = json.loads(decodable_string) - print("Decoding string (%s) to JSON object" % s) - return s - - -class JSONGearmanClient(GearmanClient): - data_encoder = JSONDataEncoder - - -def check_request_status(job_request): - if job_request.complete: - print "Job %s finished! Result: %s -\n%s" % (job_request.job.unique, - job_request.state, - json.dumps( - job_request.result, - indent=2 - )) - elif job_request.timed_out: - print "Job %s timed out!" % job_request.unique - elif job_request.state == JOB_UNKNOWN: - print "Job %s connection failed!" % job_request.unique - - -def main(): - hostname = socket.gethostname() - task = hostname - client = JSONGearmanClient(['localhost:4730']) - data = """ -{ - "hpcs_action": "update", - "loadbalancers": [ - { - "name": "a-new-loadbalancer", - "protocol": "http", - "nodes": [ - { - "address": "10.1.1.1", - "port": "80" - }, - { - "address": "10.1.1.2", - "port": "81" - } - ] - } - ] -} -""" - - # Worker class expects the data as a JSON object, not string - json_data = json.loads(data) - request = client.submit_job(task, json_data) - check_request_status(request) - -if __name__ == "__main__": - main() diff --git a/bin/mnbtest.py b/bin/mnbtest.py deleted file mode 100755 index cc67fbf8..00000000 --- a/bin/mnbtest.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python -############################################################################## -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# -# See the License for the specific language governing permissions and -# limitations under the License. 
-############################################################################## -import logging as std_logging -import time - -from oslo.config import cfg -from libra.openstack.common import log as logging -from libra.common.api.mnb import update_mnb -from libra import __version__ - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -CONF.register_opts([ - cfg.IntOpt('testcount', - metavar='COUNT', - default=1, - help='Number of messages to send') -]) - - -def main(): - CONF(project='mnbtest', version=__version__) - logging.setup('mnbtest') - LOG.debug('Configuration:') - - print "Starting Test" - print "LOG FILE = {0}".format(CONF.log_file) - LOG.info('STARTING MNBTEST') - CONF.log_opt_values(LOG, std_logging.DEBUG) - - LOG.info("Calling update_mnb with {0} messages".format(CONF.testcount)) - update_mnb('lbaas.instance.test', CONF.testcount, 456) - - time.sleep(30) - -if __name__ == "__main__": - main() diff --git a/bin/upgrade_worker_V1toV2.sh b/bin/upgrade_worker_V1toV2.sh deleted file mode 100755 index 9318561c..00000000 --- a/bin/upgrade_worker_V1toV2.sh +++ /dev/null @@ -1,216 +0,0 @@ -#!/bin/bash -############################################################################## -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# -# See the License for the specific language governing permissions and -# limitations under the License. -############################################################################## - - -############################################################################## -# DESCRIPTION -# This script is used to manually upgrade a worker node running a 1.0 -# version of Libra to the 2.0 version. This is specifically targeted to -# Ubuntu nodes, but may work on other distributions, though that is -# untested. It makes some assumptions about the current setup. -# -# This script is designed to be safe to run multiple times, in case an -# error is encountered and it must be run again. -# -# EXIT VALUES -# 0 on success, 1 on error -############################################################################## - -if [ $USER != "root" ] -then - echo "Must be run as root user." - exit 1 -fi - -LOG="/tmp/update_node.log" - -if [ -e ${LOG} ] -then - rm -f ${LOG} -fi - -################################################# -# Update sudo privs by inserting '/usr/bin/chown' -################################################# -file="/etc/sudoers" -echo "Updating SUDO file $file" | tee -a ${LOG} - -# Uncomment below if you run the libra_worker process as the 'haproxy' user. - -#sed -i.bak -e '/^%haproxy/ c\ -#%haproxy ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /usr/bin/socat, /bin/chown' ${file} -#if [ $? -ne 0 ] -#then -# echo "1st edit of ${file} failed." | tee -a ${LOG} -# exit 1 -#fi - -sed -i.bak -e '/^%libra/ c\ -%libra ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /usr/bin/socat, /bin/chown' ${file} -if [ $? -ne 0 ] -then - echo "2nd edit of ${file} failed." 
| tee -a ${LOG} - exit 1 -fi - -if [ -e ${file}.bak ] -then - rm ${file}.bak -fi - - -######################## -# Make new log directory -######################## -logdir="/mnt/log" -echo "Creating ${logdir}" | tee -a ${LOG} - -if [ ! -e ${logdir} ] -then - mkdir ${logdir} - if [ $? -ne 0 ] - then - echo "Making log directory ${logdir} failed" | tee -a ${LOG} - exit 1 - fi -fi - - -####################################### -# Create /etc/rsyslog.d/10-haproxy.conf -####################################### -haproxy_syslog="/etc/rsyslog.d/10-haproxy.conf" -echo "Creating ${haproxy_syslog}" | tee -a ${LOG} - -cat > ${haproxy_syslog} <<'EOF' -$template Haproxy,"%TIMESTAMP% %msg%\n" -local0.* -/mnt/log/haproxy.log;Haproxy -# don't log anywhere else -local0.* ~ -EOF - -if [ $? -ne 0 ] -then - echo "Creating ${haproxy_syslog} failed." | tee -a ${LOG} - exit 1 -fi - - -################################# -# Create /etc/logrotate.d/haproxy -################################# -haproxy_logrotate="/etc/logrotate.d/haproxy" -echo "Creating ${haproxy_logrotate}" | tee -a ${LOG} - -cat > ${haproxy_logrotate} <<'EOF' -/mnt/log/haproxy.log { - weekly - missingok - rotate 7 - compress - delaycompress - notifempty - create 640 syslog adm - sharedscripts - postrotate - /etc/init.d/haproxy reload > /dev/null - endscript -} -EOF - -if [ $? -ne 0 ] -then - echo "Creating ${haproxy_logrotate} failed." | tee -a ${LOG} - exit 1 -fi - - -########################## -# Edit current haproxy.cfg -########################## -haproxycfg="/etc/haproxy/haproxy.cfg" -echo "Updating HAProxy config file ${haproxycfg}" | tee -a ${LOG} - -if [ -e ${haproxycfg} ] -then - sed -i.bak -e '/local1 notice/d' ${haproxycfg} - if [ $? -ne 0 ] - then - echo "Editing ${haproxycfg} failed." | tee -a ${LOG} - exit 1 - fi -fi - -if [ -e ${haproxycfg}.bak ] -then - rm -f ${haproxycfg}.bak -fi - - -############## -# Update Libra -############## -pkgversion="libra-2.0" -pkglocation="/tmp" -tarball="http://tarballs.openstack.org/libra/${pkgversion}.tar.gz" - -echo "Downloading ${pkgversion} tarball to ${pkglocation}" | tee -a ${LOG} - -cd $pkglocation -if [ $? -ne 0 ]; then echo "cd to ${pkglocation} failed" | tee -a ${LOG}; exit 1; fi -curl -Osf ${tarball} -if [ $? -ne 0 ]; then echo "Failed to download ${tarball}" | tee -a ${LOG}; exit 1; fi - -echo "Updating Libra to ${pkgversion}" | tee -a ${LOG} - -tar zxf ${pkgversion}.tar.gz 2>&1 >> ${LOG} -if [ $? -ne 0 ]; then echo "tar failed" | tee -a ${LOG}; exit 1; fi -cd ${pkgversion} -if [ $? -ne 0 ]; then echo "cd to ${pkgversion} failed" | tee -a ${LOG}; exit 1; fi -python setup.py install --install-layout=deb 2>&1 >> ${LOG} -if [ $? -ne 0 ]; then echo "python install failed" | tee -a ${LOG}; exit 1; fi - - -################## -# Restart rsyslogd -################## -echo "Restarting rsyslogd" | tee -a ${LOG} -service rsyslog restart 2>&1 >> ${LOG} -if [ $? -ne 0 ]; then echo "rsyslog restart failed" | tee -a ${LOG}; exit 1; fi - - -################# -# Restart haproxy -################# -echo "Restarting haproxy" | tee -a ${LOG} -service haproxy restart 2>&1 >> ${LOG} -if [ $? -ne 0 ]; then echo "haproxy restart failed" | tee -a ${LOG}; exit 1; fi - - -###################### -# Restart libra_worker -###################### -echo "Stopping libra_worker" | tee -a ${LOG} -killall libra_worker 2>&1 >> ${LOG} -#if [ $? -ne 0 ]; then echo "killing libra_worker failed" | tee -a ${LOG}; exit 1; fi - -echo "Starting libra_worker" | tee -a ${LOG} -/usr/bin/libra_worker -c /etc/libra.cfg 2>&1 >> ${LOG} -if [ $? 
-ne 0 ]; then echo "starting libra_worker failed" | tee -a ${LOG}; exit 1; fi - -exit 0 diff --git a/bin/upgrade_worker_V2toV3.sh b/bin/upgrade_worker_V2toV3.sh deleted file mode 100755 index 9eddb178..00000000 --- a/bin/upgrade_worker_V2toV3.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/bash -############################################################################## -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# -# See the License for the specific language governing permissions and -# limitations under the License. -############################################################################## - - -############################################################################## -# DESCRIPTION -# This script is used to manually upgrade a worker node running a 2.0 -# version of Libra to the 3.0 version. This is specifically targeted to -# Ubuntu nodes, but may work on other distributions, though that is -# untested. It makes some assumptions about the current setup. -# -# This script is designed to be safe to run multiple times, in case an -# error is encountered and it must be run again. -# -# EXIT VALUES -# 0 on success, 1 on error -############################################################################## - -if [ $USER != "root" ] -then - echo "Must be run as root user." - exit 1 -fi - -LOG="/tmp/update_node.log" - -if [ -e ${LOG} ] -then - rm -f ${LOG} -fi - -################################################# -# Update sudo privs by inserting '/usr/bin/chown' -################################################# -file="/etc/sudoers" -echo "Updating SUDO file $file" | tee -a ${LOG} - -sed -i.bak -e '/^%haproxy/ c\ -%haproxy ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /bin/chown' ${file} -if [ $? -ne 0 ] -then - echo "Edit of ${file} failed." | tee -a ${LOG} - exit 1 -fi - -if [ -e ${file}.bak ] -then - rm ${file}.bak -fi - -######################### -# Chown on haproxy socket -######################### -haproxysock="/var/run/haproxy-stats.socket" -echo "Doing chown of haproxy socket ${haproxysock}" | tee -a ${LOG} - -if [ -e ${haproxysock} ] -then - chown haproxy:haproxy ${haproxysock} - if [ $? -ne 0 ] - then - echo "chown on ${haproxysock} failed." | tee -a ${LOG} - exit 1 - fi -fi - - -########################## -# Edit current haproxy.cfg -########################## -haproxycfg="/etc/haproxy/haproxy.cfg" -echo "Updating HAProxy config file ${haproxycfg}" | tee -a ${LOG} - -if [ -e ${haproxycfg} ] -then - sed -i.bak -e '/stats socket/ c\ - stats socket /var/run/haproxy-stats.socket user haproxy group haproxy mode operator' ${haproxycfg} - if [ $? -ne 0 ] - then - echo "Editing ${haproxycfg} failed." | tee -a ${LOG} - exit 1 - fi -fi - -if [ -e ${haproxycfg}.bak ] -then - rm -f ${haproxycfg}.bak -fi - - -################# -# Restart haproxy -################# -echo "Restarting haproxy" | tee -a ${LOG} -service haproxy restart 2>&1 >> ${LOG} -if [ $? 
-ne 0 ]; then echo "haproxy restart failed" | tee -a ${LOG}; exit 1; fi - - -exit 0 diff --git a/build_pdf.sh b/build_pdf.sh deleted file mode 100755 index edf0d43f..00000000 --- a/build_pdf.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -python setup.py build_sphinx_latex -# Fix option double dashes in latex output -perl -i -pe 's/\\bfcode\{--(.*)\}/\\bfcode\{-\{\}-\1\}/g' build/sphinx/latex/*.tex -perl -i -pe 's/\\index\{(.*?)--(.*?)\}/\\index\{\1-\{\}-\2\}/g' build/sphinx/latex/*.tex -make -C build/sphinx/latex all-pdf diff --git a/doc/admin_api/about.rst b/doc/admin_api/about.rst deleted file mode 100644 index 3123352d..00000000 --- a/doc/admin_api/about.rst +++ /dev/null @@ -1,22 +0,0 @@ -Description -=========== - -Purpose -------- - -The Admin API server listens for REST+JSON connections to provide information -about the state of Libra to external systems. - -Additionally, the Admin API has several schedulers which automatically maintain -the health of the Libra system and the connected Load Balancer devices. - -Design ------- - -Similar to the main API server, it uses an Eventlet WSGI web server frontend -with Pecan+WSME to process requests. SQLAlchemy+MySQL is used to access the -data store. The main internal difference (apart from the API itself) is that the -Admin API server doesn't use keystone or gearman. - -It spawns several scheduled threads to run tasks such as building new devices -for the pool, monitoring load balancer devices and maintaining IP addresses. diff --git a/doc/admin_api/api.rst b/doc/admin_api/api.rst deleted file mode 100644 index 0a3ac9af..00000000 --- a/doc/admin_api/api.rst +++ /dev/null @@ -1,775 +0,0 @@ -Admin API REST Interface (v2) -============================= - -Introduction ------------- -This is the new Admin API interface for the LBaaS system. It will allow the engineers as well as support teams to perform basic tasks on the LBaaS system without direct access using Salt, SSH or MySQL. It can also be used to automate tasks such as monitoring overall system health. - -Authentication & Security -------------------------- -Authentication will be performed in a similar way to the main API server, via keystone, for anyone registered to our service. There will be, however, one crucial addition. The database will contain a list of tenant IDs that can actually use the Admin API; anyone else will get a 401 response. There will also be two levels of access, which for now we will call 'staff' (USER) and 'administrators' (ADMIN). In addition to this, the Admin API's port will be restricted to users on a VPN. - -Since this is an Admin API, all actions should be well logged along with the tenantID of the user who actioned them. - -API Sections ------------- -The Admin API will initially be divided into three distinct sections: Devices, LoadBalancers and Status. Once we have per-customer defined limits, a new section should be added to support that.
In the table below the following conventions are used: - -{baseURI} - the endpoint address/IP for the Admin API server - -{ver} - The version number (1.0 already exists as a system Admin API, 2.0 shall be the first version) - -{lbID} - The load balancer ID - -{deviceID} - The device ID - -+---------------+----------------------------------+--------+---------------------------------------------+ -| Resource | Operation | Method | Path | -+===============+==================================+========+=============================================+ -| Devices | Get a list of devices | GET | {baseURI}/{ver}/devices | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Devices | Get a single device | GET | {baseURI}/{ver}/devices/{deviceID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Devices | Get a device version | GET | {baseURI}/{ver}/devices/{deviceID}/discover | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Devices | Deletes a device | DELETE | {baseURI}/{ver}/devices/{deviceID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| LoadBalancers | Get a list of load balancers | GET | {baseURI}/{ver}/loadbalancers | -+---------------+----------------------------------+--------+---------------------------------------------+ -| LoadBalancers | Gets a single load balancer | GET | {baseURI}/{ver}/loadbalancers/{lbID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| LoadBalancers | Delete a single load balancer | DELETE | {baseURI}/{ver}/loadbalancers/{lbID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Get a pool status | GET | {baseURI}/{ver}/status/pool | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Get the counters | GET | {baseURI}/{ver}/status/counters | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Get a service status | GET | {baseURI}/{ver}/status/service | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Get the global service limits | GET | {baseURI}/{ver}/status/limits | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Change the global service limits | PUT | {baseURI}/{ver}/status/limits | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Get a tenant's service limits | GET | {baseURI}/{ver}/status/limits/{tenantID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| Status | Change a tenant's service limits | PUT | {baseURI}/{ver}/status/limits/{tenantID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| User | Get a list of Admin API users | GET | {baseURI}/{ver}/user | -+---------------+----------------------------------+--------+---------------------------------------------+ -| User | Get an Admin API user | GET | {baseURI}/{ver}/user/{tenantID} | 
-+---------------+----------------------------------+--------+---------------------------------------------+ -| User | Delete an Admin API user | DELETE | {baseURI}/{ver}/user/{tenantID} | -+---------------+----------------------------------+--------+---------------------------------------------+ -| User | Add an Admin API user | POST | {baseURI}/{ver}/user | -+---------------+----------------------------------+--------+---------------------------------------------+ -| User | Modify an Admin API user | PUT | {baseURI}/{ver}/user/{tenantID} | -+---------------+----------------------------------+--------+---------------------------------------------+ - -Get a list of devices ---------------------- -This will be used to get either a whole list of devices or a filtered list given certain criteria. A future expansion to this would be to add pagination support. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/devices - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -* status - A specified status type to filter by such as 'OFFLINE', 'ONLINE' or 'ERROR' -* name - A specified device name (in a future version we could accept wildcards) -* ip - A specified device ip address (in a future version we could accept ranges) -* vip - A specified floating ip address (in a future version we could accept ranges) - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - {"devices": [ - { - "id": 123, - "name": "7908c1f2-1bce-11e3-bcd3-fa163e9790b4", - "status": "OFFLINE", - "ip": "15.125.30.123", - "vip": null, - "created": "2013-05-12 12:13:54", - "updated": "2013-06-02 14:21:31" - } - ]} - -Get a single device -------------------- -This will be used to get details of a single device specified by its ID. This will contain additional information such as load balancers attached to a given device. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/devices/{id} - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error), 404 (Not found) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not applicable - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "id": 123, - "name": "7908c1f2-1bce-11e3-bcd3-fa163e9790b4", - "status": "ONLINE", - "ip": "15.125.30.123", - "vip": "15.125.50.45", - "created": "2013-05-12 12:13:54", - "updated": "2013-06-02 14:21:31", - "loadBalancers": [ - { - "id": 5263 - } - ] - } - -Get a device version --------------------- -This will be used to send a DISCOVER gearman message to a given device's worker and get its version response. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/devices/{id}/discover - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error), 404 (Not found) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not applicable - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "id": 123, - "version": "1.0", - "release": "1.0.alpha.3.gca84083" - } - -Delete a device ---------------- -This will be used to delete a device. If the device has load balancers attached, these will be moved to a new device.
Typically this could be used for worker upgrades, going through each device and rebuilding it using a pool with newer workers. If there are no load balancers attached, it should just mark the device for deletion; in this scenario a 204 with an empty body will be returned. - -Request type -^^^^^^^^^^^^ -DELETE - -Path -^^^^ -/v2.0/devices/{id} - -Access -^^^^^^ -It should be available to 'administrators' only. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 or 204 - -Failure: 400 (Bad request), 500 (Service error), 404 (Not found) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not applicable - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "oldId": 123, - "newId": 148 - } - -Get a list of LoadBalancers ---------------------------- -This will be used to get a list of all load balancers or a filtered list using given criteria. A future expansion to this would be to add pagination support. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/loadbalancers - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -* status - A specified status type to filter by such as 'ACTIVE', 'DEGRADED' or 'ERROR' -* tenant - The tenant/project ID for a given customer -* name - A specified device name (in a future version we could accept wildcards) -* ip - A specified device ip address (in a future version we could accept ranges) -* vip - A specified floating ip address (in a future version we could accept ranges) - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - {"loadBalancers": [ - { - "id": 4561, - "name": "my load balancer", - "status": "ACTIVE", - "tenant": 8637027649, - "vip": "15.125.30.123", - "protocol": "HTTP", - "algorithm": "ROUND_ROBIN", - "port": 80, - "created": "2013-05-12 12:13:54", - "updated": "2013-06-02 14:21:31" - } - ]} - -Get a single LoadBalancer -------------------------- -This will be used to get details of a single load balancer specified by its ID. This will contain additional information such as nodes attached to the load balancer and which device is used. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/loadbalancers/{id} - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error), 404 (Not found) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not applicable - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "id": 4561, - "name": "my load balancer", - "status": "ACTIVE", - "tenant": 8637027649, - "vip": "15.125.30.123", - "protocol": "HTTP", - "algorithm": "ROUND_ROBIN", - "port": 80, - "device": 123, - "created": "2013-05-12 12:13:54", - "updated": "2013-06-02 14:21:31", - "nodes": [ - { - "ip": "15.185.23.157", - "port": 80, - "weight": 1, - "enabled": true, - "status": "ONLINE" - } - ], - "monitor": { - "type": "HTTP", - "delay": "30", - "timeout": "30", - "attemptsBeforeDeactivation": "2", - "path": "/healthcheck" - } - } - -Delete a single LoadBalancer (NOT IMPLEMENTED!) ------------------------------------------------- -This will be used to delete a single load balancer in the same way a given user would. - -Request type -^^^^^^^^^^^^ -DELETE - -Path -^^^^ -/v2.0/loadbalancers/{id} - -Access -^^^^^^ -It should be available to 'administrators' only.
- -Response codes -^^^^^^^^^^^^^^ -Success: 204 - -Failure: 400 (Bad request), 500 (Service error), 404 (Not found) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not applicable - -Get pool status ---------------- -This is used to get an overview of the current status of the load balancer pool. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/status/pool - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Query parameters supported -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not applicable - -Response Example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "devices": { - "used": 325, - "available": 50, - "error": 3, - "pendingDelete": 2 - }, - "vips": { - "used": 325, - "available": 15, - "bad": 2 - } - } - -Get counters ------------- -This is used to get the current counters from the API server. There is no reset for this at the moment, so the counts accumulate from the first installation of a version of the API supporting counters. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/status/counters - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Response example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - [ - { - "name": "loadbalancers_rebuild", - "value": 10 - }, - { - "name": "loadbalancers_error", - "value": 0 - } - ] - -Get service status ------------------- -This is used to get the health of vital service components. It will initially test all MySQL and Gearman servers to see if they are online. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/status/service - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Response example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "mysql": [ - { - "ip": "15.185.14.125", - "status": "ONLINE" - } - ], - "gearman": [ - { - "ip": "15.185.14.75", - "status": "OFFLINE" - } - ] - } - -Get global service limits -------------------------- -This is used to get the defined global limits (executed per-tenant) of the service. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/status/limits - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Response example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "maxLoadBalancerNameLength": 128, - "maxVIPsPerLoadBalancer": 1, - "maxNodesPerLoadBalancer": 50, - "maxLoadBalancers": 20 - } - -Change global service limits ----------------------------- -This is used to modify the global limits of the service. It can be used to modify maxLoadBalancerNameLength, maxVIPsPerLoadBalancer, maxNodesPerLoadBalancer and/or maxLoadBalancers. - -Request type -^^^^^^^^^^^^ -PUT - -Path -^^^^ -/v2.0/status/limits - -Access -^^^^^^ -It should be available to 'administrators' only. - -Request body example -^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "maxNodesPerLoadBalancer": 75 - } - -Response codes -^^^^^^^^^^^^^^ -Success: 204 - -Failure: 400 (Bad request), 500 (Service error) - -Get a tenant's service limits ------------------------------ -This is used to get individual tenant limits of the service (currently only maxLoadBalancers).
- -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/status/limits/{tenantID} - -Access -^^^^^^ -It should be available to both 'staff' and 'administrators'. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Response example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "maxLoadBalancers": 20 - } - -Change a tenant's service limits --------------------------------- -This is used to modify a tenant's limits of the service; if there is no current individual tenant limit, a new one will be set. It can currently be used to modify maxLoadBalancers only. - -Request type -^^^^^^^^^^^^ -PUT - -Path -^^^^ -/v2.0/status/limits/{tenantID} - -Access -^^^^^^ -It should be available to 'administrators' only. - -Request body example -^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "maxLoadBalancers": 75 - } - -Response codes -^^^^^^^^^^^^^^ -Success: 204 - -Failure: 400 (Bad request), 500 (Service error) - -List Admin API users --------------------- -This is used to get a list of users for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/user - -Access -^^^^^^ -It should be available to 'administrators' only. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Response example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - [ - { - "tenant": "123456", - "level": "USER" - }, - { - "tenant": "654321", - "level": "ADMIN" - } - ] - -Get an Admin API user ---------------------- -This is used to get a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN. - -Request type -^^^^^^^^^^^^ -GET - -Path -^^^^ -/v2.0/user/{tenantID} - -Access -^^^^^^ -It should be available to 'administrators' only. - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Response example -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "tenant": "123456", - "level": "USER" - } - -Delete an Admin API user ------------------------- -This is used to delete a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN. - -Request type -^^^^^^^^^^^^ -DELETE - -Path -^^^^ -/v2.0/user/{tenantID} - -Access -^^^^^^ -It should be available to 'administrators' only. - -Response codes -^^^^^^^^^^^^^^ -Success: 204 - -Failure: 400 (Bad request), 500 (Service error) - -Add an Admin API user ---------------------- -This is used to add a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN. - -Request type -^^^^^^^^^^^^ -POST - -Path -^^^^ -/v2.0/user - -Access -^^^^^^ -It should be available to 'administrators' only. - -Request body example -^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "tenant": 654321, - "level": "ADMIN" - } - -Response codes -^^^^^^^^^^^^^^ -Success: 200 - -Failure: 400 (Bad request), 500 (Service error) - -Modify an Admin API user ------------------------- -This is used to modify a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN. - -Request type -^^^^^^^^^^^^ -PUT - -Path -^^^^ -/v2.0/user/{tenantID} - -Access -^^^^^^ -It should be available to 'administrators' only. - -Request body example -^^^^^^^^^^^^^^^^^^^^ - -..
code-block:: json
-
-    {
-        "level": "ADMIN"
-    }
-
-Response codes
-^^^^^^^^^^^^^^
-Success: 200
-
-Failure: 400 (Bad request), 500 (Service error)
diff --git a/doc/admin_api/config.rst b/doc/admin_api/config.rst
deleted file mode 100644
index 2effd8f6..00000000
--- a/doc/admin_api/config.rst
+++ /dev/null
@@ -1,230 +0,0 @@
-Admin API Configuration
-=======================
-
-These options are specific to the Admin API in addition to the
-:doc:`common options </config>`.
-
-Configuration File
-------------------
-
-  The ``[admin_api]`` section is specific to the libra_admin_api utility.
-  Below is an example:
-
-  .. code-block:: ini
-
-      [admin_api]
-      db_sections=mysql1
-      ssl_certfile=/opt/server.crt
-      ssl_keyfile=/opt/server.key
-      gearman=127.0.0.1:4730
-      keystone_module=keystoneclient.middleware.auth_token:AuthProtocol
-
-      [mysql1]
-      host=localhost
-      port=3306
-      username=root
-      password=
-      schema=lbaas
-      ssl_cert=/opt/mysql_cert.crt
-      ssl_key=/opt/mysql_key.key
-      ssl_ca=/opt/mysql_ca.ca
-
-Command Line Options
---------------------
-  .. program:: libra_admin_api
-
-  .. option:: --host
-
-      The IP address to bind the frontend to, default is 0.0.0.0
-
-  .. option:: --port
-
-      The port number to listen on, default is 8889
-
-  .. option:: --disable_keystone
-
-      Do not use keystone authentication, for testing purposes only
-
-  .. option:: --db_sections
-
-      Config file sections that describe the MySQL servers. This option can
-      be specified multiple times for Galera or NDB clusters.
-
-  .. option:: --ssl_certfile
-
-      The path for the SSL certificate file to be used for the frontend of
-      the API server
-
-  .. option:: --ssl_keyfile
-
-      The path for the SSL key file to be used for the frontend of the API
-      server
-
-  .. option:: --gearman_keepalive
-
-      Use TCP KEEPALIVE to the Gearman job server. Not supported on all
-      systems.
-
-  .. option:: --gearman_keepcnt
-
-      Maximum number of TCP KEEPALIVE probes to send before killing the
-      connection to the Gearman job server.
-
-  .. option:: --gearman_keepidle
-
-      Seconds of idle time on the Gearman job server connection before
-      sending TCP KEEPALIVE probes.
-
-  .. option:: --gearman_keepintvl
-
-      Seconds between TCP KEEPALIVE probes.
-
-  .. option:: --gearman_ssl_ca
-
-      The path for the Gearman SSL Certificate Authority.
-
-  .. option:: --gearman_ssl_cert
-
-      The path for the Gearman SSL certificate.
-
-  .. option:: --gearman_ssl_key
-
-      The path for the Gearman SSL key.
-
-  .. option:: --gearman
-
-      Used to specify the Gearman job server hostname and port. This option
-      can be used multiple times to specify multiple job servers.
-
-  .. option:: --keystone_module
-
-      A colon separated module and class to use as the keystone authentication
-      module. The class should be compatible with keystone's AuthProtocol
-      class.
-
-  .. option:: --stats_driver
-
-      The drivers to be used for alerting. This option can be used multiple
-      times to specify multiple drivers.
-
-  .. option:: --stats_ping_timeout
-
-      How often to run a ping check of load balancers (in seconds), default 60
-
-  .. option:: --stats_poll_timer
-
-      How long to wait until we consider the initial ping check failed and
-      send a second ping. Default is 5 seconds.
-
-  .. option:: --stats_poll_timeout_retry
-
-      How long to wait until we consider the second and final ping check
-      failed. Default is 30 seconds.
-
-  .. option:: --stats_offline_ping_limit
-
-      How many times to ping an OFFLINE load balancer before considering
-      it unreachable and marking it for deletion.
-
-  .. 
option:: --stats_device_error_limit
-
-      Maximum number of simultaneous device failures to allow recovery on
-
-  .. option:: --number_of_servers
-
-      The number of Admin API servers in the system.
-      Used to calculate which Admin API server should run the next stats ping.
-
-  .. option:: --server_id
-
-      The server ID of this server, used to calculate which Admin API
-      server should run the next stats ping (starting at 0).
-
-  .. option:: --datadog_api_key
-
-      The API key to be used for the datadog driver
-
-  .. option:: --datadog_app_key
-
-      The Application key to be used for the datadog driver
-
-  .. option:: --datadog_message_tail
-
-      Some text to add at the end of an alerting message, such as a list of
-      users to alert (using the @user@email.com format), used for the datadog
-      driver.
-
-  .. option:: --datadog_tags
-
-      A list of tags to be used for the datadog driver
-
-  .. option:: --node_pool_size
-
-      The number of hot spare load balancer devices to keep in the pool,
-      default 10
-
-  .. option:: --vip_pool_size
-
-      The number of hot spare floating IPs to keep in the pool, default 10
-
-  .. option:: --expire_days
-
-      The number of days before DELETED load balancers are purged from the
-      database. The purge is run every 24 hours. The purge is not run if no
-      value is provided.
-
-  .. option:: --stats_enable
-
-      Enable / Disable usage statistics gathering
-
-  .. option:: --exists_freq
-
-      Minutes between sending of billing exists messages
-
-  .. option:: --usage_freq
-
-      Minutes between sending of billing usage messages
-
-  .. option:: --stats_freqs
-
-      Minutes between collecting usage statistics
-
-  .. option:: --stats_purge_enable
-
-      Enable / Disable purging of usage statistics
-
-  .. option:: --stats_purge_days
-
-      Number of days to keep usage statistics
-
-  .. option:: --delete_timer_seconds
-
-      Which second of each minute the delete timer should run
-
-  .. option:: --ping_timer_seconds
-
-      Which second of each minute the ping timer should run
-
-  .. option:: --stats_timer_seconds
-
-      Which second of each minute the statistics timer should run
-
-  .. option:: --usage_timer_seconds
-
-      Which second of each minute the usage timer should run
-
-  .. option:: --probe_timer_seconds
-
-      Which second of each minute the probe timer should run
-
-  .. option:: --offline_timer_seconds
-
-      Which second of each minute the offline timer should run
-
-  .. option:: --vips_timer_seconds
-
-      Which second of each minute the vips timer should run
-
-  .. option:: --exists_timer_seconds
-
-      Which second of each minute the exists timer should run
diff --git a/doc/admin_api/index.rst b/doc/admin_api/index.rst
deleted file mode 100644
index 4a200638..00000000
--- a/doc/admin_api/index.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. _libra-admin-api:
-
-Libra Admin API Server
-======================
-
-.. toctree::
-   :maxdepth: 2
-
-   about
-   config
-   schedulers
-   stats-drivers
-   api
-   v1api
diff --git a/doc/admin_api/schedulers.rst b/doc/admin_api/schedulers.rst
deleted file mode 100644
index fe6051d1..00000000
--- a/doc/admin_api/schedulers.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-================
-Admin Schedulers
-================
-
-The Admin API has several schedulers to maintain the health of the Libra
-system. This section of the document goes into detail about each one.
-
-The Admin API servers take turns running these tasks. Which server runs
-next is determined by the :option:`--number_of_servers` and
-:option:`--server_id` options.
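-
-As a sketch only (not the actual scheduler code), the turn-taking can be
-pictured like this, assuming a shared cycle counter that every server
-derives the same way, for example from the number of elapsed scheduler
-intervals:
-
-.. code-block:: python
-
-    def my_turn(cycle, server_id, number_of_servers):
-        """Return True when this Admin API server should run the task."""
-        return cycle % number_of_servers == server_id
-
-    # With three servers, server 1 runs the task on cycles 1, 4, 7, ...
-    assert my_turn(4, 1, 3)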
-
-Stats Scheduler
----------------
-
-This scheduler is actually a monitoring scheduler and at a later date will also
-gather statistics for billing purposes. It is executed once a minute and has
-support for multiple different :doc:`stats-drivers`.
-
-It sends a gearman 'ping' message to each active Load Balancer device. There
-are three possible outcomes:
-
-#. If all is good, no action is taken
-#. If a node connected to a load balancer has failed, the node is marked as
-   ERROR and the load balancer is marked as DEGRADED
-#. If a device has failed, it will automatically be rebuilt on a new
-   device and the associated floating IP will be re-pointed to that device. The
-   old device will be marked for deletion.
-
-
-Rebuild (AutoFailover)
-**********************
-
-Libra LBaaS supports auto-failover, or auto-rebuild, of a broken :term:`device`.
-
-In practice this means re-allocating / re-building the failed :term:`device` onto a new :term:`device`.
-
-1. A ping is sent to each :term:`device` (ping_lbs > _exec_ping)
-2. Failures are sent to the drivers (_exec_ping > _send_fails)
-3. The driver then
-
-   #. Marks the :term:`device` as being in ERROR state.
-   #. Triggers a rebuild
-   #. Looks for a free :term:`device` that is in OFFLINE state in the db.
-   #. Assigns the failed :term:`device`'s load balancers to the OFFLINE :term:`device`
-   #. Assigns the :term:`vip` to the new :term:`device`
-   #. Marks the old :term:`device` as DELETED
-   #. Puts the new :term:`device` into ACTIVE in the db.
-
-4. A scheduled function removes the DELETED :term:`device` from the db and unconfigures it.
-5. A scheduled function ensures that there are standby :term:`device` spares in the pool.
-
-
-Delete Scheduler
-----------------
-
-This scheduler looks out for any devices marked for deletion after use or after
-an error state. It is executed once a minute.
-
-It sends a gearman message to the Pool Manager to delete any devices that are
-to be deleted and removes them from the database.
-
-Create Scheduler
-----------------
-
-This scheduler takes a look at the number of hot spare devices available. It
-is executed once a minute (after the delete scheduler).
-
-If the number of available hot spare devices falls below the value specified by
-:option:`--node_pool_size` it will request that new devices are built and those
-devices will be added to the database. It records how many are currently being
-built so that long build times don't result in multiple Admin APIs trying to
-fulfil the same quota.
-
-VIP Scheduler
--------------
-
-This scheduler takes a look at the number of hot spare floating IPs available.
-It is executed once a minute.
-
-If the number of available floating IP addresses falls below the value specified
-by :option:`--vip_pool_size` it will request that new IPs are built and those
-will be added to the database.
-
-Expunge Scheduler
------------------
-
-This scheduler removes logical Load Balancers marked as DELETED from the
-database. It is executed once a day.
-
-The DELETED logical Load Balancers remain in the database mainly for billing
-purposes. This clears out any that were deleted after the number of days
-specified by :option:`--expire_days`.
diff --git a/doc/admin_api/stats-drivers.rst b/doc/admin_api/stats-drivers.rst
deleted file mode 100644
index d3802b88..00000000
--- a/doc/admin_api/stats-drivers.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-.. _stats-drivers:
-
-=============
-Stats Drivers
-=============
-The Stats scheduler has support for multiple different drivers.
-
-A typical driver supports three different actions:
-
-* Sending an alert
-* Sending a change
-* Sending a delete
-
-One can divide what a driver does into different areas:
-
-* Alerting - example: Datadog
-* Remediation - example: Database
-* Stats - example: Datadog
-
-
-Dummy
------
-
-A dummy driver which simply logs the above actions.
-
-
-Database
---------
-
-This is not a typical driver. It provides functionality such as triggering
-rebuilds of failed devices, marking devices as deleted and changing node states
-in the db.
-
-Alert
-*****
-
-When receiving an alert it does the following:
-
-#. Marks the node with ERROR in the database
-#. Triggers a rebuild of the device (AutoFailover / AF)
-
-
-Delete
-******
-
-Marks the device as DELETED in the database.
-
-Change
-******
-
-Changes the state of the device in the database.
-
-
-Datadog
--------
-
-A driver that provides alerting functionality via http://www.datadoghq.com/.
-
-Alert
-*****
-
-Sends a failure alert up to Datadog.
-
-Delete
-******
-
-Sends a message about a device being down / unreachable.
\ No newline at end of file
diff --git a/doc/admin_api/v1api.rst b/doc/admin_api/v1api.rst
deleted file mode 100644
index 2589a751..00000000
--- a/doc/admin_api/v1api.rst
+++ /dev/null
@@ -1,378 +0,0 @@
-LBaaS Device API (v1, DEPRECATED)
-=================================
-
-Description
------------
-
-The LBaaS service provides two classes of APIs: a tenant-facing
-API and an admin API. The admin API is designed for internal usage to allow
-administration of the LBaaS service itself. As part of this, the *Device
-API* allows for managing devices, which are the actual load balancer
-devices used by LBaaS.
-
-API Overview
-------------
-
-The device API is not visible to tenants and is thus designed to operate
-on its own HTTPS port, which is configurable. The device API only
-supports a JSON resource representation for reading and writing. The API
-is designed as a RESTful API, including support of CRUD operations for
-creating, reading, updating and deleting devices.
-
-Base URL and port
-^^^^^^^^^^^^^^^^^
-
-All device API calls run on the same TCP port and require HTTPS for
-access. The specific HTTPS port and certificate are configurable by the
-LBaaS service and will comply with the Cloud security requirements,
-including the certificate signing. The API is versioned such that all
-calls are prefixed with a version URI. For example,
-
-``https://lbaas-service:8889/v1/devices/...``
-
-would access the LBaaS system hosted on lbaas-service, using HTTPS on
-port 8889, using version 1 of the API.
-
-Exceptions
-^^^^^^^^^^
-
-As a RESTful service, the device API can return standard HTTP status
-codes with each request, including the success and error codes mentioned
-below. In the event a non-200 series status is returned, a JSON
-formatted error body is provided with additional details. The format of
-the JSON error body is as follows:
-
-*Example of a bad request JSON error response body*
-
-::
-
-    {
-        "message":"Bad Request",
-        "details":"device name : lbaas-10.5.251.48 already exists",
-        "code":400
-    }
-
-Base URI
-^^^^^^^^
-
-All LBaaS Device API calls have a common base URI defined as follows:
-
-``<base-uri> = https://<lbaas-system-addr>:<lbaas-device-port>/v1``
-
-- *lbaas-system-addr* is the system name / address where the LBaaS API
-  service is running.
-
-- *lbaas-device-port* is the TCP port on which the device service is
-  listening for HTTPS REST requests.
-
-- */v1/devices* will prefix all REST calls.
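-
-Putting these pieces together (illustrative device id and message text
-only), a request for a device that does not exist returns a 404 with an
-error body in the format described above:
-
-::
-
-    curl -k https://15.185.107.220:8889/v1/devices/999
-
-    {
-        "message":"Not Found",
-        "details":"device id : 999 does not exist",
-        "code":404
-    }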
-
-Device Data Model
-^^^^^^^^^^^^^^^^^
-
-Device REST calls allow reading and writing device resources represented
-in JSON. The data model for devices is defined as follows:
-
-id
-^^
-
-*id* is an integer representing a unique id for the device. *id* is
-created by the LBaaS service when devices are created. *id* is used to
-reference devices as the REST collection id.
-
-updated
-^^^^^^^
-
-*updated* is a text string representing the last time this device
-resource was updated.
-
-created
-^^^^^^^
-
-*created* is a text string representing when the device was created.
-
-status
-^^^^^^
-
-*status* is a text string representing the status of the device as
-reported by the device to the LBaaS service ( this is done through the
-gearman client / worker interface ). Status values can be 'OFFLINE',
-'ONLINE', 'ERROR'.
-
-address
-^^^^^^^
-
-*address* is the IPv4 or IPv6 address of the device. This is the address
-which will be used as the load balancer's address by the customer.
-Note, this should be a Nova floating IP address for usage with HAProxy
-on Nova.
-
-name
-^^^^
-
-*name* is the name of the device which is used internally by LBaaS as
-the gearman worker name. Each device name is specified by the pool
-manager and must be unique for each device. The format of the name is
-``lbaas-<version>-<uuid>`` where ``<version>`` is the gearman worker
-version e.g. *v1* and ``<uuid>`` is a unique UUID for the name.
-
-loadbalancer
-^^^^^^^^^^^^
-
-*loadbalancer* is a list of references to the logical loadbalancers which
-are using this device. This is a list of one or more integers. An empty or
-zero value denotes that this device is not used and is free. Note, if the
-device is not in use, it has no customer loadbalancer config and is in an
-'OFFLINE' state.
-
-type
-^^^^
-
-*type* is a text string describing the type of device. Currently only
-'HAProxy' is supported.
-
-Example of a single device
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-::
-
-    {
-        "id": 1,
-        "updated": "2013-06-10T14:29:14",
-        "created": "2013-06-10T14:29:14",
-        "status": "OFFLINE",
-        "floatingIpAddress": "15.185.96.125",
-        "publicIpAddress": "15.185.96.125",
-        "name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
-        "loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
-        "type": "basename: libra-haproxy, image: 12345",
-        "az": 2
-    }
-
-Operations
-==========
-
-Get all Devices
----------------
-
-Get all devices currently defined.
-
-::
-
-    GET /devices
-
-Return Status
-^^^^^^^^^^^^^
-
-200 on success, 500 for internal error
-
-Example
-^^^^^^^
-
-::
-
-    curl -k https://15.185.107.220:8889/v1/devices
-
-Response:
-
-::
-
-    {
-        "devices": [
-            {
-                "id": 1,
-                "updated": "2013-06-10T14:29:14",
-                "created": "2013-06-10T14:29:14",
-                "status": "OFFLINE",
-                "floatingIpAddress": "15.185.96.125",
-                "publicIpAddress": "15.185.96.125",
-                "name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
-                "loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
-                "type": "basename: libra-haproxy, image: 12345",
-                "az": 2
-            }
-        ]
-    }
-
-Get a Device
-------------
-
-Get a specific device.
-
-::
-
-    GET /devices/{deviceId}
-
-Return Status
-^^^^^^^^^^^^^
-
-200 on success, 404 not found, 500 for internal error
-
-Example
-^^^^^^^
-
-::
-
-    curl -k https://15.185.107.220:8889/v1/devices/1
-
-Response:
-
-::
-
-    {
-        "id": 1,
-        "updated": "2013-06-10T14:29:14",
-        "created": "2013-06-10T14:29:14",
-        "status": "OFFLINE",
-        "floatingIpAddress": "15.185.96.125",
-        "publicIpAddress": "15.185.96.125",
-        "name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
-        "loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
-        "type": "basename: libra-haproxy, image: 12345",
-        "az": 2
-    }
-
-Create a Device
----------------
-
-Creating a new device registers an already deployed device with the
-LBaaS service. In order to do so, LBaaS will need to know its name and
-address. Returned will be the new device, including its *id*.
-
-::
-
-    POST /devices
-
-Return Status
-^^^^^^^^^^^^^
-
-200 on success, 400 bad request, 500 for internal error
-
-Request Body
-^^^^^^^^^^^^
-
-A JSON request body is required for this request.
-
-::
-
-    {
-        "name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
-        "publicIpAddress": "15.185.96.125",
-        "floatingIpAddress": "15.185.96.125",
-        "az": 2,
-        "type": "basename: libra-haproxy, image: 12345"
-    }
-
-Example
-^^^^^^^
-
-::
-
-    curl -X POST -H "Content-type:application/json" --data-binary "@device.json" -k https://15.185.107.220:8889/v1/devices
-
-Response:
-
-::
-
-    {
-        "id": 1,
-        "updated": "2013-06-10T14:29:14",
-        "created": "2013-06-10T14:29:14",
-        "status": "OFFLINE",
-        "floatingIpAddress": "15.185.96.125",
-        "publicIpAddress": "15.185.96.125",
-        "name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
-        "loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
-        "type": "basename: libra-haproxy, image: 12345",
-        "az": 2
-    }
-
-Delete a Device
----------------
-
-Deleting a device removes it from the LBaaS service. Note, this call
-can be dangerous and can affect a customer's load balancer if it is in
-use. *Please use this call with extreme caution!*
-
-::
-
-    DELETE /devices/{deviceId}
-
-Return Status
-^^^^^^^^^^^^^
-
-204 on success, 400 bad request, 500 for internal error
-
-Example
-^^^^^^^
-
-::
-
-    curl -X DELETE -k https://15.185.107.220:8889/v1/devices/1
-
-Update a Device
----------------
-
-Update the status of a device. This can set the status to `ERROR` or
-`ONLINE` and the statusDescription field. No other fields can be
-changed; any others supplied will be ignored.
-
-::
-
-    PUT /devices/{deviceId}
-
-Return Status
-^^^^^^^^^^^^^
-
-200 on success, 400 bad request, 500 for internal error
-
-Request Body
-^^^^^^^^^^^^
-
-A JSON request body is required for this request.
-
-::
-
-    {
-        "status": "ERROR",
-        "statusDescription": "Load Balancer has failed"
-    }
-
-Example
-^^^^^^^
-
-::
-
-    curl -X PUT -H "Content-type:application/json" --data-binary "@device.json" -k https://15.185.107.220:8889/v1/devices/1
-
-Get Usage of Devices
---------------------
-
-This call allows obtaining usage summary information for all devices.
-
-::
-
-    GET /devices/usage
-
-Return Status
-^^^^^^^^^^^^^
-
-200 on success, 500 for internal error
-
-Example
-^^^^^^^
-
-::
-
-    curl -k https://15.185.107.220:8889/v1/devices/usage
-
-Response:
-
-::
-
-    {
-        "total": 100,
-        "free" : 50,
-        "taken": 50
-    }
-
diff --git a/doc/api/about.rst b/doc/api/about.rst
deleted file mode 100644
index 303880a9..00000000
--- a/doc/api/about.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-Description
-===========
-
-Purpose
--------
-
-The API server listens for REST+JSON connections to interface the user with
-the LBaaS system. Its API is based on the Atlas API with a few slight
-modifications.
-
-Design
-------
-
-It is designed to use an Eventlet WSGI web server frontend and Pecan+WSME to
-process the requests. SQLAlchemy+MySQL is used to store details of the load
-balancers and Gearman is used to communicate with the workers.
diff --git a/doc/api/api.rst b/doc/api/api.rst
deleted file mode 100644
index 7c120a8b..00000000
--- a/doc/api/api.rst
+++ /dev/null
@@ -1,635 +0,0 @@
-Load Balancer as a Service (LBaaS) API Specification
-====================================================
-
-
-.. toctree::
-   :maxdepth: 2
-   :glob:
-
-   rest/*
-
-1. Overview
------------
-
-This guide is intended for software developers who wish to create
-applications using the Load Balancer as a Service (LBaaS) set
-of APIs. It assumes the reader has a general understanding of cloud
-APIs, load balancing concepts, RESTful web services, HTTP/1.1
-conventions and JSON serialization formats. The LBaaS set of APIs
-utilizes and takes advantage of a variety of OpenStack cloud API
-patterns, which are described in detail.
-
-1.1 API Maturity Level
-~~~~~~~~~~~~~~~~~~~~~~
-
-This API definition represents the Load Balancer as a Service
-in Beta release form.
-
-**Maturity Level**: *Experimental*
-
-**Version API Status**: *BETA*
-
-2. Architecture View
---------------------
-
-2.1 Overview
-~~~~~~~~~~~~
-
-The Load Balancer as a Service (LBaaS) is a set of APIs that
-provide a RESTful interface for the creation and management of load
-balancers in the cloud. Load balancers created can be used for a variety
-of purposes, including load balancing for your external cloud hosted
-services as well as for internal load balancing needs. The load balancing
-solution is meant to provide both load balancing and high availability
-in an industry standard manner. The LBaaS APIs defined are integrated
-within the API ecosystem, including integration with the
-identity management system, billing and monitoring systems.
-
-2.2 Conceptual/Logical Architecture View
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To use the Load Balancers API effectively, you should
-understand several key concepts.
-
-2.2.1 Load Balancer
-^^^^^^^^^^^^^^^^^^^
-
-A load balancer is a logical device. It is used to distribute workloads
-between multiple back-end systems or services called 'nodes', based on
-the criteria defined as part of its configuration.
-
-2.2.2 Virtual IP Address
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-A virtual IP address is an Internet Protocol (IP) address configured on the
-load balancer for use by clients connecting to a service that is load
-balanced. Incoming connections and requests are distributed to back-end
-nodes based on the configuration of the load balancer. The load balancer
-will need to be registered with the appropriate DNS domain record in order
-for users to access the nodes via a domain name-based URL.
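-
-For example (illustrative domain and address only), a DNS A record pointing
-a site at a load balancer virtual IP of 15.185.96.125 might look like:
-
-::
-
-    www.example.com.    300    IN    A    15.185.96.125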
-
-2.2.3 Node
-^^^^^^^^^^
-
-A node is a back-end device providing a service, like a web server or file
-server, on a specified IP and port.
-
-The nodes defined by the load balancer are responsible for servicing the
-requests received through the load balancer's virtual IP. By default, the
-load balancer employs a basic health check that ensures the node is
-listening on its defined port. The node is checked at the time of
-addition and at regular intervals as defined by the load balancer health
-check configuration. If a back-end node is not listening on its port or
-does not meet the conditions of the defined active health check for the
-load balancer, then the load balancer will not forward connections or
-requests to it and its status will be listed as OFFLINE. Only nodes that
-are in an ONLINE status will receive and be able to service traffic from
-the load balancer.
-
-Nodes can be assigned a weight attribute that determines the portion of
-requests or connections it services compared to the other nodes of the load
-balancer. For example, if node A has a weight of 2 and node B has a weight of 1,
-then the loadbalancer will forward twice as many requests to node A than to
-node B. If the weight attribute is not specified, then the node's weight is
-implicitly set to "1". Weight values from 1 to 256 are allowed.
-
-Nodes that are assigned to a load balancer that is delivering data to a Galera
-database cluster may require a primary write node be specified to avoid
-database locking problems that can occur. For this case, a load balancer can be
-configured to use the special "GALERA" protocol type. When a "GALERA" protocol
-is chosen, all of the specified nodes must use the node "backup" attribute to
-specify whether it is a backup node or the primary node. There may only be a
-single primary node, specified by setting the "backup" attribute to FALSE. All
-other nodes must have the "backup" attribute set to TRUE.
-
-
-2.2.4 Health Monitors
-^^^^^^^^^^^^^^^^^^^^^
-
-A health monitor is a configurable, active monitoring operation that exists for all load balancer nodes. In addition to the basic health checks, active health monitoring operations periodically check your back-end nodes to ensure they are responding correctly.
-
-Active health monitoring offers two choices for the type of monitor it can provide: CONNECT or HTTP. CONNECT monitoring is the most basic type of health check and it does not perform post-processing or protocol specific health checks. HTTP monitoring, on the other hand, is more intelligent and is capable of processing HTTP responses to determine the condition of a node. For both options, a user may configure the time delay between monitoring checks, the timeout period for a connection to a node, the number of attempts before removing a node from rotation and, for HTTP monitoring, the HTTP path to test.
-
-Active health monitoring is configured by default to use CONNECT type monitoring with a 30 second delay, 30 second timeout, and 2 retries, and it cannot be disabled. The caller may configure one health monitor per load balancer and the same configuration is used to monitor all of the back-end nodes.
-
-2.3 Infrastructure Architecture View
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-LBaaS fits into the ecosystem of APIs by utilizing the same common
-authentication mechanisms as any other services. In order to
-use LBaaS, a user account must have the "Load Balancer" service activated.
-All API calls require a valid authentication token.
-
-3. 
Account-level View
----------------------
-
-Once the account is activated, the LBaaS service will show up
-in the service catalog returned during user login. In addition, the LBaaS
-endpoints to be used will also be presented. Availability zone
-information may vary based on region.
-
-3.1 Service Catalog
-~~~~~~~~~~~~~~~~~~~
-
-Once a user authenticates using the RESTful API, a service
-catalog will list the availability of the LBaaS service, roles and
-endpoints for the region you have logged into and for which you are
-activated.
-
-*The following is an example of LBaaS service information within the
-service catalog including roles and endpoints:*
-
-::
-
-    "user": {
-        "id": "59267322167978",
-        "name": "lbaas_user",
-        "roles": [
-            {
-                "id": "83241756956007",
-                "serviceId": "220",
-                "name": "lbaas-user",
-                "tenantId": "11223344556677"
-            },
-            {
-                "id": "00000000004024",
-                "serviceId": "140",
-                "name": "user",
-                "tenantId": "11223344556677"
-            },
-            {
-                "id": "00000000004013",
-                "serviceId": "130",
-                "name": "block-admin",
-                "tenantId": "11223344556677"
-            }
-        ]
-    },
-    "serviceCatalog": [
-        {
-            "name": "Identity",
-            "type": "identity",
-            "endpoints": [{
-                "publicURL": "https:\/\/usa.region-b.geo-1.identity.hpcloudsvc.com:35357\/v2.0\/",
-                "region": "region-b.geo-1",
-                "versionId": "2.0",
-                "versionInfo": "https:\/\/usa.region-b.geo-1.identity-internal.hpcloudsvc.com:35357\/v2.0\/"
-            }]
-        },
-        {
-            "name": "Load Balancer",
-            "type": "hpext:lbaas",
-            "endpoints": [{
-                "tenantId": "11223344556677",
-                "publicURL": "https:\/\/usa.region-b.geo-1.lbaas.hpcloudsvc.com\/v1.1",
-                "publicURL2": "",
-                "region": "region-b.geo-1",
-                "versionId": "1.1",
-                "versionInfo": "https:\/\/usa.region-b.geo-1.lbaas.hpcloudsvc.com\/v1.1",
-                "versionList": "https:\/\/usa.region-b.geo-1.lbaas.hpcloudsvc.com"
-            }]
-        }
-    ]
-
-4. General API Information
---------------------------
-
-This section describes operations and guidelines that are common to all
-LBaaS APIs.
-
-4.1 Authentication
-~~~~~~~~~~~~~~~~~~
-
-The LBaaS API uses standards defined by the OpenStack Keystone project
-for authentication. Please refer to the
-identity management system for more details on all authentication
-methods currently supported.
-
-4.2 Service Access/Endpoints
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As shown in the example above, logging into your region will provide you
-with the appropriate LBaaS endpoints to use. In addition, all supported
-versions are published within the service catalog. A client may choose to
-use any LBaaS API version listed.
-
-4.3 Request/Response Types
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The LBaaS API currently only supports JSON data serialization formats
-for request and response bodies. The request format is specified using
-the 'Content-Type' header and is required for operations that have a
-request body. The response format should be specified in requests using
-the 'Accept' header. If no response format is specified, JSON is the
-default.
-
-4.4 Persistent Connections
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-By default, the API supports persistent connections via HTTP/1.1
-'keep-alives'. All connections will be kept alive unless the connection
-header is set to close. In adherence with the IETF HTTP RFCs, the server
-may close the connection at any time and clients should not rely on this
-behavior.
-
-4.5 Absolute Limits
-~~~~~~~~~~~~~~~~~~~
-
-Absolute limits are limits which prohibit a user from creating too many
-LBaaS resources. 
For example, 'maxNodesPerLoadBalancer' identifies the
-total number of nodes that may be associated with a given load balancer.
-Limits for a specific tenant may be queried using the 'GET /limits'
-API. This will return the limit values which apply to the tenant who
-made the request.
-
-+-----------------------------+------------------------------------------------------------+
-| Limited Resource            | Description                                                |
-+=============================+============================================================+
-| maxLoadBalancers            | Maximum number of load balancers allowed for this tenant   |
-+-----------------------------+------------------------------------------------------------+
-| maxNodesPerLoadBalancer     | Maximum number of nodes allowed for each load balancer     |
-+-----------------------------+------------------------------------------------------------+
-| maxLoadBalancerNameLength   | Maximum length allowed for a load balancer name            |
-+-----------------------------+------------------------------------------------------------+
-| maxVIPsPerLoadBalancer      | Maximum number of Virtual IPs for each load balancer       |
-+-----------------------------+------------------------------------------------------------+
-
-4.6 Faults
-~~~~~~~~~~
-
-When issuing a LBaaS API request, it is possible that an error can
-occur. In these cases, the system will return an HTTP error response
-code denoting the type of error and an LBaaS response body with
-additional details regarding the error. Specific HTTP status codes
-possible are listed in each API definition.
-
-*The following JSON message represents the JSON response body used for
-all faults:*
-
-::
-
-    {
-        "message":"Description of fault",
-        "details":"Details of fault",
-        "code": HTTP standard error status
-    }
-
-4.7 Specifying Tenant IDs
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Tenant identifiers are not required in LBaaS API URIs. The tenant
-identifier is derived from the OpenStack Keystone authentication token
-provided with each API call. This simplifies the REST URIs to only
-include the base URI and the resource. All
-LBaaS calls behave in this manner.
-
-5. LBaaS API Resources and Methods
-----------------------------------
-
-The following is a summary of all supported LBaaS API resources and
-methods. Each resource and method is defined in detail in the subsequent
-sections.
-
-**Derived resource identifiers:**
-
-**{baseURI}** is the endpoint URI returned in the service catalog upon
-logging in, including the protocol, endpoint and base URI.
-
-**{ver}** is the specific version URI returned as part of the service
-catalog.
-
-**{loadbalancerId}** is the unique identifier for a load balancer
-returned by the LBaaS service.
-
-**{nodeId}** is the unique identifier for a load balancer node returned
-by the LBaaS service.
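-
-For example (illustrative identifiers only), a node resource path of the
-form {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} expands
-to:
-
-::
-
-    https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/500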
- -5.1 LBaaS API Summary Table -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Resource | Operation | Method | Path | -+=================+============================================================+==========+=================================================================+ -| Versions | :ref:`Get list of all API versions ` | GET | {baseURI}/ | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Versions | :ref:`Get specific API version ` | GET | {baseURI}/{ver} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Limits | :ref:`Get list of LBaaS limits ` | GET | {baseURI}/{ver}/limits | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Protocols | :ref:`Get list of supported protocols ` | GET | {baseURI}/{ver}/protocols | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Algorithms | :ref:`Get list of supported algorithms ` | GET | {baseURI}/{ver}/algorithms | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Load Balancer | :ref:`Get list of all load balancers ` | GET | {baseURI}/{ver}/loadbalancers | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Load Balancer | :ref:`Get load balancer details ` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Load Balancer | :ref:`Create a new load balancer ` | POST | {baseURI}/{ver}/loadbalancers | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Load Balancer | :ref:`Update load balancer attributes ` | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Load Balancer | :ref:`Delete an existing load balancer ` | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Node | :ref:`Get list of load balancer nodes ` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Node | :ref:`Get a specific load balancer node ` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Node | :ref:`Create a new load 
balancer node ` | POST | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Node | :ref:`Update a load balancer node ` | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Node | :ref:`Delete a load balancer node ` | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Virtual IP | :ref:`Get list of virtual IPs ` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/virtualips | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Logs | :ref:`Archive log file to Object Storage ` | POST | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/logs | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Health Monitor | :ref:`Get a load balancer monitor ` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Health Monitor | :ref:`Update a load balancer monitor ` | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ -| Health Monitor | :ref:`Reset a load balancer monitor ` | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor | -+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+ - -5.2 Common Request Headers -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -*HTTP standard request headers* - -**Accept** - Internet media types that are acceptable in the response. -LBaaS API supports the media type 'application/json'. - -**Content-Length** - The length of the request body in octets (8-bit -bytes). - -**Content-Type** - The Internet media type of the request body. Used -with POST and PUT requests. LBaaS API supports -'application/json'. - -*Non-standard request headers* - -**X-Auth-Token** - authorization token. - -*Example* - -:: - - GET /v1.0/loadbalancers HTTP/1.1 - Host: system.hpcloudsvc.com - Content-Type: application/json - Accept: application/json - X-Auth-Token: TOKEN - Content-Length: 85 - -5.3 Common Response Headers -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -*HTTP standard response headers* - -**Content-Type** - Internet media type of the response body. - -**Date** - The date and time that the response was sent. - -*Example* - -:: - - HTTP/1.1 200 OK - Content-Length: 1135 - Content-Type: application/json; charset=UTF-8 - Date: Tue, 30 Oct 2012 16:22:35 GMT - -.. _api-versions: - -6. 
Get a List of All LBaaS API Versions Supported
--------------------------------------------------
-
-6.1 Operation
-~~~~~~~~~~~~~
-
-+------------+--------------------------------+----------+--------------+
-| Resource   | Operation                      | Method   | Path         |
-+============+================================+==========+==============+
-| Versions   | Get list of all API versions   | GET      | {baseURI}/   |
-+------------+--------------------------------+----------+--------------+
-
-6.2 Description
-~~~~~~~~~~~~~~~
-
-This method allows querying the LBaaS service for all versions it
-supports. This method is also advertised within the Keystone service
-catalog which is presented upon user login. All versions listed can be
-used for LBaaS.
-
-6.3 Request Data
-~~~~~~~~~~~~~~~~
-
-None required.
-
-6.4 Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-6.5 Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-6.6 Request Body
-~~~~~~~~~~~~~~~~
-
-None required.
-
-6.7 Normal Response Code
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 200                | OK            |
-+--------------------+---------------+
-
-6.8 Response Body
-~~~~~~~~~~~~~~~~~
-
-The response body contains a list of all supported versions of LBaaS.
-
-6.9 Error Response Codes
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-6.10 Example
-~~~~~~~~~~~~
-
-**Curl Request**
-
-::
-
-    curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com
-
-**Response**
-
-::
-
-    {
-        "versions": [
-            {
-                "id": "v1.1",
-                "links": [
-                    {
-                        "href": "http://api-docs.hpcloud.com",
-                        "rel": "self"
-                    }
-                ],
-                "status": "CURRENT",
-                "updated": "2012-12-18T18:30:02.25Z"
-            }
-        ]
-    }
-
-.. _api-version:
-
-7. Get Specific LBaaS API Version Information
----------------------------------------------
-
-7.1 Operation
-~~~~~~~~~~~~~
-
-+------------+----------------------------+----------+-------------------+
-| Resource   | Operation                  | Method   | Path              |
-+============+============================+==========+===================+
-| Versions   | Get specific API version   | GET      | {baseURI}/{ver}   |
-+------------+----------------------------+----------+-------------------+
-
-7.2 Description
-~~~~~~~~~~~~~~~
-
-This method allows querying the LBaaS service for information regarding
-a specific version of the LBaaS API. This method is also advertised
-within the Keystone service catalog which is presented upon user login.
-
-7.3 Request Data
-~~~~~~~~~~~~~~~~
-
-None required.
-
-7.4 Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-7.5 Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-7.6 Request Body
-~~~~~~~~~~~~~~~~
-
-None required.
-
-7.7 Normal Response Code
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 200                | OK            |
-+--------------------+---------------+
-
-7.8 Response Body
-~~~~~~~~~~~~~~~~~
-
-The response body contains information regarding a specific LBaaS API
-version.
-
-7.9 Error Response Codes
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-7.10 Example
-~~~~~~~~~~~~
-
-**Curl Request**
-
-::
-
-    curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1
-
-**Response**
-
-::
-
-    {
-        "version": {
-            "id": "v1.1",
-            "links": [
-                {
-                    "href": "http://api-docs.hpcloud.com",
-                    "rel": "self"
-                }
-            ],
-            "media-types": [
-                {
-                    "base": "application/json"
-                }
-            ],
-            "status": "CURRENT",
-            "updated": "2012-12-18T18:30:02.25Z"
-        }
-    }
-
-
-
-Features Currently Not Implemented or Supported
------------------------------------------------
-
-The following features are currently not supported:
-
-* IPv6 address types.
-
-SSL
----
-
-Supported
-~~~~~~~~~
-
-End-to-end HTTPS protocol support is currently provided by the TCP load balancer option. HTTPS-based traffic will flow between end-users and application server nodes via the TCP load balancer connection.
-
-* The same SSL certificate needs to be installed on each application server node.
-* The same private key needs to be installed on each application server node.
-* The SSL certificate needs to reference the load balancer's fully qualified domain name (FQDN) or external IP address in the Subject CommonName (CN) or Subject Alternative
-  Name field of the certificate. The IP address of the servers behind the load balancer should not be used.
-
-Not supported
-~~~~~~~~~~~~~
-
-* SSL certificate termination on the load balancer
-* HTTPS/SSL session affinity or "stickiness"
\ No newline at end of file
diff --git a/doc/api/config.rst b/doc/api/config.rst
deleted file mode 100644
index bdaba0bc..00000000
--- a/doc/api/config.rst
+++ /dev/null
@@ -1,126 +0,0 @@
-API Configuration
-=================
-
-These options are specific to the API in addition to the
-:doc:`common options </config>`.
-
-Configuration File
-------------------
-
-  The ``[api]`` section is specific to the libra_api utility. Below is an
-  example:
-
-  .. code-block:: ini
-
-      [api]
-      db_sections=mysql1
-      gearman=127.0.0.1:4730
-      keystone_module=keystoneclient.middleware.auth_token:AuthProtocol
-      swift_basepath=lbaaslogs
-      swift_endpoint=https://host.com:443/v1/
-      ssl_certfile=/opt/certfile.crt
-      ssl_keyfile=/opt/keyfile.key
-
-      [mysql1]
-      host=localhost
-      port=3306
-      username=root
-      password=
-      schema=lbaas
-      ssl_cert=/opt/mysql_cert.crt
-      ssl_key=/opt/mysql_key.key
-      ssl_ca=/opt/mysql_ca.ca
-
-  In addition to this, any options that are specific to the given keystone
-  module should be stored in the ``[keystone]`` section.
-
-Command Line Options
---------------------
-  .. program:: libra_api
-
-  .. option:: --host
-
-      The IP address to bind the frontend to, default is 0.0.0.0
-
-  .. option:: --port
-
-      The port number to listen on, default is 443
-
-  .. 
option:: --disable_keystone
-
-      Do not use keystone authentication, for testing purposes only
-
-  .. option:: --db_sections
-
-      Config file sections that describe the MySQL servers. This option can
-      be specified multiple times for Galera or NDB clusters.
-
-  .. option:: --gearman
-
-      Used to specify the Gearman job server hostname and port. This option
-      can be used multiple times to specify multiple job servers.
-
-  .. option:: --gearman_keepalive
-
-      Use TCP KEEPALIVE to the Gearman job server. Not supported on all
-      systems.
-
-  .. option:: --gearman_keepcnt
-
-      Maximum number of TCP KEEPALIVE probes to send before killing the
-      connection to the Gearman job server.
-
-  .. option:: --gearman_keepidle
-
-      Seconds of idle time on the Gearman job server connection before
-      sending TCP KEEPALIVE probes.
-
-  .. option:: --gearman_keepintvl
-
-      Seconds between TCP KEEPALIVE probes.
-
-  .. option:: --gearman_ssl_ca
-
-      The path for the Gearman SSL Certificate Authority.
-
-  .. option:: --gearman_ssl_cert
-
-      The path for the Gearman SSL certificate.
-
-  .. option:: --gearman_ssl_key
-
-      The path for the Gearman SSL key.
-
-  .. option:: --keystone_module
-
-      A colon separated module and class to use as the keystone authentication
-      module. The class should be compatible with keystone's AuthProtocol
-      class.
-
-  .. option:: --swift_basepath
-
-      The default container to be used for customer log uploads.
-
-  .. option:: --swift_endpoint
-
-      The default endpoint for swift. The user's tenant ID will automatically
-      be appended to this unless overridden at the log archive request.
-
-  .. option:: --ssl_certfile
-
-      The path for the SSL certificate file to be used for the frontend of
-      the API server
-
-  .. option:: --ssl_keyfile
-
-      The path for the SSL key file to be used for the frontend of the API
-      server
-
-  .. option:: --ip_filters
-
-      A mask of IP addresses to filter for backend nodes in the form
-      xxx.xxx.xxx.xxx/yy
-
-      Any backend node IP address supplied which falls outside these filters
-      will result in an error for the create or node add functions.
-      This option can be specified multiple times.
diff --git a/doc/api/index.rst b/doc/api/index.rst
deleted file mode 100644
index 60ad452b..00000000
--- a/doc/api/index.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _libra-api:
-
-Libra API Server
-================
-
-.. toctree::
-   :maxdepth: 2
-
-   about
-   config
-   api
diff --git a/doc/api/rest/algorithms.rst b/doc/api/rest/algorithms.rst
deleted file mode 100644
index a3a95c98..00000000
--- a/doc/api/rest/algorithms.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-.. _api-algorithms:
-
-==========
-Algorithms
-==========
-
-
-Get List Of Supported LBaaS Algorithms
---------------------------------------
-
-Operation
-~~~~~~~~~
-
-+--------------+------------------------------------+----------+------------------------------+
-| Resource     | Operation                          | Method   | Path                         |
-+==============+====================================+==========+==============================+
-| Algorithms   | Get list of supported algorithms   | GET      | {baseURI}/{ver}/algorithms   |
-+--------------+------------------------------------+----------+------------------------------+
-
-Description
-~~~~~~~~~~~
-
-All load balancers utilize an algorithm that defines how traffic should
-be directed between back-end nodes. The default algorithm for newly
-created load balancers is ROUND\_ROBIN, which can be overridden at
-creation time or changed after the load balancer has been initially
-provisioned.
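-
-For illustration (a sketch only; the full create request is described in
-the load balancer section, and the name and node values here are made up),
-a create request that overrides the default algorithm might look like:
-
-::
-
-    {
-        "name": "lb-demo",
-        "algorithm": "LEAST_CONNECTIONS",
-        "nodes": [{"address": "10.1.1.1", "port": "80"}]
-    }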
- -The algorithm name is to be constant within a major revision of the load -balancing API, though new algorithms may be created with a unique -algorithm name within a given major revision of this API. - -**Supported Algorithms** - -+----------------------+-------------------------------------------------------------------------+ -| Name | Description | -+======================+=========================================================================+ -| LEAST\_CONNECTIONS | The node with the lowest number of connections will receive requests. | -+----------------------+-------------------------------------------------------------------------+ -| ROUND\_ROBIN | Connections are routed to each of the back-end servers in turn. | -+----------------------+-------------------------------------------------------------------------+ - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 200 | OK | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -The response body contains the currently supported algorithms. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Curl Request** - -:: - - curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/algorithms - -**Response** - -:: - - { - "algorithms": [ - { - "name": "ROUND_ROBIN" - }, - { - "name": "LEAST_CONNECTIONS" - } - ] - } \ No newline at end of file diff --git a/doc/api/rest/health-monitor.rst b/doc/api/rest/health-monitor.rst deleted file mode 100644 index 780aa0e2..00000000 --- a/doc/api/rest/health-monitor.rst +++ /dev/null @@ -1,360 +0,0 @@ -.. _api-monitor: - -=============== -Health Monitors -=============== - - -.. _api-monitor-status: - -Get Load Balancer Health Monitor --------------------------------- - -Operation -~~~~~~~~~ - -+--------------------+------------------------------------------+-------+--------------------------------------------------------------+ -|Resource |Operation |Method |Path | -+====================+==========================================+=======+==============================================================+ -|Health Monitor |Get a load balancer health monitor |GET |{baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor | -+--------------------+------------------------------------------+-------+--------------------------------------------------------------+ - -Description -~~~~~~~~~~~ - -This operation retrieves the current configuration of a load balancer health monitor. - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. 
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+------------------+---------------------+
-| HTTP Status Code | Description         |
-+==================+=====================+
-|200               |OK                   |
-+------------------+---------------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-The response body contains the health monitor for the requested load balancer, or 404 if not found.
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+------------------+---------------------+
-| HTTP Status Code | Description         |
-+==================+=====================+
-|400               |Bad Request          |
-+------------------+---------------------+
-|401               |Unauthorized         |
-+------------------+---------------------+
-|404               |Not Found            |
-+------------------+---------------------+
-|405               |Not Allowed          |
-+------------------+---------------------+
-|500               |LBaaS Fault          |
-+------------------+---------------------+
-
-Example
-~~~~~~~
-
-**Curl Example**
-
-::
-
-    curl -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token:HPAuth_d17efd" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/healthmonitor
-
-**Response**
-
-::
-
-    {
-        "type": "CONNECT",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2"
-    }
-
-or..
-
-::
-
-    {
-        "type": "HTTP",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2",
-        "path": "/healthcheck"
-    }
-
-
-.. _api-monitor-modify:
-
-Update Load Balancer Health Monitor
------------------------------------
-
-Operation
-~~~~~~~~~
-
-+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
-|Resource            |Operation                                 |Method |Path                                                          |
-+====================+==========================================+=======+==============================================================+
-|Health Monitor      |Update a load balancer health monitor     |PUT    |{baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor  |
-+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-Active health monitoring provides two types of health monitors, CONNECT or HTTP. The caller can configure one health monitor per load balancer.
-
-The health monitor has a type attribute to signify which type it is. The required attributes for each type are as follows:
-
-**CONNECT Monitor**
-
-The monitor connects to each node on its defined port to ensure that the node is listening properly.
-
-The CONNECT monitor is the most basic type of health check and does not perform post-processing or protocol specific health checks. It includes several configurable properties:
-
-- delay: This is the minimum time in seconds between regular calls to a monitor. The default is 30 seconds.
-- timeout: Maximum number of seconds for a monitor to wait for a connection to be established to the node before it times out. The value cannot be greater than the delay value. The default is 30 seconds.
-- attemptsBeforeDeactivation: Number of permissible monitor failures before removing a node from rotation. Must be a number between 1 and 10. The default is 2 attempts.
-
-**HTTP Monitor**
-
-The HTTP monitor is more intelligent than the CONNECT monitor. It is capable of processing an HTTP response to determine the condition of a node. It supports the same basic properties as the CONNECT monitor and includes the additional attribute of path that is used to evaluate the HTTP response to a monitor probe.
-
-- path: The HTTP path used in the HTTP request by the monitor. This must be a string beginning with a / (forward slash). The monitor expects a response from the node with an HTTP status code of 200.
-
-The default health monitor configuration, when a load balancer is created, is:
-
-::
-
-    {
-        "type": "CONNECT",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2"
-    }
-
-Request Data
-~~~~~~~~~~~~
-
-Request data includes the desired configuration attributes of the health monitor.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-The request body includes the health monitor attributes.
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+------------------+---------------------+
-| HTTP Status Code | Description         |
-+==================+=====================+
-|202               |Accepted             |
-+------------------+---------------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-The response body contains the health monitor requested.
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+------------------+---------------------+
-| HTTP Status Code | Description         |
-+==================+=====================+
-|400               |Bad Request          |
-+------------------+---------------------+
-|401               |Unauthorized         |
-+------------------+---------------------+
-|404               |Not Found            |
-+------------------+---------------------+
-|405               |Not Allowed          |
-+------------------+---------------------+
-|500               |LBaaS Fault          |
-+------------------+---------------------+
-
-Example
-~~~~~~~
-
-**Contents of Request file node.json**
-
-**Request**
-
-::
-
-    {
-        "type": "CONNECT",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2"
-    }
-
-or:
-
-::
-
-    {
-        "type": "HTTP",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2",
-        "path": "/healthcheck"
-    }
-
-**Curl Request**
-
-::
-
-    curl -X PUT -H "X-Auth-Token:HPAuth_d17efd" --data-binary "@node.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/healthmonitor
-
-**Response**
-
-202 status with the following response body.
-
-::
-
-    {
-        "type": "CONNECT",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2"
-    }
-
-or:
-
-::
-
-    {
-        "type": "HTTP",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2",
-        "path": "/healthcheck"
-    }
-
-
-.. _api-monitor-delete:
-
-Reset Load Balancer Health Monitor
-----------------------------------
-
-Operation
-~~~~~~~~~
-
-+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
-|Resource            |Operation                                 |Method |Path                                                          |
-+====================+==========================================+=======+==============================================================+
-|Health Monitor      |Reset a load balancer health monitor      |DELETE |{baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor  |
-+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-Reset health monitor settings for a load balancer back to the following default configuration.
-
-::
-
-    {
-        "type": "CONNECT",
-        "delay": "30",
-        "timeout": "30",
-        "attemptsBeforeDeactivation": "2"
-    }
-
-Request Data
-~~~~~~~~~~~~
-
-None required.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
- -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+------------------+---------------------+ -| HTTP Status Code | Description | -+==================+=====================+ -|202 |Accepted | -+------------------+---------------------+ - -Response Body -~~~~~~~~~~~~~ - -None. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+------------------+---------------------+ -| HTTP Status Code | Description | -+==================+=====================+ -|400 |Bad Request | -+------------------+---------------------+ -|401 |Unauthorized | -+------------------+---------------------+ -|404 |Not Found | -+------------------+---------------------+ -|405 |Not Allowed | -+------------------+---------------------+ -|500 |LBaaS Fault | -+------------------+---------------------+ - - -Example -~~~~~~~ - - -**Curl Request** - -:: - - curl -X DELETE -H "X-Auth-Token:HPAuth_d17efd" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/healthmonitor - - -**Response** - -202 status with no response body. diff --git a/doc/api/rest/limits.rst b/doc/api/rest/limits.rst deleted file mode 100644 index c4714996..00000000 --- a/doc/api/rest/limits.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _api-limits: - -====== -Limits -====== - - -Get List of LBaaS API Limits ----------------------------- - -Operation -~~~~~~~~~~ - -+------------+----------------------------+----------+--------------------------+ -| Resource | Operation | Method | Path | -+============+============================+==========+==========================+ -| Limits | Get list of LBaaS limits | GET | {baseURI}/{ver}/limits | -+------------+----------------------------+----------+--------------------------+ - -Description -~~~~~~~~~~~ - -This method allows querying the LBaaS service for a list of API limits -which apply on a tenant basis. Each tenant may not utilize LBaaS API -resources exceeding these limits and will receive and over limit error -if attempted (413). - -+-----------------------------+------------------------------------------------------------+ -| Returned Limit Name | Value | -+=============================+============================================================+ -| maxLoadBalancers | Maximum number of load balancers allowed for this tenant | -+-----------------------------+------------------------------------------------------------+ -| maxNodesPerLoadBalancer | Maximum number of nodes allowed for each load balancer | -+-----------------------------+------------------------------------------------------------+ -| maxLoadBalancerNameLength | Maximum length allowed for a load balancer name | -+-----------------------------+------------------------------------------------------------+ -| maxVIPsPerLoadBalancer | Maximum number of Virtual IPs for each load balancer | -+-----------------------------+------------------------------------------------------------+ - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. 
- -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 200 | OK | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -The response body contains information regarding limits imposed for the -tenant making the request. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Curl Request** - -:: - - curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/limits - -**Response** - -:: - - { - "limits": { - "absolute": { - "values": { - "maxLoadBalancerNameLength": 128, - "maxLoadBalancers": 20, - "maxNodesPerLoadBalancer": 5, - "maxVIPsPerLoadBalancer": 1 - } - } - } - } - diff --git a/doc/api/rest/load-balancer.rst b/doc/api/rest/load-balancer.rst deleted file mode 100644 index be541196..00000000 --- a/doc/api/rest/load-balancer.rst +++ /dev/null @@ -1,817 +0,0 @@ -.. api-lb: - -============= -Load Balancer -============= - - -.. _api-lb-list: - -Get List Of All Load Balancers ------------------------------- - -Operation -~~~~~~~~~ - -+-----------------+----------------------------------+----------+---------------------------------+ -| Resource | Operation | Method | Path | -+=================+==================================+==========+=================================+ -| Load Balancer | Get list of all load balancers | GET | {baseURI}/{ver}/loadbalancers | -+-----------------+----------------------------------+----------+---------------------------------+ - -Description -~~~~~~~~~~~ - -This operation provides a list of all load balancers configured and -associated with your account. This includes a summary of attributes for -each load balancer. In order to retrieve all the details for a load -balancer, an individual request for the load balancer must be made. - -This operation returns the following attributes for each load balancer: - -**id :** Unique identifier for the load balancer - -**name :** Creator-assigned name for the load balancer - -**algorithm :** Creator-specified algorithm for the load balancer - -**protocol :** Creator-specified protocol for the load balancer - -**port :** Creator-specified port for the load balancer - -**status :** Current status, see section on load balancer status within -load balancer create - -**created :** When the load balancer was created - -**updated :** When the load balancer was last updated - -**nodeCount :** The number of backend servers attached to this load balancer - -**options :** Current options are timeout (30 sec) and retries (3) for each load balancer - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~~~~~~ - -None required. 
- -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 200 | OK | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~~~~~~ - -The response body contains a list of load balancers for the tenant -making the request. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Curl Request** - -:: - - curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers - -**Response** - -:: - - { - "loadBalancers":[ - { - "name":"lb-site1", - "id":"71", - "protocol":"HTTP", - "port":"80", - "algorithm":"LEAST_CONNECTIONS", - "status":"ACTIVE", - "created":"2010-11-30T03:23:42Z", - "updated":"2010-11-30T03:23:44Z", - "options": {"timeout": 30000, "retries": 3} - }, - { - "name":"lb-site2", - "id":"166", - "protocol":"TCP", - "port":"9123", - "algorithm":"ROUND_ROBIN", - "status":"ACTIVE", - "created":"2010-11-30T03:23:42Z", - "updated":"2010-11-30T03:23:44Z", - "options": {"timeout": 30000, "retries": 3} - } - ] - } - -.. _api-lb-status: - -Get Load Balancer Details -------------------------- - -Operation -~~~~~~~~~ - -+-----------------+--------------------------------+----------+--------------------------------------------------+ -| Resource | Operation | Method | Path | -+=================+================================+==========+==================================================+ -| Load Balancer | Get a specific load balancer | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId} | -+-----------------+--------------------------------+----------+--------------------------------------------------+ - -Description -~~~~~~~~~~~ - -This operation provides detailed description for a specific load -balancer configured and associated with your account. This operation is -not capable of returning details for a load balancer which has been -deleted. Details include load balancer virtual IP and node information. - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 200 | OK | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -The response body contains the load balancer requested or 404, if not -found. 
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-Example
-~~~~~~~
-
-**Curl Request**
-
-::
-
-    curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/2000
-
-**Response**
-
-::
-
-    {
-        "id": "2000",
-        "name":"sample-loadbalancer",
-        "protocol":"HTTP",
-        "port": "80",
-        "algorithm":"ROUND_ROBIN",
-        "status":"ACTIVE",
-        "created":"2010-11-30T03:23:42Z",
-        "updated":"2010-11-30T03:23:44Z",
-        "options": {"timeout": 30000, "retries": 3},
-        "virtualIps":[
-            {
-                "id": "1000",
-                "address":"192.168.1.1",
-                "type":"PUBLIC",
-                "ipVersion":"IPV4"
-            }
-        ],
-        "nodes": [
-            {
-                "id": "1041",
-                "address":"10.1.1.1",
-                "port": "80",
-                "condition":"ENABLED",
-                "status":"ONLINE"
-            },
-            {
-                "id": "1411",
-                "address":"10.1.1.2",
-                "port": "80",
-                "condition":"ENABLED",
-                "status":"ONLINE"
-            }
-        ]
-    }
-
-.. _api-lb-create:
-
-Create a New Load Balancer
---------------------------
-
-Operation
-~~~~~~~~~
-
-+-----------------+------------------------------+----------+---------------------------------+
-| Resource        | Operation                    | Method   | Path                            |
-+=================+==============================+==========+=================================+
-| Load Balancer   | Create a new load balancer   | POST     | {baseURI}/{ver}/loadbalancers   |
-+-----------------+------------------------------+----------+---------------------------------+
-
-Description
-~~~~~~~~~~~
-
-This operation provisions a new load balancer based on the configuration
-defined in the request object. Once the request is validated and
-progress has started on the provisioning process, a response object will
-be returned. The object will contain a unique identifier and the status of
-the request.
-
-If the status returned is set to 'BUILD', then, using the identifier of
-the load balancer, the caller can check on the progress of the creation
-operation by performing a GET on loadbalancers/{loadbalancerId}. When
-the status of the load balancer changes to 'ACTIVE', the load balancer
-has been successfully provisioned and is now operational.
-
-**Load Balancer Status Values**
-
-+-------------------+----------------------------------------------------------------+
-| Status Name       | Description                                                    |
-+===================+================================================================+
-| BUILD             | Load balancer is in a building state and not yet operational   |
-+-------------------+----------------------------------------------------------------+
-| ACTIVE            | Load balancer is in an operational state                       |
-+-------------------+----------------------------------------------------------------+
-| PENDING\_UPDATE   | Load balancer is in the process of an update                   |
-+-------------------+----------------------------------------------------------------+
-| ERROR             | Load balancer is in an error state and not operational         |
-+-------------------+----------------------------------------------------------------+
-
-The caller of this operation must specify at least the following
-attributes of the load balancer:
-
-- name
-- at least one node
-
-If the request cannot be fulfilled due to insufficient or invalid data,
-an HTTP 400 (Bad Request) error response will be returned with
-information regarding the nature of the failure in the body of the
-response. Failures in the validation process are non-recoverable and
-require the caller to correct the cause of the failure and POST the
-request again.
-
-By default, the system will create a load balancer with the protocol set to
-HTTP, the port set to 80 (or 443 if the protocol is TCP), and assign a public
-IPV4 address to the load balancer. There is also a third special-case
-protocol, "GALERA", that can be used to choose a primary write node when
-the load balancer is being used to deliver data to a Galera database
-cluster. The default load balancing algorithm is ROUND\_ROBIN.
-
-The default load balancer options are a 30 second timeout for client
-connections (30,000 ms) and 3 retries. Valid timeout values range from
-0 to 1000 seconds (1,000,000 ms), with 0 indicating no timeout. Retries
-can range from 0 to 256.
-
-A load balancer name has a maximum length that can be determined by
-querying limits.
-
-Users may configure all documented features of the load balancer at
-creation time by simply providing the additional elements or attributes
-in the request. This document provides an overview of all the features
-the load balancing service supports.
-
-If you have at least one load balancer, you may create subsequent load
-balancers that share a single virtual IP by issuing a POST and supplying
-a virtual IP ID instead of a type. This feature is also highly
-desirable if you wish to load balance both an unsecured and a secure
-protocol using one IP address. For example, this method makes it
-possible to use the same load balancing configuration to support both an
-HTTP and a TCP load balancer. Load balancers sharing a virtual IP must
-each use a unique port.
-
-Relevant weights can be assigned to nodes using the weight attribute of the
-node element. The weight of a node determines the portion of requests or
-connections it services compared to the other nodes of the load balancer. For
-example, if node A has a weight of 2 and node B has a weight of 1, then the
-load balancer will forward twice as many requests to node A as to node B. If
-the weight attribute is not specified, then the node's weight is implicitly
-set to "1". Weight values from 1 to 256 are allowed.
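-
-For instance, a request fragment giving node A twice the share of node B
-might look like the following (a sketch only; the addresses are
-placeholders, and full request-body examples are shown below):
-
-::
-
-    {
-        "nodes": [
-            {
-                "address": "10.1.1.1",
-                "port": "80",
-                "weight": "2"
-            },
-            {
-                "address": "10.1.1.2",
-                "port": "80",
-                "weight": "1"
-            }
-        ]
-    }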
-
-Note that nodes assigned to a load balancer that is delivering data to
-a Galera database cluster may require that a primary write node be
-specified to avoid database locking problems. For this case, a load
-balancer can be configured to use the special "GALERA" protocol type.
-When the "GALERA" protocol is chosen, each of the specified nodes must
-use the "backup" attribute to specify whether it is a backup node or the
-primary node. Only a single primary node may be specified, by setting
-the "backup" attribute to FALSE. All other nodes must have the "backup"
-attribute set to TRUE.
-
-Request Data
-~~~~~~~~~~~~
-
-The caller is required to provide request data with the POST which
-includes the appropriate information to create a new load balancer.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- **X-Auth-Token**
-- **Accept: application/json**
-- **Content-Type: application/json**
-
-Request Body
-~~~~~~~~~~~~
-
-The request body must follow the correct format for new load balancer
-creation; examples follow.
-
-**Request body example to create a load balancer with two nodes and an
-optional "weight" assigned. Note that a default weight of 1 does not
-have to be explicitly assigned**
-
-::
-
-    {
-        "name": "a-new-loadbalancer",
-        "nodes": [
-            {
-                "address": "10.1.1.1",
-                "port": "80",
-                "weight": "2"
-            },
-            {
-                "address": "10.1.1.2",
-                "port": "81"
-            }
-        ]
-    }
-
-**Request body example to create a load balancer using an existing load
-balancer virtual IP**
-
-::
-
-    {
-        "name":"a-new-loadbalancer",
-        "port":"80",
-        "protocol":"HTTP",
-        "options": {"timeout": 30000, "retries": 3},
-        "virtualIps": [
-            {
-                "id":"39"
-            }
-        ],
-        "nodes": [
-            {
-                "address":"10.1.1.1",
-                "port":"80",
-                "condition":"ENABLED"
-            }
-        ]
-    }
-
-**Request body example to create a load balancer that specifies a
-single primary write node for a Galera cluster**
-
-::
-
-    {
-        "name":"a-new-loadbalancer",
-        "port":"83",
-        "protocol":"GALERA",
-        "options": {"timeout": 30000, "retries": 3},
-        "virtualIps": [
-            {
-                "id":"39"
-            }
-        ],
-        "nodes": [
-            {
-                "address": "10.1.1.1",
-                "port": "3306",
-                "backup": "TRUE"
-            },
-            {
-                "address": "10.1.1.2",
-                "port": "3306",
-                "backup": "FALSE"
-            }
-        ]
-    }
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 202                | Accepted      |
-+--------------------+---------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-The response body contains the load balancer requested or an appropriate
-error.
-
-**Create Load Balancer (Required Attributes with Shared IP) Response:
-JSON**
-
-::
-
-    {
-        "name": "a-new-loadbalancer",
-        "id": "144",
-        "protocol": "HTTP",
-        "port": "83",
-        "algorithm": "ROUND_ROBIN",
-        "status": "BUILD",
-        "created": "2011-04-13T14:18:07Z",
-        "updated":"2011-04-13T14:18:07Z",
-        "options": {"timeout": 30000, "retries": 3},
-        "virtualIps": [
-            {
-                "address": "3ffe:1900:4545:3:200:f8ff:fe21:67cf",
-                "id": "39",
-                "type": "PUBLIC",
-                "ipVersion": "IPV6"
-            }
-        ],
-        "nodes": [
-            {
-                "address": "10.1.1.1",
-                "id": "653",
-                "port": "80",
-                "status": "ONLINE",
-                "condition": "ENABLED"
-            }
-        ]
-    }
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+-----------------------+
-| HTTP Status Code   | Description           |
-+====================+=======================+
-| 400                | Bad Request           |
-+--------------------+-----------------------+
-| 401                | Unauthorized          |
-+--------------------+-----------------------+
-| 404                | Not Found             |
-+--------------------+-----------------------+
-| 405                | Not Allowed           |
-+--------------------+-----------------------+
-| 413                | Over Limit            |
-+--------------------+-----------------------+
-| 500                | LBaaS Fault           |
-+--------------------+-----------------------+
-| 503                | Service Unavailable   |
-+--------------------+-----------------------+
-
-Example
-~~~~~~~
-
-**Contents of Request file lb.json**
-
-::
-
-    {
-        "name": "lb #1",
-        "protocol":"tcp",
-        "nodes": [
-            {
-                "address": "15.185.229.153",
-                "port": "443"
-            },
-            {
-                "address": "15.185.226.163",
-                "port": "443"
-            }
-        ]
-    }
-
-**Curl Request**
-
-::
-
-    curl -X POST -H "X-Auth-Token: TOKEN" --data-binary "@lb.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers
-
-**Response**
-
-::
-
-    {
-        "port":"443",
-        "id":"10",
-        "protocol":"tcp",
-        "updated":"2013-02-10T18:20Z",
-        "created":"2013-02-10T18:20Z",
-        "status":"BUILD",
-        "nodes":[
-            {
-                "port":"443",
-                "id":"19",
-                "condition":"ENABLED",
-                "status":"ONLINE",
-                "address":"15.185.229.153"
-            },
-            {
-                "port":"443",
-                "id":"20",
-                "condition":"ENABLED",
-                "status":"ONLINE",
-                "address":"15.185.226.163"
-            }
-        ],
-        "name":"lb #1",
-        "virtualIps":[
-            {
-                "id":"5",
-                "address":"15.185.96.125",
-                "ipVersion":"IPV4",
-                "type":"PUBLIC"
-            }
-        ],
-        "algorithm":"ROUND_ROBIN",
-        "options": {"timeout": 30000, "retries": 3}
-    }
-
-.. _api-lb-modify:
-
-Update an Existing Load Balancer
---------------------------------
-
-Operation
-~~~~~~~~~
-
-+-----------------+-----------------------------------+----------+--------------------------------------------------+
-| Resource        | Operation                         | Method   | Path                                             |
-+=================+===================================+==========+==================================================+
-| Load Balancer   | Update load balancer attributes   | PUT      | {baseURI}/{ver}/loadbalancers/{loadbalancerId}   |
-+-----------------+-----------------------------------+----------+--------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-This operation updates the attributes of the specified load balancer.
-Upon successful validation of the request, the service will return a 202
-(Accepted) response code. A caller should check that the load balancer
-status is ACTIVE to confirm that the update has taken effect. If the
-load balancer status is 'PENDING\_UPDATE', then the caller can poll the
-load balancer with its ID (using a GET operation) to wait for the
-changes to be applied and the load balancer to return to an ACTIVE
-status.
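-
-As a minimal sketch of this polling flow (TOKEN and the load balancer ID
-100 are placeholders; extracting the status with python is just one
-option, any JSON parser will do):
-
-::
-
-    while true; do
-      status=$(curl -s -H "X-Auth-Token: TOKEN" \
-        https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100 \
-        | python -c 'import json,sys; print(json.load(sys.stdin)["status"])')
-      # Stop polling once the update has been applied.
-      [ "$status" != "PENDING_UPDATE" ] && break
-      sleep 5
-    done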
- -This operation allows the caller to change one or more of the following -attributes: - -\*name - -\*algorithm - -\*options - -This operation does not return a response body. - -.. note:: - The load balancer ID, status, port and protocol are immutable - attributes and cannot be modified by the caller. Supplying an - unsupported attribute will result in a 400 (badRequest) fault. - -Request Data -~~~~~~~~~~~~ - -Load balancer body with attributes to be updated. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -**Example** - -:: - - { - "name": "newname-loadbalancer", - "algorithm": "LEAST_CONNECTIONS", - "options": {"timeout": 30000, "retries": 3} - } - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 202 | Accepted | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -None. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Contents of Request file lb.json** - -:: - - { - "name": "newname-loadbalancer", - "algorithm": "LEAST_CONNECTIONS", - "options": {"timeout": 30000, "retries": 3} - } - -**Curl Request** - -:: - - curl -X PUT -H "X-Auth-Token: TOKEN" --data-binary "@lb.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100 - -**Response** - -status with no response body. - -.. _api-lb-delete: - -Delete Load Balancer --------------------- - -Operation -~~~~~~~~~ - -+-----------------+------------------------------------+----------+--------------------------------------------------+ -| Resource | Operation | Method | Path | -+=================+====================================+==========+==================================================+ -| Load Balancer | Delete an existing load balancer | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId} | -+-----------------+------------------------------------+----------+--------------------------------------------------+ - -Description -~~~~~~~~~~~ - -Delete load balancer removes the specified load balancer and its -associated configuration from the account. Any and all configuration -data is immediately purged and is not recoverable. - -This operation does not require a request body. - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 202 | Accepted | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -None. 
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-Example
-~~~~~~~
-
-**Curl Example**
-
-::
-
-    curl -X DELETE -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100
-
-**Response**
-
-202 status with no response body.
-
diff --git a/doc/api/rest/logs.rst b/doc/api/rest/logs.rst
deleted file mode 100644
index 67b03966..00000000
--- a/doc/api/rest/logs.rst
+++ /dev/null
@@ -1,70 +0,0 @@
-.. _api-logs:
-
-====
-Logs
-====
-
-
-Archive log file to Object Storage
-----------------------------------
-
-Operation
-~~~~~~~~~
-
-+----------+------------------------------------+--------+-----------------------------------------------------+
-| Resource | Operation                          | Method | Path                                                |
-+==========+====================================+========+=====================================================+
-| Logs     | Archive log file to Object Storage | POST   | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/logs |
-+----------+------------------------------------+--------+-----------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-The operation tells the load balancer to push the current log file into an HP Cloud Object Storage container. The status of the load balancer will be set to 'PENDING_UPDATE' during the operation and back to 'ACTIVE' upon success or failure. A success/failure message can be found in the 'statusDescription' field when getting the load balancer details.
-
-**Load Balancer Status Values**
-
-+----------------+-----------------------------------------------+
-| Status Name    | Description                                   |
-+================+===============================================+
-| ACTIVE         | Load balancer is in an operational state      |
-+----------------+-----------------------------------------------+
-| PENDING_UPDATE | Load balancer is in the process of an update  |
-+----------------+-----------------------------------------------+
-
-By default, with empty POST data, the load balancer will upload to the Swift account owned by the same tenant as the load balancer, in a container called 'lbaaslogs'. To change this, the following optional parameters need to be provided in the POST body:
-
-**objectStoreBasePath** : the object store container to use
-
-**objectStoreEndpoint** : the object store endpoint to use, including the tenant ID, for example: https://region-b.geo-1.objects.hpcloudsvc.com:443/v1/1234567890123
-
-**authToken** : an authentication token to the object store for the load balancer to use
-
-Request Data
-~~~~~~~~~~~~
-
-The caller is required to provide request data with the POST which includes the appropriate information to upload logs.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-The request body must follow the correct format for log archiving; an example follows.
-
-A request that uploads the logs to a different object store:
-
-::
-
-    {
-        "objectStoreBasePath": "mylblogs",
-        "objectStoreEndpoint": "https://region-b.geo-1.objects.hpcloudsvc.com:443/v1/1234567890123",
-        "authToken": "HPAuth_d17efd"
-    }
\ No newline at end of file
diff --git a/doc/api/rest/node.rst b/doc/api/rest/node.rst
deleted file mode 100644
index b7ac8687..00000000
--- a/doc/api/rest/node.rst
+++ /dev/null
@@ -1,566 +0,0 @@
-.. _api-node:
-
-=====
-Nodes
-=====
-
-
-.. _api-node-list:
-
-List All Load Balancer Nodes
-----------------------------
-
-Operation
-~~~~~~~~~
-
-+------------+-----------------------------------+----------+--------------------------------------------------------+
-| Resource   | Operation                         | Method   | Path                                                   |
-+============+===================================+==========+========================================================+
-| Node       | Get list of load balancer nodes   | GET      | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes   |
-+------------+-----------------------------------+----------+--------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-List all nodes for a specified load balancer.
-
-Request Data
-~~~~~~~~~~~~
-
-None required.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-None required.
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 200                | OK            |
-+--------------------+---------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-The response body contains the load balancer nodes requested or 404, if
-not found.
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-Example
-~~~~~~~
-
-**Curl Example**
-
-::
-
-    curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes
-
-**Response**
-
-::
-
-    {
-        "nodes" : [
-            {
-                "id":"410",
-                "address":"10.1.1.1",
-                "port":"80",
-                "condition":"ENABLED",
-                "status":"ONLINE"
-            },
-            {
-                "id":"236",
-                "address":"10.1.1.2",
-                "port":"80",
-                "condition":"ENABLED",
-                "status":"ONLINE"
-            },
-            {
-                "id":"2815",
-                "address":"10.1.1.3",
-                "port":"83",
-                "condition":"DISABLED",
-                "status":"OFFLINE"
-            }
-        ]
-    }
-
-.. _api-node-status:
-
-Get Load Balancer Node
-----------------------
-
-Operation
-~~~~~~~~~
-
-+------------+-------------------------------------+----------+-----------------------------------------------------------------+
-| Resource   | Operation                           | Method   | Path                                                            |
-+============+=====================================+==========+=================================================================+
-| Node       | Get a specific load balancer node   | GET      | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId}   |
-+------------+-------------------------------------+----------+-----------------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-This operation retrieves the configuration of a node.
-
-Request Data
-~~~~~~~~~~~~
-
-None required.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-None required.
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 200                | OK            |
-+--------------------+---------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-The response body contains the load balancer node requested or 404, if
-not found.
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-Example
-~~~~~~~
-
-**Curl Example**
-
-::
-
-    curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/410
-
-**Response**
-
-::
-
-    {
-        "id":"410",
-        "address":"10.1.1.2",
-        "port":"80",
-        "condition":"ENABLED",
-        "status":"ONLINE"
-    }
-
-.. _api-node-create:
-
-Create Load Balancer Node
--------------------------
-
-Operation
-~~~~~~~~~
-
-+------------+-----------------------------------+----------+--------------------------------------------------------+
-| Resource   | Operation                         | Method   | Path                                                   |
-+============+===================================+==========+========================================================+
-| Node       | Create a new load balancer node   | POST     | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes   |
-+------------+-----------------------------------+----------+--------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-Add a new node to any existing load balancer. When a node is added, it is
-assigned a unique identifier that can be used for mutating operations
-such as changing the condition or removing the node from the load
-balancer. When a node is added to a load balancer, it is enabled by
-default.
-
-Relevant weights can be assigned to nodes using the weight attribute of the
-node element. The weight of a node determines the portion of requests or
-connections it services compared to the other nodes of the load balancer. For
-example, if node A has a weight of 2 and node B has a weight of 1, then the
-load balancer will forward twice as many requests to node A as to node B. If
-the weight attribute is not specified, then the node's weight is implicitly
-set to "1". Weight values from 1 to 256 are allowed.
-
-Request Data
-~~~~~~~~~~~~
-
-The request must contain information regarding the new node to be added.
-More than one node can be added at a time.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-The request body defines the attributes of the new node to be created.
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 202                | Accepted      |
-+--------------------+---------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-The response body contains the newly created nodes or 404, if not
-found.
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 413                | Over Limit     |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-Example
-~~~~~~~
-
-**Contents of Request file nodes.json**
-
-::
-
-    {
-        "nodes": [
-            {
-                "address": "10.1.1.1",
-                "port": "80",
-                "weight": "2"
-            },
-            {
-                "address": "10.2.2.1",
-                "port": "80",
-                "weight": "4"
-            },
-            {
-                "address": "10.2.2.2",
-                "port": "88",
-                "condition": "DISABLED"
-            }
-        ]
-    }
-
-**Curl Request**
-
-::
-
-    curl -X POST -H "X-Auth-Token: TOKEN" --data-binary "@nodes.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes
-
-**Response**
-
-::
-
-    {
-        "nodes": [
-            {
-                "id": "7298",
-                "address": "10.1.1.1",
-                "port": "80",
-                "condition": "ENABLED",
-                "status": "ONLINE",
-                "weight": "2"
-            },
-            {
-                "id": "293",
-                "address": "10.2.2.1",
-                "port": "80",
-                "condition": "ENABLED",
-                "status": "OFFLINE",
-                "weight": "4"
-            },
-            {
-                "id": "183",
-                "address": "10.2.2.2",
-                "port": "88",
-                "condition": "DISABLED",
-                "status": "OFFLINE"
-            }
-        ]
-    }
-
-.. _api-node-modify:
-
-Update Load Balancer Node Condition
------------------------------------
-
-Operation
-~~~~~~~~~
-
-+------------+-------------------------------+----------+-----------------------------------------------------------------+
-| Resource   | Operation                     | Method   | Path                                                            |
-+============+===============================+==========+=================================================================+
-| Node       | Update a load balancer node   | PUT      | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId}   |
-+------------+-------------------------------+----------+-----------------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-Every node in the load balancer is either enabled or disabled, which
-determines its role within the load balancer. When the node has
-condition='ENABLED', the node is permitted to accept new connections. Its
-status will eventually become 'ONLINE' to reflect this configuration.
-When the node has condition='DISABLED', the node is not permitted to
-accept any new connections. Existing connections to the node are
-forcibly terminated. The node's status changes to 'OFFLINE' once the
-configuration has been successfully applied.
-
-Relevant weights can be assigned to nodes using the weight attribute of the
-node element. The weight of a node determines the portion of requests or
-connections it services compared to the other nodes of the load balancer. For
-example, if node A has a weight of 2 and node B has a weight of 1, then the
-load balancer will forward twice as many requests to node A as to node B. If
-the weight attribute is not specified, then the node's weight is implicitly
-set to "1". Weight values from 1 to 256 are allowed.
-
-The node IP and port are immutable attributes and cannot be modified
-with a PUT request. Supplying an unsupported attribute will result in a
-fault. A load balancer supports a maximum number of nodes; the
-maximum number of nodes per load balancer is returned when querying the
-limits of the load balancer service.
-
-Request Data
-~~~~~~~~~~~~
-
-Request data includes the desired condition of the node as well as the
-optional weight of the node.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-The request body includes the node 'condition' attribute and its desired
-state.
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 202                | Accepted      |
-+--------------------+---------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-None.
-
-Error Response Codes
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+----------------+
-| HTTP Status Code   | Description    |
-+====================+================+
-| 400                | Bad Request    |
-+--------------------+----------------+
-| 401                | Unauthorized   |
-+--------------------+----------------+
-| 404                | Not Found      |
-+--------------------+----------------+
-| 405                | Not Allowed    |
-+--------------------+----------------+
-| 500                | LBaaS Fault    |
-+--------------------+----------------+
-
-Example
-~~~~~~~
-
-**Contents of Request file node.json**
-
-::
-
-    {
-        "condition": "DISABLED"
-    }
-
-    OR
-
-    {
-        "condition": "ENABLED",
-        "weight": "2"
-    }
-
-**Curl Request**
-
-::
-
-    curl -X PUT -H "X-Auth-Token: TOKEN" --data-binary "@node.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/100
-
-**Response**
-
-202 status with no response body.
-
-.. _api-node-delete:
-
-Delete Load Balancer Node
--------------------------
-
-Operation
-~~~~~~~~~
-
-+------------+-------------------------------+----------+-----------------------------------------------------------------+
-| Resource   | Operation                     | Method   | Path                                                            |
-+============+===============================+==========+=================================================================+
-| Node       | Delete a load balancer node   | DELETE   | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId}   |
-+------------+-------------------------------+----------+-----------------------------------------------------------------+
-
-Description
-~~~~~~~~~~~
-
-Delete a node from a load balancer.
-
-.. note::
-    A load balancer must have at least one node. Attempting to remove the last
-    node of a load balancer will result in a 401 error.
-
-Request Data
-~~~~~~~~~~~~
-
-None required.
-
-Query Parameters Supported
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-None required.
-
-Required HTTP Header Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**X-Auth-Token**
-
-Request Body
-~~~~~~~~~~~~
-
-None required.
-
-Normal Response Code
-~~~~~~~~~~~~~~~~~~~~
-
-+--------------------+---------------+
-| HTTP Status Code   | Description   |
-+====================+===============+
-| 202                | Accepted      |
-+--------------------+---------------+
-
-Response Body
-~~~~~~~~~~~~~
-
-None.
- -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Curl Request** - -:: - - curl -X DELETE -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/100 - -**Response** - -status with no response body. \ No newline at end of file diff --git a/doc/api/rest/protocols.rst b/doc/api/rest/protocols.rst deleted file mode 100644 index 26856e64..00000000 --- a/doc/api/rest/protocols.rst +++ /dev/null @@ -1,107 +0,0 @@ -.. _api-protocols: - -========= -Protocols -========= - - -Get List of Supported LBaaS Protocols -------------------------------------- - -Operation -~~~~~~~~~ - -+-------------+-----------------------------------+----------+-----------------------------+ -| Resource | Operation | Method | Path | -+=============+===================================+==========+=============================+ -| Protocols | Get list of supported protocols | GET | {baseURI}/{ver}/protocols | -+-------------+-----------------------------------+----------+-----------------------------+ - -Description -~~~~~~~~~~~ - -All load balancers must be configured with the protocol of the service which is -being load balanced. The protocol selection should be based on the protocol of -the back-end nodes. The current specification supports HTTP (port 80) and TCP -(port 443) services. HTTPS traffic is supported currently via the TCP -connection. Support for SSL termination on the load balancer is not -currently supported. - - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 200 | OK | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -The response body contains the currently supported protocols and port -numbers. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Curl Request** - -:: - - curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/protocols - -**Response** - -:: - - { - "protocols": [ - { - "name": "HTTP", - "port": 80 - }, - { - "name": "TCP", - "port": 443 - } - ] - } \ No newline at end of file diff --git a/doc/api/rest/vip.rst b/doc/api/rest/vip.rst deleted file mode 100644 index 8024a8a9..00000000 --- a/doc/api/rest/vip.rst +++ /dev/null @@ -1,101 +0,0 @@ -.. 
_api-vips: - -=========== -Virtual IPs -=========== - - -Get List of Virtual IPs ------------------------ - -Operation -~~~~~~~~~ - -+--------------+---------------------------+----------+-------------------------------------------------------------+ -| Resource | Operation | Method | Path | -+==============+===========================+==========+=============================================================+ -| Virtual IP | Get list of virtual IPs | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/virtualips | -+--------------+---------------------------+----------+-------------------------------------------------------------+ - -Description -~~~~~~~~~~~ - -This operation lists all the virtual IP addresses of a load balancer. The -maximum number of VIPs that can be configured when creating a load -balancer can be discovered by querying the limits of the load balancer service. - -Request Data -~~~~~~~~~~~~ - -None required. - -Query Parameters Supported -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -None required. - -Required HTTP Header Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**X-Auth-Token** - -Request Body -~~~~~~~~~~~~ - -None required. - -Normal Response Code -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+---------------+ -| HTTP Status Code | Description | -+====================+===============+ -| 200 | OK | -+--------------------+---------------+ - -Response Body -~~~~~~~~~~~~~ - -The response body contains the load balancer VIP list requested or 404, -if not found. - -Error Response Codes -~~~~~~~~~~~~~~~~~~~~ - -+--------------------+----------------+ -| HTTP Status Code | Description | -+====================+================+ -| 400 | Bad Request | -+--------------------+----------------+ -| 401 | Unauthorized | -+--------------------+----------------+ -| 404 | Not Found | -+--------------------+----------------+ -| 405 | Not Allowed | -+--------------------+----------------+ -| 500 | LBaaS Fault | -+--------------------+----------------+ - -Example -~~~~~~~ - -**Curl Request** - -:: - - curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/virtualips - -**Response** - -:: - - { - "virtualIps": [ - { - "id": "1021", - "address": "206.10.10.210", - "type": "PUBLIC", - "ipVersion": "IPV4" - } - ] - } diff --git a/doc/architecture/index.rst b/doc/architecture/index.rst deleted file mode 100644 index 1024d4de..00000000 --- a/doc/architecture/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _architecture: - -Architecture -============ - -.. toctree:: - :maxdepth: 2 - - production - logical \ No newline at end of file diff --git a/doc/architecture/logical.rst b/doc/architecture/logical.rst deleted file mode 100644 index dccec4a1..00000000 --- a/doc/architecture/logical.rst +++ /dev/null @@ -1,32 +0,0 @@ -==================== -Logical architecture -==================== - -See information for each component for more information. - -* :ref:`libra-pool-mgm` - A node pool manager to keep a warm spare pool of load balancers ready -* :ref:`libra-worker` - A node worker to asynchronously communicate to the API server -* :ref:`libra-api` - A customer API server -* :ref:`libra-admin-api` - An administrative API server - -The API server is based on a modified version of the `Atlas API specification -`_. - -High level overview -------------------- - -.. image:: /img/libralayout.png - -Here you can see that the pool manager spins up the required Nova nodes with -the load balancer image. It then hands the details of these nodes over to the -Admin API server. 
-
-The client sends an API request to the API server, which in turn sends the
-configuration information to the worker on the load balancer node. The worker
-has a plugin system to speak to multiple load balancer types but is currently
-designed to use HAProxy.
-
-The statsd monitoring system routinely probes the workers and can alert on as
-well as disable faulty nodes.
-
-The parts of this diagram in orange are provided by the Libra codebase.
diff --git a/doc/architecture/production.rst b/doc/architecture/production.rst
deleted file mode 100644
index 90783786..00000000
--- a/doc/architecture/production.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-.. _architecture-production:
-
-=======================
-Production Architecture
-=======================
-
-See the documentation for each component for more information.
-
-* :ref:`libra-pool-mgm` - A node pool manager to keep a warm spare pool of load balancers ready
-* :ref:`libra-worker` - A node worker to asynchronously communicate to the API server
-* :ref:`libra-api` - A customer API server
-* :ref:`libra-admin-api` - An administrative API server
-
-High level overview
--------------------
-
-* Some cloud or virtualization system.
-* User and/or Tenant with required privileges / resources.
-* Ubuntu 12.04 Precise x86_64 image for :term:`instance`.
-* HAProxy for LoadBalancers.
-* Gearman for Libra service communication.
-* MySQL Galera Multi-master cluster for HA databases.
-
-Think of each service as an :term:`instance`; for each service (or
-:term:`instance` running services) we create one per :term:`az`.
-
-
-Diagram
--------
-In the case below the setup is:
-
-* 1 gearman :term:`instance` per :term:`az`.
-* 1 MySQL Galera :term:`instance` per :term:`az`.
-* n+ workers running HAProxy across multiple :term:`az`
-
-.. image:: /img/production.png
diff --git a/doc/conf.py b/doc/conf.py
deleted file mode 100644
index 18dbcd79..00000000
--- a/doc/conf.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# OpenStack CI documentation build configuration file, created by
-# sphinx-quickstart on Mon Jul 18 13:42:23 2011.
-#
-# This file is execfile()d with the current directory set to its containing
-# dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import datetime
-import sys
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-#extensions = ['rst2pdf.pdfbuilder']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Libra LBaaS Toolset'
-copyright = u'2013, Hewlett-Packard Development Company, L.P.'
- -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = "%d-%02d-%02d-beta" % ( - datetime.datetime.now().year, - datetime.datetime.now().month, - datetime.datetime.now().day -) -# The full version, including alpha/beta/rc tags. -release = "%d-%02d-%02d-beta" % ( - datetime.datetime.now().year, - datetime.datetime.now().month, - datetime.datetime.now().day -) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'LBaaSdoc'
-
-
-# -- Options for LaTeX output -------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author,
-# documentclass [howto/manual]).
-latex_documents = [
-    ('index', 'Libra-{0}.tex'.format(version), u'Libra LBaaS Toolset Documentation',
-     u'Hewlett-Packard Development Company, L.P.', 'manual'),
-]
-
-#pdf_documents = [('index', 'Libra-{0}'.format(version), u'Libra Client, Worker and Pool Manager Documentation', u'Andrew Hutchings and David Shrewsbury')]
-
-#pdf_break_level = 1
-
-#pdf_stylesheets = ['sphinx', 'libra']
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output -------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-#man_pages = [
-#    ('index', 'lbaas', u'Libra LBaaS Toolset',
-#     [u'Hewlett-Packard Development Company, L.P.'], 1)
-#]
diff --git a/doc/config.rst b/doc/config.rst
deleted file mode 100644
index a238740c..00000000
--- a/doc/config.rst
+++ /dev/null
@@ -1,236 +0,0 @@
-.. _configuration:
-
-=============
-Configuration
-=============
-
-Configuration of Services
-=========================
-
-Configuration File Format
--------------------------
-  Libra uses the `Oslo configuration library `_
-  so its format is similar to that of other OpenStack programs.
-
-DEFAULT Section
-^^^^^^^^^^^^^^^
-
-  The ``[DEFAULT]`` section contains generic options common to the various
-  Libra utilities (worker, mgm, etc).
-
-  .. code-block:: ini
-
-    [DEFAULT]
-    daemon = true
-    user = libra
-    group = libra
-    verbose = false
-    debug = false
-    billing_enable = false
-    notification_driver = []
-    default_notification_level = INFO
-    default_publisher_id = None
-    host = localhost
-    kombu_ssl_version =
-    kombu_ssl_keyfile =
-    kombu_ssl_certfile =
-    kombu_ssl_ca_certs =
-    rabbit_use_ssl = false
-    rabbit_userid = guest
-    rabbit_password = guest
-    rabbit_host = localhost
-    rabbit_port = 5672
-    rabbit_hosts = []
-    rabbit_virtual_host = /
-    rabbit_retry_interval = 1
-    rabbit_retry_backoff = 2
-    rabbit_max_retries = 0
-    rabbit_ha_queues = false
-    control_exchange = openstack
-    amqp_durable_queues = false
-
-  Options supported in this section:
-
-  .. option:: daemon
-
-     Run as a daemon. Default is 'true'.
-
-  .. option:: user
-
-     Specifies the user for the process when in daemon mode. Default is the
-     current user.
-
-  .. option:: group
-
-     Specifies the group for the process when run in daemon mode.
-
-  .. option:: verbose
-
-     Prints more verbose output. Sets the logging level to INFO from WARNING.
-
-  .. option:: debug
-
-     Prints debug output. Sets the logging level to DEBUG from WARNING.
-
-  .. option:: billing_enable
-
-     Enables the sending of billing information to a RabbitMQ host. It sends
-     create and delete load balancer messages, as well as exists and usage
-     messages on a periodic, configurable basis. See the admin_api config.
-
-  .. option:: notification_driver
-
-     Driver or drivers to handle sending notifications for metering /
-     billing. For instance, the OpenStack RPC driver is
-     openstack.common.notifier.rpc_notifier.
-
-  .. option:: default_notification_level
-
-     Default notification level for outgoing notifications.
-
-  .. option:: default_publisher_id
-
-     Default publisher_id for outgoing notifications.
-
-  .. option:: host
-
-     Default host name to use in notifications. Will use default_publisher_id
-     or gethostname() if not set.
-
-  .. option:: kombu_ssl_version
-
-     SSL version to use (valid only if SSL enabled). Valid values are TLSv1,
-     SSLv23 and SSLv3. SSLv2 may be available on some distributions.
-
-  .. option:: kombu_ssl_keyfile
-
-     SSL key file (valid only if SSL enabled).
-
-  .. option:: kombu_ssl_certfile
-
-     SSL cert file (valid only if SSL enabled).
-
-  .. option:: kombu_ssl_ca_certs
-
-     SSL certification authority file (valid only if SSL enabled).
-
-  .. option:: rabbit_use_ssl
-
-     Connect over SSL for RabbitMQ.
-
-  .. option:: rabbit_userid
-
-     The RabbitMQ userid.
-
-  .. option:: rabbit_password
-
-     The RabbitMQ password.
-
-  .. option:: rabbit_host
-
-     The RabbitMQ broker address where a single node is used.
-
-  .. option:: rabbit_port
-
-     The RabbitMQ broker port where a single node is used.
-
-  .. option:: rabbit_hosts
-
-     RabbitMQ HA cluster host:port pairs.
-
-  .. option:: rabbit_virtual_host
-
-     The RabbitMQ virtual host.
-
-  .. option:: rabbit_retry_interval
-
-     How frequently to retry connecting to RabbitMQ.
-
-  .. option:: rabbit_retry_backoff
-
-     How long to back off between retries when connecting to RabbitMQ.
-
-  .. option:: rabbit_max_retries
-
-     Maximum number of retries when trying to connect to RabbitMQ (the
-     default of 0 implies an infinite retry count).
-
-  .. option:: rabbit_ha_queues
-
-     Use H/A queues in RabbitMQ (x-ha-policy: all). You need to wipe the
-     RabbitMQ database when changing this option.
-
-  .. option:: control_exchange
-
-     AMQP exchange to connect to if using RabbitMQ or Qpid.
-
-  .. option:: amqp_durable_queues
-
-     Use durable queues in AMQP.
-
-Gearman Section
-^^^^^^^^^^^^^^^
-
-  The ``[gearman]`` section contains options specific to connecting to
-  a Gearman job server. All of the Libra utilities will read this section
-  since each connects to Gearman.
-
-  In order to support SSL connections, it is required that all three SSL
-  related options be supplied. Also, the user owning the process must be
-  able to read all SSL files.
-
-  .. code-block:: ini
-
-    [gearman]
-    servers = 10.0.0.1:4730, 10.0.0.2:4730
-    poll = 1
-    ssl_ca = /etc/ssl/gearman.ca
-    ssl_cert = /etc/ssl/gearman.cert
-    ssl_key = /etc/ssl/gearman.key
-
-  Options supported in this section:
-
-  .. option:: keepalive
-
-     Enable TCP KEEPALIVE pings. Default is 'false'.
-
-  .. option:: keepcnt
-
-     Max KEEPALIVE probes to send before killing the connection.
-
-  .. option:: keepidle
-
-     Seconds of idle time before sending KEEPALIVE probes.
-
-  .. option:: keepintvl
-
-     Seconds between TCP KEEPALIVE probes.
-
-  .. option:: poll
-
-     Gearman worker polling timeout. Default is 1.
-
-  .. option:: reconnect_sleep
-
-     Seconds to sleep between job server reconnects. Default is 60.
-
-  .. option:: servers
-
-     Comma-separated list of Gearman job servers in HOST:PORT format.
-
-  .. option:: ssl_ca
-
-     Gearman SSL certificate authority.
-
-  .. option:: ssl_cert
-
-     Gearman SSL certificate.
-
-  .. option:: ssl_key
-
-     Gearman SSL key.
diff --git a/doc/glossary.rst b/doc/glossary.rst
deleted file mode 100644
index d8696d2e..00000000
--- a/doc/glossary.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-========
-Glossary
-========
-
-.. glossary::
-
-    instance
-        A Virtual Machine in "Cloud" speak.
-
-    az
-        A logical grouping of resources typically used to provide HA.
-
-    database
-        Software that stores data, such as a SQL server or similar.
-
-    device
-        A load balancer device, which either runs in software (aka
-        :ref:`libra-worker` with :term:`haproxy`) or as some other kind of
-        software / hardware.
-
-    vip
-        A virtual IP is an IP address which is assigned to the :term:`device`
-        and can be moved around if needed.
-
-    gearman
-        A job system. See http://gearman.org/ for more info.
-
-    haproxy
-        A software load balancer that typically runs on Linux. Used as the
-        base for the Libra LBaaS tools.
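As a quick illustration of the configuration format documented in the deleted
``doc/config.rst`` above, here is a minimal sketch of a ``libra.cfg`` that
combines the ``[DEFAULT]`` notification options with the ``[gearman]``
keepalive options described there. The host addresses, credentials, and values
are illustrative assumptions, not defaults taken from the Libra source.

.. code-block:: ini

    # Hypothetical libra.cfg sketch; hosts, credentials and values are
    # placeholders, not values shipped with Libra.
    [DEFAULT]
    daemon = true
    verbose = true
    # Send create/delete/exists/usage billing messages over RabbitMQ.
    billing_enable = true
    notification_driver = openstack.common.notifier.rpc_notifier
    rabbit_host = 10.0.0.5
    rabbit_userid = libra
    rabbit_password = secret

    [gearman]
    servers = 10.0.0.1:4730, 10.0.0.2:4730
    # TCP keepalive options documented above; values are examples only.
    keepalive = true
    keepidle = 60
    keepintvl = 10
    keepcnt = 5
    reconnect_sleep = 60
    # All three SSL options must be supplied together.
    ssl_ca = /etc/ssl/gearman.ca
    ssl_cert = /etc/ssl/gearman.cert
    ssl_key = /etc/ssl/gearman.key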
diff --git a/doc/img/libralayout.png b/doc/img/libralayout.png deleted file mode 100644 index aaca394750ddba2c2533103c726123a615de4bb5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65055 zcmbTd1yojF^fvg?-Q67uNOy^(h#;NPNOyNhOG!$1cXyW}AR^t;-Cc9=H{Z8r&CHs$ z=C4aHzTWrTbN4y>+57DMJYgT?rO}Xyks%NW+FKb3B?tuiCj!{GT3RFP<*IrtF4>Fs+-gheEHbRuZ(Oz$2Dgc9;r zLQKVNZhz6$4R^8!{@AtQlQK^`fead-*amZ_@jJ#+_Mz5LS2`I{d@A(Z=by`Gqn(2ay{5 zg1`qt1qB7!+1cqEU@lS{VPRrnaa-@~?C^Q=z}pk7eaK^hhHPI5;G?QiCj6jOf0?Di zxH{d&KNlQKNl6I_pyb-Uz;_Gz78+V){$p%xP;cIq^2G}zAM3fEN>xUc1`F`T?`FTm z)wRNZe>F6wXP{=GLT4jpGd`w-g@fl{!@WIBp$o68QDbg^B7QpHj#7itUrAYk*8 zKt{$XZdrLb$>iB_ZAGQnTM7DBxolOMCGKt7=o@P+m!l=L!0xUtIWWV-?5ZkEdwY9CtLcOwu?vz{bV{Ek(YtQW z2x5|xhyJ^TYIA|#4GvbU;6oJ{r1_RBL?ydxvQ#}bHFfyk9T=U%>J?q>?vyA2#9#D2 zASx*-#iXSC_&@(|Ax&rIL`>A;^=O9A(C~2JxY=j5QWJf8YU-SjR5fKpCQEI6SZ+c= zQ*|>gVi{WO2;b{y|L0XgVk3^kq%FHFh7vDOfmg&OkyIcOatH;#ik1jb;Wcg}^8dXi z_-AMiq8KLO^E-!z5;MVndfqS8|9>wz1F{3ulweu~l#hnY*spZio+t=OgZ;9mmG&63v7^e6Sf_z<*i#Q#RESQ)x$g)g$R#w&qY{Vz?HJNt1 zGgNKYm;1>6H$TH@_?*`<$ul|78Z5X`lK)M1$iZ5p9yQ|W4DWYbWv=Csj~Uq%`l!tz z<8KQr&rRGYvW1BKpCmCqj6>x|LkgoQguL(VP8NvL1YC-Kn|BDgb8*4JF)|PJe(@bK zp~CrZbCBnD`?>0Q!24R?{9;qJZu9;HcVE?vGk}#nSLnvcq*!HVW3JM6} zy)O#dEojhn6k{F^l;Xgeain_&@m-F|@u;^Fodo%acX>PQragpKP%t`9+#~0h39YaI<(gg!^Jzb$7 z`}+qcGdN?^x5rTXkB&GMy5T{AaVP`AXF>u_I|dn_C zp7deiLmYBNVgI2-DD%K~8UzT$zdJi-7|)sKBMFJN)4>DcWzCU8t*66lRQG@G<)7W(B zwT(PagY7MvvNT5bEROWZ#A3ZO%%~Iv421C9V|~+@ikAT;Kc8-41p{fx;Mt@Ru}<9N zkd=-3GWkNLSlMy-oxG{58BHX;*V-m4te-lA;Fx&3 z2Bz@SsXbvJ=N@GEWpifaYPgbZ+y_Ye3#WoS6*q9!g3fS|FWwd}!DvQz1Hw8|h{&6_>~n(z0b2lRL*!_#l>%0*Yy6=#n}xY;j|=?lw@L#A z;Z;&@V*k%AcQYsvArLIXz){}Kb$f_kVWVyHSc2M1jrqAAGd9YL+j>uVX=kSGz0z4r z1Yyz{+CZ=kn1cgrQIJp1J|t%@XPu4w0I|yJY@8W^qyJ5kU()+w3a3s;A{`M+=VKqC!5Nc9mnyf-mk9P*23 z9TXl6i{M9NQ0Hy(^jMx2f)n$rFK4L>3OYW%6w{C}wF#vUwtj|`AwyJhe)tR#A2woC zjn?x)i^$q*(I=^%{3flDM$OlhkY~8yh5qM$a0{(4jR(!23 zRCV6ca$bjYgkRvx9(OwVd-=hx;|f=wzGO7zZVv{ zxozy4U~P|*W_T}lz6x`FSc_qQdu^cHbVMBXc0%_}XLj_8=+l*mi_MZ>Ua5LkO@r99 z?!)mT@3Oe{5d`A)R8LT4f58fYgh>R%;$-ygvl3{W^UO6Jqew_fQn9gNTGXdJhmTOg zWQuq&&xM~dAyUeaW*sO`1nDMEck%4HlDn*oq=-Lx62R5m-0pfNduvAvyjfO}ki_E{XIJGK8%ZKJ7ETOpplDhQ6ZT&$t3iEU8%tyj%s=|491~e;rT-jlh@**5kcM>+ z9)w>xH4AJ~GBT=I4W9+$XDGIS?&*0G8;dy@kOfag94Yff5J6N_bZEoj--sU9^)`#d zyc0@U--ithKD58c0HuhlNvUERULO=H>;o|{ z0GVOBJVL(G_y04@kERQXzudLIS*OB>pG0Yw4>9t+565SL*0;1oY1Tp4*4F;-9b#nl zsJr`+aAF*GgevsD82Eo!4Bn{WAiXmpBb{tOLD{`Nx-+4cDz8NQDY1fz?k*yTA9k zukqrS2t!?MhwojQjSPCjT`;|4ck{?1Evt=c(U1=r7IkGEwsE$z z>+5GP{H`{VmXMaNr7WRL8aCh}UfthEWo2cR$^R|Y81*mpgzY+a^fWM_LWK{!iDria zV@r5=2)M>Pp~VIOXFv~=fEE;Nl_HA&535UvP>GUKcwCsI0pd&a{d?~J_#*Ld1`_|{ z%lou1lCLEs&R6mZ3JeFM$P#=QK+5nEFD*SCi<}%?U45E3QZM0!a)HXtMKaCJeNJ-_Gj%8HU)bs?Q5FDQjQqYKsQaEn@?7lndj0U;& z8n{XiC$g+=ZzEb-S`Ha=^bHJ9i-?e4US5WN`SQ72|MlCqNXW>@F==UDdj9T*3(Yfi z<~We9VWoT}Dh37wklV8nq7w2u(01Hxl3d^3LPPSDUVv!5F`RS)oC*TT z$;n|e`$fr^AbXex8wytcyenPATML#F8gndNQ2nsAtxf3Un`s6XRH%!K%U|$4ORh)j zw{PDXs;WL6ul7np{(@u2ORuD)BnBxdsc`TXSdxmk%Wi9E_ETk=c)^})k-QQRLojmC z5juqkbA#DqHf144w*N1chhD|++^esFrUDf7|gb($2*9h_uVDcix)c`!{g)QSP}Rv0iB(q zzkmNezS+uj+{&>qc!tj!2*)?nbU{~mNSd(cWk&%OBJ}3ZZ_2A@OzIWMTZ#$a;~0$o z6e&XdR(nHo6*5J*QyN1;5SBdl%84Uuy>1-j()h5%gHioUOPM4T`+n!Yvs8^zD#itm zhJ)-cwWkW=|Jz>N^o{cq$N_fC6K;10jZ9vfVA&yj&bulJ7@gd zfrU^~Q9&VKke5x?P5k~1$@;A;4dN=uiJ1R9KHLElBhAdrtVHrI6^%C)%BORL9QZ0xL+WgEWW=}eXmn{L6pP00_S_tmkge-p)5(deKp`{g(@4sT2>v<;#lBlBGJ*V9sLtZx5H zPEJl3Sy=kJ{9&qX76lyd=38(6MzCUHV(LHoUL7vZba>wh527F;bw`m2+HL&6W7e#J z1ygxZ zuHDJf=Vf%-pQ&D0Hi{Phbof2DWrPD61x4}02Z*Pqr(7xz8Ui|L*Edd6Onm$SvdEH1 
z*XJETZpiuRCmX_0VTK2C2nI#4%FFU`F0DqJNC0S4ZcAX3stY*KW$Lw;7pj%^4@8o%1Mhno0uWa0>G1(7 zYl@Yf>R%RTA@OM}BQh(Ca@$Ej=w>~Rk%_6bat8oN!|ww0!Dxz*akDa~G!U|=czEzT zIy$yzs*T#q*KRNNsFH?{X4ls)-^o?zGzavCU^s43{!5#n%gV}*doe^`W5V|J^{qTy zEmu0s?C)C{8yhD-0noC$*i}vAcZ3J9^jWLCf(sXega@@yA@kLhyP+X9m@7hHx8G6c zQ^&wTp{3!oNrXMPaFP2zhLCVu#JeZYoqv~8v8D#qC1EyR+a+Ajt3wt6e*Q3s1SF@u zy`2yXi^>N$0$y;YfOCm^6Ih8*1(BP)nOtcMfS%*`H|>u-Jv~3(bF#B5rt#V5Y_G0T zL1Th%_S-DBgi%pZ<$(B2LqpRK+*PgRzPPyfZ(?B5(ojetlnF)^_V3F)Nq+Uf?a%F4?2i3P%k0kds7W5J4@UtJLt%6(HZ z*DRT|sQ%dM^7rRA&eAkrUEPeCPG8^giHQlJjL67HU}9Umv`?;{vGH55-r003q4p^|DgJRUncoGh*LqP ziP!}os>JKXPt#0qwbfd6U3a*b3E^zWh)R|h1Clk&R$s-y z(_!`dLb0lj}#MF1OysBzT}|Z|58F2 zcvQdITF2%wH`j0fPFbNgvBMi~=i|>6$A2Fbz4Sd2dBw*^2o{|g_ut6WE#-QhJ|HoJ zfqWt242J9ZZ!BMi1X*^Eb2Io)|IMl72YAO{1@B2d2L)BL5hfRB$0sCEadP5-T!R@k z$Kw;owUUxT^73fS>b39@$Az~qJd90DsMy)Dau{n~L(X?6QCK_gIZ$%Ka3sjFE=Kj) zDCz0p*$Xjf&OYJ)nKBt-iO*7DFk1Zp$5D)>hK!{qiVun`8`rlB0!b|3{h|Dh zgwvIUJ)>G7UXfh_;U#6c@eVZc$Vm5Iq*%BMDE{R=RB_yg4?+IT86TS+;Cvr$1osnm zw)cIp{WD&~ATm3Yq%2WNhRGvY7vufqY_p@Pr$fc&J*VJ97Av2t@;iJ^bbPIs{rmbP zu0y1{co|`3ElKcE7Zb*_z;WQUc`W{m6Z<9~cKD7C5kd|#OmF}sxo9aYDEJ%A(h1$d z<%AR}RW;^3MC);g>l7^@>;IR7iFWC{tG{k@XsRVw{pit*Vw?>7?V43vSC>vS`u=jz z#JDGW+W~oyZl{dzz=X-0kwmAvwP9I@|@a{Nf8 zQQtLlGS)p%aI#pWuV2A1-)Q~Xp+`(^y8&-192*q2wZi1XoCj}vip&BT3gk@99#cPm zhP}U?_o&rvJ9Wge5tEFRLcbqyZae2S{;D^1`jJ^jRFB9I@bR6p@W#6SNaD(D6U_R zvH&3Lv)%KR9`ui9V^&UgxI7k%mZsOgi%M?-8ECF%WrTe-pM#xX!TCa0m7KGbAJ)Sg z`5f0$agEb6==H5FPB!I94o=Q$+ar0o?Mnt+js}?A7E!9mgBpZc-b%y4OVJcBGalkd zjanD7Ij2K7Qw6<4N|mjB()4lrE}EG(6QIGJCj(Ihd22bk4j$a zNoZ#hmT5J^=I7_5{#0oeZ7dU8<{4bk|2Wi><&hn;gE{FVIy#v4oad)HX_5+f5x%U4 z@zM@*7|ilm+nqqofD)Ujc4Xipj+Cq%zO*L!)!fhTCSYvetJJzuFNroybm*WTw$;!h5unOQTbW?Y;7D693R{IZ@1@ z^i>&?fWWjGiV_@a!ot+YX||bp87G;CSpOOt#l*yb2E;~sG=FeIhkXp96et?B zvMs|JFE{ytFwXnEfT+5<+V1i=Aac%M5N1*^C;^##1UlyH*MPb@t_Yow&F&Y zj3>1PtfFo=*t$0$G!FgvQ8U!~Q+;hLH$hs6mzF!ZVWg6pn;Q>ADSTg}kjj%{ak!v- zyDImlxyJIxv(Y`j_Qm||+3u2YTj23nzT}KNPe*?lyZtIkq#84Lei-5=$x?<>+bp#q zF)$R;JGYrEH2Gf|EV}*8hBrD|M=n=G0mZ9@1)Zj*CXGL*TkH5;{9KtNUoSodnr}Lx zN&h{gM1%%sJAGT*u`W5kh8c+E#)B|cN{V?`6it7E&(v;3(wF#a6-KwEi zV+-cTo`ehHWG1GoV9KQ3EKV6HNPmC-`$MCKPE)Lhh%NVxBc4Qg9i2prS~?Q?T=6G2`XxbXkwUhccl8ey5+^}pfz-Tif{2cK*I1*a~7LbDN^b+ zHCM(KnTD;_>!EmJV1H1P4-aR)GmNYs5$~B)3^zC0KmI{me^bj_P;~Nxmb;}85$SxTbH{Dx)2_<}87P|xtqpDHH-5xzl>xRPOgwyeCq8ol)a z!Jm;s4M&TLD%5JTU&Js!hj`pfTTM{s<&z$DZu?C}v)xYiq? 
zGL`zLE{b5uz(xWV$`#G}H01u&>;@l=9=*0ZMg~uZEFV)+f2ZKCbC@|!gmeSsCuO|f zYwXp{U{8@FuYC^E8(Lxy5<<(?so?V8bm@hJS9?X^gGbD0p={5oh53G|4J=(tcXB^g zK|;^q5pLTqN?f_Ab{U!`thS1SpT|gA*@{kE-DT{bdg$k~tATyNdp^X%)}zQG7JaGa zW|7BIR&Ie$iKBeMhp!x7uh%R_kH_Xygx2v<)6XQ8%k9V=VPTK@$w`MO(RzxKM=azo zHkSM)w#L)g+ha4i7nd1q546UXEc&;w+?SF`D~)85O)($f5d@Jk$W0=OmZlhA;Z?ia zVl_~_8koctZqvzka|{=rt5;JgT~O#xFt{^NCXq%ER3NJUJH;(&bY_r+#1Er|e$k%ulYR z#8|Fi?u(*W7%^bu`ruzCL_?k)i$oy(;@m_3GC*g2J(Zl9iG|ZykH=K(73rjs&_fZ6 z*FyfakuSS$T54o|Di_~<1cSw?(9|gl@%gS9oGtGJ(smeDL04K#*4JFVA8gD2H`?LiT+MUdlLRjb>F8>uN+(1SD+b^QOdt z!P|7nWKxre#lZ-$8bNXGut^m=Ra;m=h|30((&&BvoS@Vd zyeRo=Pm*u;U2KA7-=PuRwQ~qQLZXLSYdIMfR`n7s{HGQLC8S^3k#k%Ko`Q5f znOkaywY(ie-JMBz`lq6{e-}J#5d(Ijw*4v!aHL%_ppg0Pyb=h!g!9bqX3}|e-qJ#= zd2K@TGAIun;28v6w1(R;7vM6CCgr?yy9itfXVGfo-xv)fNq>`IcECE=FF}@Q^-F~) z86)$kitYFR;sU(r|5@pbPZLza{7$H8(HIR+VZl;1L)KGb}{a z&fPSQDCe<7ul7_dXUC?WUq2f0OmT=~euCTLwjjJSl&di2R)8_eEnO{e0Nm2so zq1MZ-shv2ID<{0Rf2R+$n3uAit2Bn~nrj|$#Q2paDNPuKf<1GxT|sVSQEBBfA$1av zf~ab76g?tb2dGZ(19($}3IC2$TZ9K|7O(fy`o-TSeNm>sNnufu-Y#7z*o3X^f2EJ z3*~0z#|@$67avQH0o(Rde3)x z`emsM-qz*sCnol8@`SOl_C``BQ~w-;2HMJ@Rm)uaH|^Te`8qimpZQ|R1ywSDAB-$G zSl-!XQWI>1?<=VihW(<}e!o1-Qi2j_52YjFN+wl(q@$lR#L{d`oE;2o=3{-@nXt zhVwUe;R*?-l`h+#yrbC4YEtPNxH;UsRPRbJfS;`Qs)eVy)mV$|m`diVg94qx)@<*1&$_hlk~f%T+eJAWv(3*vy5F3A3%ZzP39U?)1>n(SBfkg@p0y;(Ens$TH>)geS5%J`4$j|pPRoa< z1U@#0+e@zc^#h&XVxws8ToUfeRL%Ibf<8}s%>GucSnJDvka;H`Ag>Nz-%gXJql1O{ zhnPhhLEK#RqFO^q=W}u#gB{%=>8Yb5l;Y?1I2^5@I!TR?sDvm`ieA`U-3S~NzOxIV zntC>Wy|NDV8)c`$^ok`2E?z`atTjk3=n$=6R8F4be7$gc473_Df$s1fyUDPfXnk+Z zBxZ-gZ;0dMwzu5uX7ZJ|84iA5R5M)JezMa;uR}6o9HuNgX4!in_f~Vwz1a^W0dyq8 zSK9ZNwU-(UH@p?Y3zi+=K{$``;ilprG50c&99Fb|6{Hlt45}@JH%Fg@G`P9$9wD0Y`I=A zUX}@TKl^jsQ?p+%cAeR_MG!2dsTwQ*BG3I*MDw-tEVYK_eAZajDrjh;BSKg7>vjJ= z>JYP!#gA6_7QChR#DMFoJ@wKi@CSzB6&#S+hDD z6N3D5e{=hBaL1!G@a!NY;^g4R8^lP^CdNA=p;12Zr-s}d>W#WN zNzrdsGIrhL!BsUTtZcls5E7h2cO=+c%{)8B%q$xe9Gg?uICSk8?N71R(s?)0EBk`- z_6<=6nRSFZeetmR9}{YFcYYjub!W!b$;tVgW#sBJ8FmlXx}LwyuMm%84F;nRlLZ(^ z&JqdqmrkKURjUw{RiO?B(x>yXmxk5U@E((dS?8)zJzL~k6g!Od@h*7z?l6E?!S*C-*JmaqU@lvpw3AMd(c_;!fBZe5pHSfT#9`SS8Tt4J zPPD3ku7Q|HK@@qwI|4(t$8QA3b7=}$AL2W6FQ*n*j}HbNkI(K|?TwFK-B%rA%#=*3 zCidg7{(L<=)&2g~YdQJKcPjNl+4grJLuk%W)YI+|0)>|!e>97GQ4;$`FnKXADqFZ; zWeBdw;g#=5-V2VuUTx@8YS3ETNutM@$rT0Lv9Tt8xmp2JkEa%TH`?bSSM#`_@CXF~ zwIJqC&FN`rn53j%s*MH>@2^il$FQ?<&3gTelVZ-f&4~(r4Z%q~q#~Bj`J7Uy&8zCd zjhXg$2?ch_(}^da`#EMsUxqZ;w6&c&ANIPuSROgZ4`#^$Lq(%?Pb0tg{(8ieZTkZ6 zkgfA(fp&kc1*#nZW03dV)RMD^?R!sXL7qzKOe)1|kCPPHVQ9$I0l&exzx!Gb>)~GV z@Xw!ppw&_Zs@rKI-o#qviO%ssWf+Obq#jM&)N79NG$%1H2E`7APpE7NTD2iN{L9O)Dtnay>o0&&K} z{XHfTQP`Ns9jbbzZhez8k&p!{Y5GS9q}1opHt2}A@*@;D#;zI-EISL3>x#Ioj%Wg+ z6yu9m0SU4u8LuGMTxZg{Hs}Hu+d&=TQeATc_zum`N8Zg4j%HGD94AqG)o;{J!)7j{ zP^Q^dwLrGhIV|)3*>3JCp2CZ5ZBWTAm6rPs4i1E0!$8w%2zwte5y%8xBtT1;3NfJn zJ|Qu2N!`t7E8P_n7xyz14E)Ni^DvSLau9YdgD4v0+VMeaeQPsbyV2w+J>;!&q4=xL zt9O_nZjRf|&_Mi_)nBn(r89=l)Y#BTDR{IjY_%hx_3n~M|H@a_J2R~|Rakvm@+K1( ztmnAWKXDC$E6~*~Q<*#(5pyoxLa;WV`XXhNE3{ctQ^V%CDFN88pBAGcz+KtxKyF-Uq4B zi<`e?pQ_X_eDs>0LL2)2qc52uqx`aNK_+fjUS7;L z9~Xq%$6Z5<8N%z|`LH33_dnh64v28$@wm85W~dDw3Xiv$V9LG=ueR<@KV0G|*NiG+ z$6-28xOeq|Vqt$szpCEYcK5nnG@KFSgNAh6-P7RWucwC8x^i4llulZdwCl{hQ5M~@ zKp2!FNrrk|u4Z(7ygJk7&dI^SQRTGr`wo5VA1^_rG$t)R-gunq#y8S;(;ZT9+3oo_ z7hrUEMY(FoOfP(;6j{i<`F4VA2dy*_imkoHV=Hla!yuJ&4$h3>)+@4kE$qe=HpfznJ~K zz4@Xk&Kzw8*ajphNSW5T9<%N-Ue190=UVL%y#-q;G5L!iySSh_dfb6O$_&M;k6eOu+wqPJ5uP zR7Gx@`L8c0`0-o4)6>&OR&S16Gl(*tT_Tk5tEK*Z`KwKNa(+J?gsau>1f~nqPJb@Z z3D1YjAYH+#@2brYPLVP0pt?3@&aOA^Zd}vVG{bf#kI0u?e~4K8l$e>B&CY8=^lY?l 
z-Z~{+YQ=%OasQxI%;wM&cU3QMnj5V;caV`EvUPs$#H`bVDeQ6XC(&b(G=zOp(}=}q z-+`-rMF0c&Bw~$0qB?1fpv;LjW1rV#=mwQlgT1oLD0#<&%r}!DT1=1>u2EZls^08^ ztVo?_axs5Mm%mfJC4p3tzpGe)vs#qcHS#tb9F0E1zsyoPJ&=xE1pO>bJiMT+tStJ* z!(OTA-nQt+Ai#4wC%Z)itkk8ex&e_J(uOkau=kce{t&3M#EMdJfSFe(JtceTWrv^WlogeRx%5pWNfq!t2g0k7O6D;y$OImbAQH2GJC9)bL&F zNU_E8EEN@7er`!AId4%C%La3-*(GFQn)oUuFAxq|H&h$z zB-ID{?9>5xd_>y8;IQK=Ow=qO%MS8?fsmIY47n7dc2Z{VCWz=7yF#Z$$GX2p5|c*0 z=>N{Gdu$i`hKk_lM7uXf3fBVsTF9J=r{>^TTUhyBMD<@zbvaTspSF9~FQFbU;z;6| zmo`wgj(0@$dMAeg$<+R#m721O`_t${&wcpLV4+-Z|C#BCbqEyY-6#4bZ+m3yHP+mw zA*aIqv#U>ri@zyO{=BPHy_L1>a!R_1bJZbU*q$!qo7CKN9V<0b-TWP zqpjiBcRg%IG7cyD@JKM{ysNfzJvQQoB7BS92hI}R5jf=jk&$F=Ua5eumzS?24ICX1 z2dn>925$7YnV3L9;zBN4EN_A&oi^qB1j3zh+1ZpAwQAo-#3VYjJ>|ei$&K}OY-7{r z?~MQ4d*mqxIiw^-i@u%{s2p8fd)bRT3QrVI4ZE`|h5$=ok>?fJQA0yOffU}gF(8BW zJ2eUcE_QH#s>_lyp>5};Mw5$W^H|!)#VWf+3hs=|kVe+I&-I)kCr=xI;+x5=VO4VO z=x9g7f~RFoRaT~QY_o|`|ImJvSdU~I{dDK^0?@>NSLlfYqB1JHuusYr9EVa|Y*JjL zvh+*%&E0so>L^1p$wWT|3XP@>ZLVa#pYY<;s&>xtZ*cuV1+6}%Kn42zzzPmB<6V{~ zpLizwb<`<{zT6}EJB<~4NhUY%OGUIYy=ua>!*SFeogF~|Dn?q=13_2m)Z@!Z@=}8v>Zl2*}Ztg-j2ta>vNHIUtiVq?CFNav`Ij=c|xf?kCv`yd=`(V zp&&D3Z8TkM|C{pl{#`zXvAflz#l30&Q8Ldb!rx!xj;GEP41c_*QIMHhTiKcW=v-)$;^XSz zT$cJV&oLSMaI|0}?fUY)5FnFRBei%&U5(i~j6XG_5`O7IVCC1}HEPgkQH(eqFK$Y9 zGrj-y8cyqO_ypW@QfYfurLRf_9h|q4px{_#qKD-nrD3qBr*(gps$(^${5gZH?AOBG z=!M}?lp3N1{b1QmGbSwMx|(NmONy`gw;{4b3+pS2)T)c=1c)EVd{0JE6buGD9kV}Z z30Ik%0$Oo?s;d#m^Z`rn`8;-Vx;{SB?o7i=_}}|BW)xKllvIHC4fk(W)UKA)t|(V6 z{6MF5ms%l}WYzSP*(QM%+M(ZqeK0Ml$}n6KgK~Z1Z3;>jU!+zFFNi;Xk(Dew+0m#f z!Mq_h%2(fix>6Rob*}FD+WcyJaU9tQepbJmnE?Rw$=nwIrUBVLE#a?8T;iHD)DJ`c zYo%=;JW7awHOAQ`{2Mc>5g+ZOW}e-W?lQ1_E46obI^XXid__lIfi zQrE9gHA1{ubm3dHo~rRR@@tOfCQtoCU!R_CCSJS09^@`*Z@c=6S?!Igqb&Y7tGwSW zzAeh#b$IOSS};ootr|;nk1s?jkEXc{Qpr9g9~;{dCF!_34Y`Q@HP0MlW6DBm17Ryg zvMXv$$j1kX3#Z);UQY`$G;O2gCqKUBNkNli7sc|P?{N--_#*e_C=$i*w8l}Wg7qQ&ENqjTVHVa z=FAb_Zm(S%caOQ99L#Feo-r=nZbyXJ5Scirb^PJlExIdb*e!`F*}K7kam=j)?Fexi zRXhRPOaR&e5L}B$jZS6la&zz29L%H9;r=>|nsEz>mm(kIqmL=!F7NjhE7EN~BAUM# zjG|bycaPv*O1H4}Q4#=wj`PT571R$m4Wb8<4=JzR6KyRjm5{4yW4rXZ>SJ4zR`3g$ zoWH4bMXVh_ASh}C@g(${4n*>xk1$8r>X9m{Xx;4LW_VmwO?Yxm3p5=yuS1>xz?9|g zmSi+7gI%?yaMeWAMO`HXPg8ZPz_QMfwboP@PNMqqbs<5uabQ7@s;LQb#Fv=i_luiy zBt{W7)(P9=OC@R;#tZNCIr&9=`mreFUHj&&wd9^?N%(K1e(J`A6dUcY=rf4jzE>ux zZ|wNtpZ@+P&gO0L&gkcYqED`j1(DUg(#u;^pmnl1DEq52BH`wx_s`dsjwCz~U~FpY zDaFX#sFG-p9C~Nmy%(UO9|G!2*)KQ@C$}n!*55bI_}1X+F^>*r;Lyp8{z_f`tdihb zIF~9IWNW5`#Ac9;_Pkroy@+LHB}w_U!>NQ-ev!x_+>|O5Va=4CX1a@O{n_8r1APd3 z$yvj3+o0>Q66o_iLpbZ|&<*XnnalQ8cRodl_Me%dq0aFe(~4j*FO(Ei8lwz_`@7^O ztNwQlHsK0k_Wxdk6+JfT$f%H(N`y-9jR!SaP|gB<0zP78v1NQ%hWjOj=fD01yys#;mX|I(u^V-vVM_=`Uwwz6|FTixI9?IK$W5`6+cYW9k5%NRYQE1*^5e^*K$%tI6( z4pD23*Lv;Yd|BQ{8?Y{Q?KIUmWHJ=&vLBA5vJpyniOv)K*7n@^^4-F%NyeU|0o7=E zsIR!`*C0`jn@BKgQ2)0y7pP=4WkFOo_ za~tl5Xr0-e2JvQP(e;_4`M1#SvE|p-+g$NGa-tM0w(@14!`>Yp_M zsga?+{LXfB+u;ssRQFYQ$OO*ZB17R?espc=(g|JHFwSI*j32?^tg3*V_vpV3Ycj3K zUCgixJZS7jSu%hzeBU-N%IkU+!OixZ199bu+BGiSRN@DkFmkNCR|CSXl2ZL{kJ^Uj++u z!eAo_E)>EGY<`il(ml6Dr4U)5j&6at76EX1`b^O(kwM|O+r`Dv-GbOA8A2!s7dPj-eHpoX=uPM@un_`zL{1l z)@I7rlwo;n&`bDXuUx+t=UXW8}eco17=(+KRZOR zu(7RdLd?VbHRp7L*H-%V0X+={A^{pK?d_74AoP&|lB*!{ zIg8!4tTdppL|mg%Na&$Sm+~CEN2r#7CP*rneKkHkO|WWUz}NiX8oFPfx$Dmu&e!hA z^rxZW`1sh567fUnB>rqL5oE5x8m0L?ik0AW2uaeer2RL%L^9H_EH$dCh6X^f4Js^T zpt#LF1!P!y`ILaQFf6{fKPhl;oL}s#Tg7o;qTwScmJjnrS97kg8xiv*qNkaIUd$Ms@`rP%-u6_~0#1H9uXvYV zIJ&wv-s|U%>+gJ(Jg#XC#X4|#L%-2aKPj)2C^$t%0o}RT4T$eSH_)7njEFouJb>-Q zU9J|9z=SWZ9v>A;=F&6&A`WPK63eRx3zQ#X)c*!wlC|`Vj*fawE&($V3C0~e8!u+MdC=KTnypl#C3l5%LE7^Ov^DeZ!1ftkrqa)D%9TU8MPH*-LA7Iu3OgQJ 
zRmJ8g$qJ~H6AfbhOmIJyD2(0_`V3-Sy)K^5rEusJnBBnmOIk(k2a$*O?prv?yy(bCdV z0zxOCe3s>IXE?|IkVVoK^aOOvE}+rU`H(0>3nXHG<<@JvhDrX-P#f#(lyr3cq&bmZ zCCg=iM;jUy^+`;{B?cMVn1cwMVB6Z-RyGIJ#nh!06|ug4{rW*o4PFd0Yhp)(QH2F) zJN$O6eMVBaEpZQ6OP@73V21QbiGUnzc_h zeDd=0b`B04%Lg2UsDLcaoUBc(Jm1wKgtS=Eruq_ZetpWXzMgxo&O8w4?~F}N#Y-o1 z>gwWMQ;KD=!(f4!2+(}(oSm(mT!#+(mzKya>WSgt;D)|`_g5>G1j3EMBn~su}_A@*hG2k_Z%$5;-qb7JG3dBp)>i)JJH^IPLCi33m(95UE z`1puqXJ-=zcmGIWa^J5WEYfa#B_bjMBxO=?IL#1Z1AHt{0kq;F0bJ^5`AwbqM6mnW zree8P{lpwUDLVc8l)hit;@?L`R97Wl`AI#`>18z_mnVkdFeuf28sQ8!fz-~*fyRQ_ zpQ6}s`hP}ou;z1EM?7l3B8Pys!pn?|jID)2pcw-3wbYH8pv!oh!v-t%)YozHC6NPB z+wE@Y{oV4@KyPm#aMFdvMb;fR6oOalgA~5TKs^C8k>hRdj#6I;pPA~#F5sz#JHPWu zg+1Z{{Sq1)+HAgo1?ZYyARPc9ju8+ZL11BFkGo(nfZDCwb=iAxq1mw+sH|o?yagdk zOG`rMzr>k!+meA^s@D6?<^JJ;NWh5^z$G0kYcvt(CpOnZoy((T1vfXor`zdHvxTM; zxGNO>-e9z!>4F3hUteExw-qQ*WbBK7p$x<}g5BNS&topw^`Z%Um+;q=0DoTAYTLFa zG&^qn23Vw4uI2o8&|~wPsoU8IFA;|^BIpj%(a{BgS4;!k%S2W^ph|LMispdpiV3LDv-Oq)VPRna$}lM?6y)T7 z!d;*MO;E(L?^8MuNvs|o%7eaX)vOiW;Bt)#%BR|I`R|f`g8-Ip1|1w48ZX%1{$m3Z z2ge8^1vFBxcKA^6nb{z@%7yazI6G_aL?3@Wn+_?<$+e#jXitK=knhBi-1LFxbEhy*4yd zh7R=Q;QgYb$LPqmB3L^ZiAEkuwQZW=;=cE_xt|UF{K@R@wa{d*QmmA(G7q2^8W_lK z?quW>ClI_80<{)^S`PP9V_-?y`f3g4{5cF#K1@HLAyV3tY6Uwp{~ZA&id+BMMYWuN+@AgU*|fu<*MAM9H3(%r3yNJ@9Z(A^CpT>{cE1|eP2-8rCy zbjL_{$DDh3zwi8Wet(>G&byY2wUqanXYRbNeeJz3AYdDfab?^WB#UuAZP0J>5lNqCIGBy3MtZ58cM_?;Ue|9}ziTJ1~N7|X{6GN{K9 z@A4F*vqeuQO~yd&e$ZsB)POPoD78lN6e+F7ezqkjioUn8VFt&K@L?|vSQQ54+Wgin zcl$-M&_I2R`I3RmqDCZp38g2{D>|L;8-cwb8$7}U)4HNX9{KmVZ6-ehf$V5=z$itfCNtcXo;U7j zZGF8PXm-lAiyJT2(~AsRZokJk15FSrQodKIPX=FJzAp}eKvD@;^caSw+3qGKf-Rmi zRSrIW3glzzZP~M~+mmtmDtXHr8}C8Q_~LMYje(h2xOff}bS9g;-BWvz!j-4!=q5mV zcf327{*09Ob86~)ZEcd78hdka;GqCzsdA~l2so{ONJSfild(vrCI*~u$>3LY%Spgk zt}=el`T16oRUXWjj9s%sbt#Z(;L9?~IXP7v%sLMvdF&y1d3nhAXyvbtJrB^x*@7`6 z21j%^sF>I^p8VD62I_gm8KjV~o26#f7|ek(+}6gX=m03s5`XFZvWUaM#{N(-2f}fW z+Y5L!9O~bkeM4&BXf&9@(E$!)+xHD186w3JJvZ!g^t;;O`vD}Tv@_rztQ*hfy+e$M zx)C=E2x8aGF%Fat@L%m(l1>y@DI_4^kpiJz^!~mBD*MNvpy>G~Z&FWBPxu^L2YVDa zoaQV*{PunZi1tWZt`8%Ph(ZpwD~Cyd@a2)Aho>ho7>$~^w6p@;ZIV14lgKdL@5sA~ zZ{B02$hJVeSjO1+djxIrTc8&KYaZxzkOv)DDxmZ%SlSW-B;FMuVg&;XmfWY%Q0QMe zA)$2Jnd&7lX*<(Zj?T``5feK_H}U<7;FMo|ZU%uY@9hDO&bufwfq0;8)a-hWgPi?3 z-mcD0hkw7naoH)#$N=@AYVdQM{7)`_pkdMiz{KE;xF$F>l!-YKdGCkm&SZIQ=5yI? z{rM4!_L5pT+)^e4{9brO#PYvCKk`IMHi*DUl8(H6``?3kQS=4@$ZMm!dqz?cCChua zZj@qcyf7J!hFZDrzyBEx3kwd`EGpkQnKVZGEQJHN8XT(j56SJR~d~&pi)2- z4ditnf$|RsH$w{x3mNIDQILNF?jt8>=ek{>`$|6%xL!cq@&S(qK>_5W%roMb9QZYB z+@7WLmb{>$3HpH0jmfD`FtAV?&)!xvAb6}E8Q74XI9+98Ia!MekPgN{wVz+V2H-QP zSD0<7qeVvSqoXZJD&K$m@FC+6B}PZ9DQ7-*K$mfsO_d>sPFPq(u72h;x6KDi094?= zlVr3v6!kf0=gLDwm2N3%k83JM|CSu2lXp$3tej$*BvY?A6b@JX+1>r-*^}6?Z$~>K z2WMv*fhEs%s_o2%GU`4_s^w{CZta&UtF?U?>Nlz}s+lRDiQxBJebIxtOT|SS?#B1) zhuP8kW#OkV)#lblyB$UXglIZzG@%mxbs6K!WW28$dxgUVz7wOBXNJbc#;RZX$-Kxg zAqd`3kt?jzSp%(ptx{XD@5;{xuqq>wN5zW3JpHWtl%yxRPZYJw*K3ac)VIdME zzQ?TguD`48OM#s3XsS@d$qx`WFiO*!58kEp*q9qZ5zV6|`4;DNOlna%_LYG@2U)2hX7I7YS6A7s*YO_xN)3*ES5iQ|#>&Ya z%o?41IIs!@(q9b;#yC)==CzrXJ{od2;nh*aWQQ^vv|DiV^N0;org z0;9Ffmjngpx%t$}MkJCu^zDdOK(cZYW-Q4m4u}!1dr*w*0xYwDb$UmVo`VG;db? 
z0aMODUw7nf7Me4W$D*WU1{1seRAabTT&QE!=CGmi!L@MZQ>?*7u<0&yG@sf`RcD4x z)@$=Butv82eljcE1Y*m`Tu4}$^TYL-_Cr#zmc=% zZLn5de$JIAx%ox+{iLE{c$=T-xo7&p0VkKG9p-|K z1ZZD_78eciyWX$C*Oqf5Z&o=EpP^BKvkv5k_-`aL=-GwHy474~tThN`8G)dg^< zM2bC`oq?s(x#}G_2q8RE^eE)$Xl5k5l7ea|u)cy%IXooy5n|CXhtXD~kn@vu!86nc z9PC1j0@m3E%uK5_IWXQ3K8!?;UE$8d;0KzuEy-9cB(;eT*8S(VWP>Kr8>U_lY|Uzx zF~+W+xF&=|sH&=FQzNs<`_5*^Gw^S#>!co@8KU*M_^AD{QgI*|=$uMb`&f}G_k}p= zp6)Tn8}L`FY2W^x@oi{fib6lD9qzfw&sv<-pkL4G$`5K~^HEUl(LKZ zxAjK46ma_W$+u*c4Ti(e{q0G;Mq5q;1(2_9!)_@OWj zUE&s^Zo{}3tT=LEp-XVojFID*xeQDYtm>So=OoVF#p03~wn2jaK(pi0mQcQaWycO6 z6n#(AI??`ncV03?``q zltYO`<=C5}9o6|8)&z;+?6Q;OCvtTAXB^JXh0A5WoO3aS=tUO{89&+4qrGlDlZ7g$ zI%Dl0s?e5sxYYY{x%&a$+$PJzqB(VOFd4_u-~h&{d(G{prJ(Qk3PsT!cjkzYqG7wS6z~p8u8aL1x$8!lu<-9>BM~2ulSr4Wix-(q?`(uc1{KY>41hC51u!xZ zbld)EtV^@aOFAsf|HYHb@0-$*y3g$qMuC(R^zYD;1H1n^p1#L6qIY;eWeH4D;Kd%v zxSQFOyBeDp0<Nv)wCEGxW{4_gnv%l0NNoBYPwntQCeK()2}7$YZ1`2(AS!?Ks&b z%OdQnZhGrcpZ=44MVXUlB{B5cHRSfKf&wPUA0PlQVb&}|0nFkUO0kl5T^doZm51xF z^WfeT66EX5iuB)3ai+sunhWX0{7%9nfQbiKy2_?Ms|G*&a5Rd*a-{0k6am%-0F=zV z=BpTh@x`a7UzyJ}HmQBJmQ-F=@;g5?L80)nIwtNoG???!P`pMBQWZwdPg0>G@Kgd< zr4e`!5PAmzS{(iw37<)O?4&L4?5xN?kE=|0)iAKa8?Lp5fsJx&Cj1VQqIG;c@iIF7 z&(-{r>Qi*F;+>I;mU71vr+8f4-a+Y}iD76ri?*{lG?2EEjuK>wdxBpsebh3qsg3`c$catyWv}RU8!jB>n ze%lXzUz*<~;PdZ-*`r01u@e6|B%adqSH4L5*VkHmKfhx#;V4(8qzZ>qm(#t79x?Wm z<}(rT(Ii{f@8QS@nTv^$ak*x}PenS3(^j{NETGownS-lyOSXyc`K3j-&oQR^DRFNY zJ(Ol=_35q9B?Dc|O@zHU^GiD364k5Wc|UvufB*WErqJK}4oNI29b1ZG-BC})4tJu0 z!L4h&HAXLT@=_6FC)D2%+bunM`1S~mc*7D!_;8jtr^gw$jlHvp%o(9|d|JT2Q_h_BaW1 z1orIi1Qn(WwG3wk!t6SR#XkQUODNK+PPyR9+lP&La$C*I&;tvH`<48->9B6#VsyS3 zgq0%TOlN6n$r$lvWx@t_=@w4M%T~JFaI2dETezm8c}w1bX^Ob zl6?{#maUkVnIU2@pp?!8Lka;3hwxVWzNbALa{!pywb zY*$m}JL3^H90H{rQ_hXT`z-)fTg8LRLZl8ZmeyP?kN(p8 zBY05UkGBmr57fy8jW|;^t4yf`w6t=nn+`Uh-;TfZT8-c1&-&sq?VK_Q@xmaJF_n9! ztE=k?--7OAJz>7-wl4pI*6e=#4_$n^4>#v;jGL?LN=Ujr>Wd4_cG{CdX1av%^ad9u zQNMPjCXVp1j6kWBwTQ6ra3dgz0Dx5lKu@& zB(eLYOAZ;8dpygVC|xuI=78w(IPiP>$I)s}53hTRWizrOe)%_y0=JaIwv{t3?RR-% zrK78B4oIbiZ_ihJE;sUubnD^)49jWM{s0scR9`SGQR2x#qy)8yiOp?m{?()w={5#L z7ixS?ObslBiWS>8riNzu1%ksVgJ0iAzX{Rr?BR9wWx9sFg}-Qh9g^%NOuIpZ4v{xQ zWa*ijK|ZK{5Wil3rqM_TFby1>hmbfl<8n=qfs=z=cxBFgf#vBi@C>2`HY*S7ySv7% zdzWs!1%BC|Y0aFF@_zK|klHWx$1rUx%ICIsr?K}FC3uxp>TUJKeenhhN3Z({i+J?( ziOj8kU*l9%y+3>0xG~Y;;)Ey!(1V({7uC*QqL#HP(+U=@qfD@qF5P0IW_s%Gyp%Z8=%gC2;1slG?-2 zJG^pZPmfKk@?+Iek*$oT(0nWfU#IqWc<@*#pwiE1(qZBsQOa5`IvHOO5L+8%bFlkv@hNUNd zxA$^hZzQf4-8{#sOVc~RS^u=+wq7iJLrspmxMWBDvh{UBeaZQi?yb$k102`fY zxyu{TQsGs_rnFnaYp?c?%(H)oRvNRoKoC^tKhxr9t3fj1I6j32U^OT}MY(`k39s#R z_D5YBUA;Mvi;hyFKYI$uqmW#1L1JzzO?TfB42&F@Ksi!`NK+^VTNQPerG1P#AxcGM z&Q(nGe(liG$@s{)mN6{YBT2rfggEy?)#;e zED^^0ne*42V#(^Lv2iZ#yJHVowoXOsiG^;4nU1~PPgmmto5%8ZzjLWQ zs9jp0NvW{Gy$KcRom5=N^WAx}yWvDbbLDQ*o0(Z#u8(&%?}-~orRH&bq2c*2l~8tP zj}W_;Pi1|i;0-;&P}WeukgX*lp;V|KjN#kxw@Hx&Oy zgNu$`>dz?ew}zIsw1UIl2)RkN{rNsLUeA0M5<2MSicKan@A`f*yU-xp*AhDK$^{|} z4wO=nlULx+6O@LP8jkM^nar68Qec5W1W=Duc)F$31K zfBGXeMsxONOWPYLKG*A!jaSQDC^mX6N%SG7G+b}or|@ttghl~+ZqfMrXohJ%PF@Jz za-_k7=TostyCwD&{Ab+Uy{cXQ{=f&Mu?s5$4T_KS|2E%jwd6N)@g`_c9KtUiU$gx? z`DMR81ma^5X$i2?KOfK%{Vsf?Pa^vlUTNk|CE)qUpoyqc%bEgx=2a;txUpZ72?oqTXqc%Z zK9?1kRS&@WAPYktvm(Oz1Udtjx#9pK?eF--HlGWg?PZURlc&U>-Ul)$6Lfb>xjx9! 
z=CzF4V(8=ZGE-mCckwoIsn9p2LuLkDS*bgVM^CkUuC&>MvJ~om!Uf|X@Ev=uy5i!; zgi`{edM zsj1MpofogO#_AD_N#fH3&0`hdyG!jvkOs;w_PJVM=#{D9QlGAdGEAHGk7@0k2 zP#)yjJtL&>qe;y5eJ*?2FnPMRv~;iQC^flg*!-xZeYBbokE9XdOQ(o=vZCw%p5nxg zBr}9IE^cQrC+_wd9**+*K8uf_D0(EZpP$eJLhAzwk};PqApYm<&7tTJh6y zMv20cCJ`hC46nG4aqr#>Gtv0d6uaTv>2lnN0t-Sn%l0JcewK$&BAu&l>aC+i-}NGC zT0JhYmC<=5J@IM#Q;0u-m(tFFY|BiO$6+S7^2oLiH){f-N%c~(*Cfx}gty~siFu2} zZYunLxd0oj-Q>ZIB6LA14oqT=#L6QGf@gD@RXG9UI%p|XFJrT6@M4v(u2Smi)DFg? z5;oj)^$cN(OJLzzjH$Q}BQrz+N(Hg@ZHHE8%G`30jlJ;aihXwLd=if3H`uupa8-Ym z;l?r7G*16{o#(Z)tY zSN-?mx84XH?pMu==CJCI6)k`0h~@`E)0EV8_2lIIO3@^4(_*N=^eHo-9=#KT9BethS;7P7;rh>hEXGcenH3NCY?cq&^mv}+>B|!e|7u3o zfuz?k2DR!iIi1M>#8lUk%qQDJP}(^b{0EgMIe|K1jGxqNay-_aF2Ui^(G9hp@3 zC#Vb=`MK2^z+ZM`E?F8VHD zsk_r{37a_T>UOM#>&v?JZm%#G3#;+iogFEb)R-Fc}wW%qH7Ir)0 zEXkU8F)B184+)vb4UNF=G2uR^B}y=i$0>v{gfelk>eE<^HVKj@(n~BfG}a z8lS+y;((B)*J@;?tR!|5rgW6JvY%54gJI?m{E)rx=Mk1OI}r=Yc{p*#iC+8_!^K?# zWiXvo_vz})#}Sk6ol+K`gGtjH3{|DdiN~g5N5%enm{{F!g zv*#M8DN>CsV+7R&B(EUZR?>hH`JD#GYzT^PoXsMC$vy6ng0`A|JcFV!*TwY&SC>8k zqY-lQN*`w>|LU66Vgcmb02e`SqyXN~@TL`uK8>3O+*)5rPLo{7io?H7R>nG)k|$?&Xlek-3rI6-n}@&^ss$QuS7G_&r&RMK8$-02*F_Eo$aM>Jda_jcD^rC zFUtW+lQvcM<8uKO0bP?$Fa@8tzRQ)C>>=>ykW;s3*`4VTGb@MaP{m>%uH`0~#lE(m zSL^MaPM-5NICFXt_qTG#B96%+gSu#fOKx(JM zh!(=w=G1<3F>{VhQL)SU^|y$brpb!J&owQu7a{`^Ug{gBWG$s(3rTB^i$9~2DG$xT z+L^x=L`7*poyHK90&$wpq3&;dk&SIkE=Z+R7ZYkipQ;c1MMATIlPCXn_41K|WM`@$ zrQiO1diUaAvFD44YWW7zZY4o_k8SY?h(CpD{v@ztnK&J4h42yaUcS_)pwp+=FtE~z zdzQ;OxYnkjs7(9&_2KfXJYVuY;(Jj#A_1r5g7<$HX>r}W6wI$dTw(5bW46IMRJuM7 zP>Tq}MWdVlZua$V;tLBKExWwBa^xJS8F>t`SLA|i)~RcHo9_b*NG>b+&B*sxjUtLr zd9LrJSZsc2Lwzm{Dp+U~13x2b6c1eRqF)N|DYCI0I#uAMtW~@h$_fMGPc$lOg)TDE ziO-quT~)(n3`ZJuL8v!?F@VyevIC+)i}7AqKH_8Zg|rATSwsP}p>e*y;h@^}*~g;= zK;Ez=J)L||<`Pe+Uo564=K zTa_8x(@amye~sWTM>OGeE}AR+c53Aita6}d9E^b3m7SGEC#IuYEDUDii>Vg>cFkrR zpS9(~)+>HQ5cx2&>$SLeKy$eQv!Eb}Mr3LyYQxh~ioB1~!5kPt#jI%jzCNgJ(QR^Nd~O4^t!?aS6W zgp9*KNusNmcfXgqwNd!VFkxygP9WEuv2RH1Q^`}I<$P2anfqJ%VUt*=eY=vXIB}Z$UXdjbKEI1{jQLpW;meXwO)?@&KagaKLr07#fNC7+bGPg_# zh4ox6%Qu|pl(J1;n0b;@qRQgX*}!hL_KDZ0PoGc>%Qc^1VQDxrB3~HaTc6;dkN}ih z!86w-mvR0MWlB}%%I_aCHj`RwYNp)H_9I9@gZFV)R|}6xvMYU!_)*X5NDk!caQyrH zgHXO)DcEvdI6x*QCpGmR8iZ>y6NJoirEGtZzS@ov7FB4 z2n_5ZWCFZkr79~md}%Q2edjX{aYjVfz}DJkp*k>~wCKnCV|6*Y=vRHgjzo~8-MT=v z25Y6#TUXj2eR=H_v9Ylcs!ykO-2AfGfnlKu_of3ARou85UY%iTKK*3lKaK6+huk*< zy%pnDIes^nU^wyLOJ)eMLn{Ca!Owsn+WrwEZA|)Qb-!r)4W=Q-lo}610&Gd_?B@Sr=>7!P)j{qbx(W;6R8&ByOw3QJc z6!_Bpf|k89A`;q%N|<}oQg&{1RmLWu)XSJ!xecn(y1E(d6=A9+tr(zI11LwNz&TV5 zR9Zo?+Ng#JyqU!xYP-e*Dg0$Du!ao`3;=y8U%i+F@S3{lQ~M+h4PPiJDWxTQ;P(v< zngT8X5^X^iWSyM2TeSAD%m5wGfI)FSwppeo9WJ0qo?l(z5)gz$Mn*F1_7%S9B z1_k%!%}uYN8#%yzU|i_pNR|8PL;d~iaooT#I|i^+PUUi^DA~T|Q#>{}vFCw3Y(eEs@0Udjo&lH~F23uS>dIQ*!kw1tYfnxZ^f`{xVg63m^+ z!QGPq;@en%&>Xy+U%h61xJ7wl(fpO8uZn z4l`X{{ZQ)q4*w0KT;}=?J2NYis`S03sQN!Tr2=fpq0F%D^=8%Pr#@35{O(io@JC<5a-F7*ef&3e!)4J=`08Lk_bkp;tVZuq6iZUVs zhdtmw;3LVBlB|hS#sJ%Nv_Mca-!9ssOfNVI6${ON0(1#9)o^?~JR|`G)V>%A2{m@N zxv;;!WMBC6gRN$M?kYYuGcz^3rqn*CRLRVjy#AjecwT3CF+lY~AD{f*s%^jfHj)`* ze3Z?_6H5=cX^|Ds?2kK>_2Fuf1SpFu@OQ&)9mp&n-CWTeQQ`rL2OHiCsNA+w1Oa2X z4A;D7_qWuUbF56cO+>vO2e|Rg$rO7%lZk17Oc_HEoLMxFu@*NsWk9{?pYNVS!6*EM zRkQzU0u|?;H#ZwR9z;{Ase>8kG)#nSi(O};^b8089-i!!4xL=XuLS>lzY@nk<0b-G zDXk`Fr)}e~%mwq|R!GefoKjkQ^F0#G2H@Q}WK)=g$G+>MmdaG49S;gX;eU&(ye9z2x4-a5k;8FY!$-)3ilf}I^KZVQ*XH{yqG>Ed>kCj9 zg5JRKIY&lO0L)dp*sRxk1RdV`fLw$)IE{@>mh&a3D7-p2&};5u({v*uB>H${u3$7h zyfFIHlgSHy^lhTI<1PoggVk!%0jY8{~T(K@V+jAV+Pn+dIX z7rUI`sB&;{@?-58`Mq9g*{-hfL`n!76*qOfXr{Kl>H_QP#qfzBBRUMpw#BLY2-X^( 
z`^F<+F8k+b+Whd$0M+5$@NEu-{rERr5Fn_@62vDYs$F{iAK>i9pUrxMM;eg-=*b!> zp~-wGybU{z)Am{|j{*vClyWHCozGfFXGlZm6xx@9%tfT4YXxiTymeI<`d2QPaOIIZ z6(`?RflTrK;*CaD$LLJnSatjLeH16>%F=20-A)BSJtqcgL$SxN|Df6?Na~K&Sc$uu zXv^LH3(H9CGi@K8NzW)^{vU{y=j2-B9SqPibH%P`T*eTi`rrapr%QfX@b5>3jmG5Qv@3*!t(L619-XFt4 zO3dSfEmUnsP0thwaK6)>>9&yZ0j6K}gooGW-sJxc$VAxopd1oC(;-24-kX(0koC3B z*cl#VXcg&{e+hl$^0?KghO6kRm^nj)JRo+#C(hl#=TSiM*2H1wZ$DE=HZB3q+Zw+h zz|1oQXr6^Z1EWFn86Aq>y+9u>GuC237h?dDVf>p-a&t6t`H9=M^5)9PVvhnPX)qnu zd+VMoy!XH0+~!r+@s1Cal7DOKCo}Tg7lg4@c+lZ8LhsICV`K%v#e1>O`t*cywkEVF z%B5huD7X}gu$IWlFaR-vrzMOr!8)d#Qv_p$1Q7fea<%yr{ z$*Fg=Z^>I^y+;(WXYF&lFaauIb;~ygho{}U4(v-ar5;|LP0Z)6)0Vcp#d>{wl()%X zCi3!saGh=_TYN&!?<$r-6M`TTK6w8HwP1+Q6-MLtn|u;t`SD&0H@mq`Fqs23NBdtX zOwA$jiK#!K28F0ds?702UC~xnKUQFI(YS(0Tw(82vcRn$a za36f4!Z`iir4fh5ZGGrrxA!Nxn%ti1#VaB}KH^w-BIByM7QUofH^sOI#umRZk^2X=&Q`Q!*}aW zPUmHQ%avPuG{xZZ77>&<$H@aq#ItjO} zf}0xw6miDm3PcJPiA|WAh>v>{1s{#FemYcM_(3EhC@c1037@O-=Jl%M*EwdZR<=dxs>mKa=1WU~$F#A1jK>#uTaO4;Of2x7AyJ$QK3)oCDW>wmM+lfr+> zr0=yX(VQiC_Weu%7p&#Ekm!8i5k-xd3P5R(D*DT z)|~g~lHTMA2Ijr5mW6I$-wEpQ6`6r`!fi867BK%NM5Rfcd|d%5=b_4z<*^j4+#{d^ zt+vw~5MmRR-+2^$Zn!NuNFaOQTxBC`G5$pVc0yX;$M^v>Dm@~(h7UI5iqZ;UT26r8 z>MDZ(RccNQS*2#*f$v$4HL3+?{r7$2tfx=qx84gorKImM`vz)H=%b>f@U0LY9yzg{ z*U~^qqPwijIyTq97+1ER3MA}_dPCdW?*krzIwLbGSP1pUBmrx&t!Vq*dHx}5D%6E) zj~6|ZjqAp|W)Y%YOAZe12ydD8Lk9N~@=wXJSWJGRua+m;>ZwRto8mjPI_44< z0@MCdcazFD-vOKZvAUo%Zk_daUG*#PEhp`YvW;dPM!;nKiK0JiVWPGx2?}WQ7U4`W z&@n)Yl0K)t1P{wK1yNfs&j7ej%r_;A)CnKWPL~W~n=>&QgTrYw$Giny)K6Md&CPB` zPUf&2Z4UlxV$I>R5A(f%#m~Qwrs@ArrPehK>(C#oJQtS_zf7%BHpCDfq0tZ8y>oBa|=K zXEw;I2~>P=wOB%^utXJ9Pe?pGFMzoRV%fC+z$yR4;L3&d?b~2*fhu2^nVNzgPI69% zj?WWRK-^9*CI-drNhlMTBArV-9Fb+y8!cThE??fNJ4yt%tCjFAc<&&)5uR@W=QI3vPalfo7)O6Bjr95WIWj&dOTi z;o%jRoc?Ar{EKt7|No;-TY9J?JJr>N?~bJi7FJ&$;}vEglACdU@mO%&6PYx?foux2 z%L!X3PdLwP=!NyQiF{@BP;cAD4HEc`TN(K{&{1Er@uO-Pp+UCQ1YyYz3BL`0#GxbJ zx{~Ubp0*-9%stTjaoDZ3#;K4`IYJ=Z_!# zshEo{(&UB`dwm^%BeGo@?Bwt6Cc1`#rTKqOQSF-^Uo^_TxJuxCUSWi*}6-kVSpkq)Udi8|0Z(Xd8PIl*!@7Zy6R^ho*aHQXE^FN$3NtZ!lwnnB{unSIieUWdB+k6v25fqYCe<7XE zZsB_Gg^*BWd;7M2@& z`-!ccLrD$SO>LO87D#-M+3a2uf{JMcx9o2nZi^eue#BN-r2nV(3f<=kUHE4Ot_K#s zqrb+)#v4kq+bwf96JLn{K0f1TU4lxwn@)MX`laP%ufJ~O_AQm!uw$q{f2oOicHY5X zXGEt>`Tp_No-uDalPRr%65azintZxW03g+>Wva*1l-3?fvNQ1PDu=Zhtm~a6JOuJ?! z=d5M1H_&%>d|hym03C#aI7DP0jaoshj0~9**bt$lcFo0Oj%=n-WTdb>Svm<~m5qFQ zFlVoW=q9)pDQU`uwxbZ2T-CO^??Alp@pZw67DukjXQUEan0)s>gYz@A^d=oF_LppY zdD&P5g^5XgE~|AlqA$QZY-zBrg@3yNq_M{l_GKf8W*iislUp4?;CO#_`-bRpUraXU zV0i`6^Xp&%wTn+=<1d23uCB62;Oa}3S3KhS%3u)6kqDx+<&B+=q2lx>n9sl!ufLuv z6fvQp50X}B^NCYmCP8|&RZr{HyOE94hVye6VXBx3W9aE#TgPam`i{`S@)dMt3vOTs z@B5$N7;O-oWjW-cJ}CfZUD?L~4K-QN`T_5bj!(#Db=N>*_g3wf{X$H2v5wxMbh@ft zY2zW9UZ{xvIwOI`bpE~$O$nxf{B3mhCtwFifKB$?tNWsruyU9dD+CYU(P4q2qLLG| z`=TrGdL}F^YasMFJptC7c>tI{$}H7?J>_9Y8kiRGhH9bK*2fMjU|AJ0)Zv_+^{@x57ba?mA zRv#y`DS8*d^xl+f>Z#UNK9fo*_UNkaX|nQFw@K+q^M)p@>MaQZ> zs6m<4u@|16z-}hPEwAx#9&W6qryEwRvR7m)+|=esgT{%E@-1zkfLoN*`zGPO}(-lcGDT_66PHdY{72<0g=^=H}+$pP+o*x1Jlu zY0nCehf51yuLQ1=k2c^2fATckjZI9{Qmj8{TD;PtkIxWfHyAX?IMJ|zC&oEp7*Il& zu@2}G04KFBiI$G;a0+e^zomz|0J*_WVtbvSAi}OQWWT+1sdM?$kPtK)T+*u~Z-!lR z8Xa;cvFen3*9{`!wrGu;n^ApIXM?#nsa#3s=iUvsc6f%r z`kbesj!$TURoFV+>Ld-9Okc07{sDnI<^359ldH77zEu9y<|Z%0k;hHeE8h0y4%YA? 
zX8mi@oq8;{w7rH7hYzJnD)YCS!#V3@@Y>9iu5Fs+SR*1EeeU#n*G)s-bdGcwkcC<# zM#+9tP$rR ztkLtYluBm$*fj?Wl!*UdE7+r{S-+Ky>={VV%_K+8T%X5SiQV zw7kQ(GyqK(37SEDdGUWoCfM}0Ak*_|2+|J2dh{TK94e3a1lsmmCwN2LBc zeFo|-IL1g!tx*RgtCg--nAg+MzuDr@{%+^$=RI7Xl(NN7VoK4FLmyGxHktnz?6T)Z z2=so8{XDHeaSe{450wykz#8lP#!}cj*!z-??_JkL`5k9n_A3s=(e3Tqw?4-Qd9~vW=!7tx5{Fd!1R_`O z2O%9whng+)V%a7Op=udu>#qja5K|3%`>zTcqgNJE`{Xz`GO=G{y{Pa6g99tDz zQZmB0t-qku_#VDH4k5l!68;y)@2YP%-U>cZ4*pH`>$8*>C)XZj+@F*OPaZsR@GoJS zU{!=J2rKLg2n-9DYxqdIMCSgonR@<1Nd(g#A2T#)G*8hPG!_I+SU}$$#{LCvtFbWo zbl!Yy!d_64dm7pyQRlSw1dY)r&8dn#a<;_jJ6saHK zA>>|#>=OZhmSf&UpW0m7J_D+>Y~!v7#(ow&kNz(VZ$5m$2KxpWM*!`)gn~j$1Px)M zJ)on49RN1);?|aT8{0IvpZM|tRQ=a>GR%J9om!OjPl+r2p;j^q;%ImAun4DU&4G9E z9v-Md?z=Bvyg*S@RK#CXzZ@~;evXYT4vLl$2UtM!sftr_e5?Fa(oTU%y`;VAHEL7| z1!H&Bb=O#Pa%E|4w|;Xq_{J1G)0NI=`j}+=Us;{+Obg!1%FCOF)-p3 zR`4vc&7xhK_2ZA8mHo4`i8qFK-~D1|x`|Z9p}5fPI-}DhdoLq++%X=KSfeC#h)q#K z%-D#>Fae?F>PbAd(Kv;one_yczudv>-l?Xj4H;`@g#Y@I-_|E0rd%n-3mQ>r7H?*~ zMEW`0pWJZ+jg&(b2QW3(&WS@O5)OJ4T?#5=dS$#hh{hhiVO=7O?hDCdAXlt$`PhVO zsc%bvrH};gv1Q1H7Dan^HW@Nc8N4GvpuNn=Maq7G=L2Y#H9<;6oh5NVHbis|O#mBI{61mNSt6DJNKvXc#xbX#&u*O=UrR=} zM{p~DxiURFdLf`g-zjjm0i%z>5|l!QGDitT?h1a3pdI4mY>@16idF+;??%>#!l|JDEwd}6KvicK^ zX+mxeEwuN&a16`NcH(15!ozfVkh+y*bQ=-Wh4YieG-7_E#cv(uIym~>p-jkilgAB5 zuxWHlx}fsmuW<%HCO4kDed#nhw7l#Zd~@*Z(YbTIoBRwb^3$BuOwFeb92z{Oy7~R4 zyT4Zj$DZkkb_Soyvr)MC->^g`rgM1~l@f|(dHQD`9kL(Jv02JxB*@gLt3O<4!d~|# z(^(hTo}v|Etpp2Sp<0xnZH5ZbatmO|jsY3ya8^l>~antE)-X3aAu zdD0412`)oJI>YB{EY@bUG^0bE0wm!}5&jIRHUaI$|LKwmCH&|@P7lAoC)Y4HyqyrY z-~6P*dO;*lqpMZH8q{6I!uU_^raJfc-M~@vWd}o(s6<8sL#y@)EZ;mmr6R+A{5TJ@7(%M!pEXiU9 zx<=>3E4@mDr+Xq_h$T2TjO%b}-RR7tCi=}wQS9!#bt=d#h36g)q{-c$NuhujB`6v+ z9AB4<$?T`)=!${X#Ql`!qxiQJ-jJ>hRC>r#l+05$ESgMhP0ypsm(2zlL%r{O(Vq>1 zkCcEU1zF+49-NbCRq{l}QPLi?jX5)47Ga#&4{TlO=A{LnzZ7y8bxji&!>y~^-+Tv7 zc({mZYT^!7Yj5oHgCky!e+r82{VC^;W{+Z{MF+C$@{2BVnWb(rKOoIOB(u5MiN8t6 zqw^G|u+{2CUY1<5iz3e35MDxo?#&9{5RvASx4r@xkM>VEN4{z4FN&s{b{K~|8-2|{ zY5aYD&T+^Wv^P_kM|(LbR_a=>_J*(7Ab#j_p>nd8CNdYN#LF2-t2wLWK^@VoRBSi2 zAw4j&TmWeJ9CWRJ+@siBdpmx}syX-w(nKm3V6QKpby-v9qd$J=X-nRtIHRh2S4fW| zg@%rco5C7Ph)Amqe5g~j)nG%8-#VD^Z2s`xT=d!2SpVa}iL;i_1J#9+K+b!f#hvEQ zBPSTi+^4&Xx-7RvKfeJ}sx^hlyECXR@!yc!vBKd?>?CZyrjZv+BXz*@@u%{9F#ORa zwtibwa%dnuBB3?EauaYGkx-6PW~O`vmz^>v!mocXA~rE4p6~LlbihXZ8SyFPKCu?& zdZR)2ls$3ls9s0koiZrw%YEw_vx=SA_>J2#Yky3X=q=spFVp4;+hhW9ku9+t2AWAa z9YBAp^~*x(+ZIrcNrS5%h2MD{16Y>4a%lj*DNgHy#BSS@O4~=E#jwh^*TDnxHGr!g ztlt895?fV0$HS8}HGK)1yb_Ac@6I)7)w?hNV#a7R71+PjIk5VL%6*rXL-QXRc0;Q! zAEUuS;x2Kv>}bt|aEshG;6J462P=@%j$ZTXeF1jVpyh1!dVm==TuD^`meL1kp1qQTQv zhw!?InNjKD0*Kqr@Y03*bI(Ij`{yb)MY9=QNpomed<|c}z5qR?Wq>#&Bk%~cYySpR z>KD~VCnsxdk11Dyq0msPu+1bp@LO4*T?cKsA;D*v&G4gs1xX#t&%JEN1kdT zF(eT;PN%zCtin=Y@xe|=h6}5;)85m@Gz!Pt^D6$=b*Jwv9Tdq(s}bYv*8^!7^Iy6{AGPP zF&J>9k?nj%lZR3^_3bSJ7sLC=rg`h;d=2HgF}hKVHh90J=imM!!*i8wjJg4IU=m5s zu|8nhRPH2^$fVJ{y;EwsfPQdvv;w&GU!B(aJG;6#z(Raoqf=q}6a|QEMoAea5!FvfnGy1BtTRvZnQzBwt))w}+99gNmBO%JT3z}RM4jTh*h zHYc|Ok$knS4$!dof)4TMa}clskja5pP}~QTzkGNM`*VWbTSXNced5Wa>5#>Ua4R z3k&P}j~{BU?wB}nI0roE9OTJ$~!)OKI1;RnZ{W?n5AB3QQvyN&pS!NWs^vBVU zv5)lAoE;oE2tg3HH%Pyy<55Mp4ixw!0bidCtefyVUPbR)VDK0V>?sKAcYzy5PV-hf z=%52aXcWK?Vb-aR0{aNmut2Nx3TNu1mX^Oj7N5pzTfnD(M_pJQ0em@S;0;82dO4ga z6gp+o-P05L?M>(iFr@na{X3d14{*K`^u0X(lg^4f4XB;AK!0z^z;F$;g!uS&MX{Ss z&}v+bn}O%}H?Rf``$8giy^jheKNrx=7(@Q!eq1Nin@C$`vl4@1TJN5Bs6A89n0g-aSPs=xwb{@P! 
zJ{((QLHaT9rHP>mu-V;&GzWw2!M3;iHc)IQHAFX+dTK%fe!+Y=kZh3k|03?Kqq5w- zwoy0Z$A9q_rxD(ob#RU zjB)lD493_#z!Rc|e*KN<@$IRVH{mq`&I|S6L`8vnYfoyq_m{#8)ne0D z_&@ROkY6z#{Dlu>G}a7jeQ6l;DU zK(9!Vn^AfO3k?T>c(lY~OZ^c(z6f~WZowafVk%u?)Ck~ z4p3Lx=t%IUjdSwY@P?Boq4sRj)tU4Q4g)KW)-L`8WWU5eVPGjOQYjXXaVAfAeBAtO zj?El6BG~sF+LbFAyc?{XE~2B&I_8&K)!Y2$uL$Cco`;AQX=JJjTvbO zqOZ?$_a0ksaxC%9&F#N~Ul2ul2T_uWS5au602}$axJ1N%Tv{v31ZM}s68B$*Yt7)0 zZwh{I@6fvZc#!jc$j4n{&+jm8jWxF5>9-3p8>=FM59-bJDQ=YsI2@X-C(zvl$i^Hs zWNjX80{;EvaOuwt&y)5oIphA}kSTkk$oP*&eev>|?#H=;S3D$4F2+0!cN~|-EY9a2 z`qm`WZpbJmR(2CJ;@p%NGT8m=@xal-8a+q=gPd(xIN{@?*~s+$~ zt=?gpyVT?(U95U|ImhsP$U{=%U#+oQzj>Jgk#p>V`ekROd?kO#c<)fcD+gyDq19D- zV448v;ek4;~KFsO9Ifl8?O0NY3DLJ}1FtdD2U9Jf{J&%rxFh5D}>nh^5RM*As?4ohkezO0T@1orKfeV{om{F>wg%XbH zK=)N%QbUBp6BOyGEvceFpCgeR4F$Ka{{hAfGDU%gL<4{F3>-MF&d9$JV)PRnnO>@# zz4n%}{iw5$F-RyGpU@WINE~+1D#+v8Xj(hb`O-Pv`|Cjc&7G6`&(GZgukKW!PVk>} z_q!jT5fCy9M3Cn;6&_T)Ut+Qg=dAt{?HBP?o)^EZyoat{!6NT^U0c4bjWv4HR}`MO z4sio0=rSkr;LOXQ7R>I_Uh$O-z72=6sut-Y<5P?tE)Hj^FWEQRIW$-9@pHDstq7*= zoK2c)Ua#wNH><^WRwUnxLe zF-uFPqK|_=07xol4^bRVRJm|UL^Buw9w<+~j<8C>;nrPM#rZd&R`EJbFUaJfm5Ql3 z022Ereb{*7ZwL!2npi|>M&TU@81g@u`La$YmMm#wpM@>TQq{^YFF%Ste`U(Deex6K z`;lPX@KqXk>M&Q-N(qSb-`Bm&iPJHSWrzJr4tZdIo9 zsJem%cOY~uHXW%4!nqsJdV2vy<$B$GCbv^tfs1{)`i2HTd<_#5Pk^4veq&VRKa_aL zbY)zpsG%Xye9{25`f8F0o^SA zwrIwrt%Hp8^xYmI9A>>HOt^gCz$AgWx%v8dStO`tMb6Lbi-wZ4{**}x{5^BFRU>V0 zUu;f=1YBh#$iq>Y_`;;vkolOQ`*jKU_|lTBJ7m&=O4_!;d!!q4jaTmC3t1zX`x~<} zm`wpR_P&)wVRa~57fFJK(JzQSf4tF9N?cy*JZ`1IbYf3D=zpEiFw8$V71wQ)ptqM} z>U_1v#)x%5Sy_p}!TUF%I3is4S-Gcbq6HhI+A*K-dR}3APtTKs(Ah`=tdtftG1+;|TcPSj@+{U>#!xJ>0A9V3czK#E?LK6Qkl1 zAi}uQ1!Q1>1w9_Ru?s*!ki%k9Qe51Bj|)(PL7f+xWGsshpsxj9G2qN)W@di;Tg%*> z0m&)~3EcA7`CoFpKK|o>PVIO8br#t}YnZPkO)QPU93PylYI$F^-_EkXy8Ld*Pr7$H zkxLba?~PArGgst?r3weHgbDt|T=Attv3IE2g+f7>)C7l%DogqHWG%^SDH;C!Ul1XB z%O*xD)yx)LM6DK8e21B=XY)dk?oGxFztu6DpqIzM zeP_r#2wsxhJk?!_j#C1d!sv4LV&v%c`T038fHMA6FJpnp^x!AFLuKZ`(g>U+NOqtb zkZ%FB3T%E_+s7d7hWalAh2^O5ei()R>ftw^-CpOMFPcJhMmE_ooQJE=@#J)zJdJr- z^Mm@}?mjE?LiSUt;6HoK$?63*B%g1`?n+tS!2M5XAwrsAhz|qDvgti`3qtD3ylJKp zXNp-L7Nfh{!oy9MYV%2~&EAv$_35bzS+&aFo4U(lGhX{|ar`V&_Yj{}{`rsqIXOAs z)%l)$mf}XiQPo3ed_{)h+taV27&KPZtAQ@I5_eYC7IS5)Mi3a7MRJfx8?aive&1(4 z$hlGJ33<4?ECpZBPBc9mJhHmmWfKvnaS=^nUh!>SNMDU$U(8I z9eg1mA!IbEfy1SF$Q@Le8zwz@78@Qut-iPveWbk>MX!bwmjgUclc$yT_FEvIp}jo^ zq)-jJ;<;hzzV{#;@6&u4nTTVhsiys`&RQY;SIt?K3TdY2HMAlX)*JjuQh1oxZz5{`d()4BbhP;_#HaL$Aezt{~+tlz2y{e%2+_4%&EM?{X)TvN!&a!MEwVpmUF{qIrILBY|~}CWjJc!+GT38GJ4We#*@GI@8-%Bt&aE z2V9&T;o;)ef{i6uxBMeZ~CGsn~WeT27GNHtK2{X=^j5zgb3 z-A;vPm^-aj@CYW$Dk`fKH0U%S+285-<<#sP%$fau9eejpU z98x|Ee79r*559+o2a2ZK!QhQ&20+tO)#G#hL@grKiWSon`nZC*?;85)LsrSf zFiBV$IQy*1M(<|0gUAY=Bt+U`DncGb`yWwbEl@WmT!L zr`IUAMFs9~9|(y9BA$u;8n!m`!2flN!n#*WS5d1n4s)hLDnaE$Oo+spLgH<0#4(F& z=}%7MA#ru@-Ewk*U@AJ)Z}RbR7gd7RpP@4JuuOo3#NR)49%lE<~RK;g2cE>T<#|^98k4T zf1^?;mg&9Xs@_~;A{f}GL*pj7{iTneJ9fQ0z2Sh{x{2)Z)Z0>}tk63B+q_2N!D`vjW&0jx)?>ym@{fHk z@c*?S2gjoax7mqdDNKps1C*If2^07M6u&5co0?c0x2Ijl>{j}GATHkp!q`P(O?n1~ z&{y8Kt)^>HvB?B5=cNRnJbEMwWTlP^-;|ainURc;8+V(Vn_GeG22jL0Fa!k#{&@S1 z))t6nL00zB$>`cZrV!9GD?`E(i-_%C^)hSupYJ`P@OZdBA_QislCO&f!(v@->C#C# z`OyofZid_>r&tb@*;KSiU#W7d4v7r(a(4RXus$BZ2>`YQn66GtS#&@SxV~!_06M`N5BR*-Q9PCP1r)E#)9=`xNDk@GE(9ad=)CZ3Z*CM;Id`BS?B( z;4g10bDgR8vE3Z!o+^f7aZ0Z^K(?!kEf3>~a&|D>y1~H%-`oa3C$S*Nf&$5%MbA5= z&lQnO{=S1BDapw_Cn_BD3P<1*caWs$P}p-A>egn;bJ)_ngx_-$h_r<=-X)j^5(Pp} zX;Fp%aD-Lr9%77wOM(Z6HHrpJ6FefKHekF-ko$BgCoXe&?-HB3cF|`^0ik6u<0+_*Bt!9N^3ki*Yd?{SLXp>~00J2W7-~_5i z3v|f-^dlyW$$LYVi-? 
z2mKX^jSWDHSk0Qv5MkyroQIWHvAQe#kgDBA> z8}M~`@_cy=q)7PsyJ~82-fNHeZ+^>LSp4|`xjWXGz_5G*a2j3+4anydnBy)X{i4r7d3E4kBBy&F|2Oy){5>DQ=LCpT~F9=fL)Pg-{D&>+YW(}5AgL8qI z(f7rsDwhYj9^7|=IEh033uz;Bf=$7MLg4h2;SUgVn$r!f@p;7W5pkM#n3YW2u7z|y zV}(1M1*y1QDqmojN-%$3#<2Ql{jKv`T~D%qYYT`%Ms=;3v#uXpmIT_b^lLw%q68B# zdjUO{rsWwp1-+6nLOw?(D!I!{{X5ts{v{=xpdz3TfVgV`c%hLL#PEdMz^VV3Yw`0v zJjdf1KP#(~EWa)|CHJtgyGk_RXn0mKe_$nj5B#c2-t1`OTzyfBo;f0v$MV zx4YcjuPz*r0ph=!@I#qDUKxt+151GeoLq9Fjy zS0Z5t|5r$BeUscen-KFh#n@!)7vnj`(geQ}LH0O%`MT7^i69B1rh*8>H5dkKi4Cs*z?5hHB{^aXe-n84k5g7L_tOUAm6 zHA6YXWQYz*S5mUF4^&iCx;Am5+YAetA(V!@1=7m2Y|}_0VyoG|66hU(=o(Bp;6iMG ztbs8M$->p9wBPfu_#1yyha-`jHWwWHgHMEV0$2Jo_MqtWfRr>eDTxfa*h@=G^Gpy7 zkgkTRKr%SbVN(tOW)OAEfX^1Wy@>So!Gx^7U>p7UdSXaJ1`iChyr!^NloC;vyYp|B zjE)!Hu~9-~(m0f(!At6KZY6L$eG^39)t3K^Ky@$rTw5FH_NzVa+`45s<#x2w_*iRu z#*eHIKHb;Xhulm!uN@^D=O=q*aCQBZO+yE`i!6u1!1m63q_p+wK&FJd6fmD-z~7dP z&!jSj?A#D?kQ8khi8&DM;<y%X>PjJ7{A;Vk3x^nMjLhMOy`1#~Y&U~fRDVBF*C0=g*{)GYFq z{zBzkquu#d+pWn2-px|)n3$Lwf)MTVK|K^k9$ud*28}<*6n%CuHmNf2%vkgEBZqM= zys;ek#ut;9-;^|Uzy-d?Jq;sxH!FyvD8c;ej1`<3dyh5=S90}F-@m$!_*do{`3Agnv zh!>WG+@qh-(}!@|ELy^i|ENp~#_~7DmVEqQB>WYgcQzTWR*G0#Em;ZZO%+^j9qi3k z;{??Px!-OudL94l`72G%N~^ghR3_wJrO@D?QubF_ zaLu-(m8^}GFru6tZs-A4f$-K>fxZA=z|#N&r=JK6Al>&l;>1-ze*S>`%I$6!iRjzg|vvJbZr2u)B zI&CJT(mS{JmLRZ&ya!GPIP?S{g#rQT$q=&g4lD#v$900@%Lv?KaU5o|2@aDwaC1kW z6G42~q2VxL*NfAi$nU~aQuc*8RwRehyH~tw2+i9!+JlLm$Rys9Wvq0h&|{&@m&XS1Yy+m6WM*cY_-(2=iY^(^*uXP)$8VER~g)7uc@ILWn8?F2uO! zU({^1l60tzg66j`7(>^{N3~^QbGBXJtJG#A+QVWzav<|KXLfgYM^@0GG~b^rbjRrF z9Y?Tej09`qX`jcM$a3yEXgojw`3-*a4*2B9!wjIX0(U0xN6omt+(8z70SaRhvH2e# zJHqzQtrwtz$T*a`^s5D*UCIHbc*0}*l#OkmY#ls#Dx3%ymRI6TQCIM>Yxg&`p%zVf z(-O~b#!#RMxfok&0^5@kV-P979RZ- zBsGxX@NeavFQFHZI0Sv2J&F<}{s4&$9q`Si6%}$RqF6?b9?%2I;J6N%#fOCHSA^#t zusay$dqxv*v+S%Og%W|yUoPI4YV6*a!a-hubZ!~4)7i`RD?DQJU={v=_=I-b`d6vCS& zVK__Kx}aj>(=6*mUyaF=1=9+u1OPZv>D?!*A~t zrH8=*VOQ76KubKPvY5=N@;lY9lI4R;)4W=+X3p0OTn^W1tmCgdMk}2PZPq=Iqi1DQ zK!fvBqk_Yt>LAMIn6iwS{euT*gxOZT_JG*$aRLC55{-fQ33zH6fhCj9K>$~NIa*7+ zkH(CU)rbA5+^)w!!WeLV{S^)tn`KL+ZIFhd%|%2+zFN$58}7$@!(WQ+@-BT+xR#nT z`eXG-e9YDNKvV)rhSJhAz1_RDMUr;ElZ%*Q=x8Nrtlhc#AxL0_R7<(T);kDt7)!gs zj&nw1I{gSe{*SN zN2%jIW0#0MQ<$ouq6{bS@O_p)#$B~XJ^q5du2A&_CRHK2{9DkoVYQg#1Bf67{sbvn zM8Njpz306g=x2bl>skvd9T@+Q#C-bM?AG&4W_|OLe}DuX zQuqWuDT}=S*R)(V*xQ-`pp+wMK%dBS45E01y)}`=;o)RO@JXj^gM;`*IIa6q-3Br6D|=*0RC@r^c-54 z6(km#!pOSeis!OlCxGj$5rK#X(iCXvlz^lYvR)5SO6A4j7%-dkW+^dVM^=ZUV`Fi& z3cq>)62LW|6{o2p;;F)!+(wOHV0w(7cm|8{{(X-ER!8x^@D~oLXQ}GX@6!X0m1+N? 
z9xEq?o#(U?rreOS^K)F?v+QADSQs_L{BV9D{DdxAkaooXVE4!XAI-3ATI#v1EZj;Q zaT^7PMXK4h8>9Gwf`UYXyG{XGas0d?pS5bHi%+pKWivb{>1dc&mcFR)dr&cu-*;5B zNU5d!QL9dyiL-~_gY}#e#JsYHJx8>edVYvtI3I0xK5-bYgBZsR-`tk>4ROYO&5T4) zhTw8wBvUOCd=r6M|f~S=No4JRHS-FS^w>o!t zO{>_rx{}Q)oKrp;^4-P}p~wHYUIzAm`^}1p)>~(>1NHj z5XU}9{0#BA?!!N&vRGY-BVk3RX2Ir~Ad-R1qG}W7j^%8oGD$YmT{i_iJVo9EBBGWZwEgo05 zx0q_{c$0LqXJfN^z@m?D%HHSXysAWbXYU3|o5D}wBbUt=Lk9z9!?%D}>r3PV?$RS1 zQ+r}F-sbIfV!eC!u=?YkQ&>lWbG%RU?fk}#vWhG1O}B04rGhhGfx3x{J9Ce5m(>>+ zHX0I+u=J<%jN;o*F`Z*HZ<79#Cn+A$EdB1SerKP^K(PNU?ag3|KsWx}%cExkXPeZ9 zbehex8+QU$>=-57;~PGin{m0Eex3Zg280lW7VU32F8QPyagkdQ@nWYve(R@d3RM{2 z*#nd~M9$*h{AbZ_d=_L{qM>Io*7&qmi}UxW-1l<=T|?#<_4m%&oLmo9QG6Zl&esU3 z?TrycHW|;^uquorUb1VHWWgqO_7`GrE%CoyrMF9uCVt0irD<>NxXoGH|M5i-Qk;GFG z^LKNkvC@JXufl~Il*ew}j!rhtD*n5_>DM^B%GZ1;XkWM+O}3YSXr4-{*d+eC<%%-H zZW5W0&=_%%e^QK-s}i@24boROeM_n7sa%Mn4~Ph&qGxX>&J5c-b(X&Sptt_#xD;J* znebr6iExXJ`opsK*PMUW^TXaA^JQoIKx8-4EMT*G>l5oOzbVDI)X#;OWCM@2u24+N z%3N6vvA#s^)@uSu0E(R8mA_Sr=D#9iq2p)MdZjMT1v-P)lSDg=>8^+5NnJ70@5NP+ zXGKT->DP~NI*MJ+RbY82(bwLzHX>W9F+o^}r){sI zbi^^A6ojo=at)W6VW0q6!|h$tx28lBk3P-+y#;fakshk8zRv=>Y|N($B9M4g1tV`@N*`Nr+d!OE(128@^CT(EN>`BIe3IU7j5*T-O?G2 z)=qug-12zJ<4ez1pFjKVv=lPxaHt_!`a3^%9!={jH@5vlN!7#k3*XFQE40FI=>V&h zgP4~ydp=9)&bDa=i!0kX_Rz&~2vCY4d(G4(Yu?GQWiM+jOLzY~jkXO=P~*gN*uVRv zd@9z$Nix?Rh3@+ie=OljV>JgveSUhPR_i^euIJ}v7Rz&J#modD<91U|>uDA0OpBQ| z+l`M;PgoCvV7hx_ztRCkG?HdYz+rzS3^V~uqUdMeuTQ$J5pjg2PkA?p12W^hWXJrp zlcJSu?P#}IX`x+hc5&1D1L6Q1#XW{6^JPzxpbf3V1m|*jO*4QL0O2eiyM9U!zi=`p zwl;)d1{1LdLK#2?B+xK#d%N%3v|os~B3(t9PVm(tFRkJ|M%#%@yk^4jlsS>GeC;9I zAES1(Om*UjinHxrwcLoP?`>bmmioXqJ?0#8#6`s#p zZLwxc3b|kUSt8e=?ShY4A4<~a9HM_P4oaEle!}A_71;L2dSN8+1J7UJY+P+f$ zacyj$oPuHIz4h4A(wG%QnL+Xl8z;428EbV}YHfT}RAA6ZFp$nW9Fa*1r%(-8j8`gW z^VnBRjBJgJHRtx20R>dN7W!rDU}xLfmPc1D?`Yw+pT)^~MUtgm;UnjGQTiX54>c2n z@?BK8&eXuDa6gr?QsMQ#xbpT%^Yjo$8=X3PxUuCH_*YJBq+tK79liUSo}D84!$u%w zYDOCKVq^e8Fq%uM8H`lFA**Rl*HW@(*&G%zS|i?_xA{D!xE7exYS>;iza1SJsj<9E zRJ8VhNYPWz4`;XWxa7kCMR`llq)9FA<=N^Lv&$%ApJ@9joJqRGtD9Pk$}+#EcxzY| zuF$(P!txDT>56l|caz0Mfhw1+&GnFr_#0&=6QXQhyEMBcbDx~B55u2Zlid92IirEZ zVs~1uTkF7tC5|3EKx>CM^@jS}$(1rY3)S0OLF~js>VpY(16GHG)I{g?5)7BUbVtw=%A_wE;`AVcuj^Gr2igR|k zkL$gdu1SrtlNygOy?I_TJvJhzt&r%z_@bV%=ID10<7l<(#*r~+8}qU|c~c9!`&g+e zC8(G#4x|=Wu3oQlr6?$PZl}E2cy;Z8W$7e5;(?K3cSYJd++7Vu4%6T^n}qvJ#+jLwm+N^H z-H5RfhZACZi?dFrOX7*ChP_Wp)jd*-NMK0Om_J=%{AX+EX5ol+-NvZ>%I3J?CVp_M zI!P6oqyI;C6IP>BPE*ywwlY3S?f{tvvPMV=jIZfgF~=pW0b7^jX7U-q&X6OZ5=#b; z*+!YKSXs3IUSio#JbV%vf5^!F8vG^n@hy6S%VAUJs(XLc)FV!ZiT&%B`mjbzbBRxIC9hv`u}h|OLC8aTZ+#SvB5WF_7T!f&zis7XSkI`mtRyY6rmzh|Wk59#t`=vC#hgCa;2ce^wODGw42aX(7fy}@N=ZNK5=aQXFVsDp5UrK8n6A}nwWu0kX z3#UE=;-FMR;duqY?jyPCAKlYp@|O1_B>qKqCUcg7eSV4hzIuKHlhPwab{HC)>)Er{ z!o~hAePl}o(-oO7Czr_>`F2sHt5}GFvf=`tU2hEq3YZmRIot17-BAYt9}xjL4hJ$2 z6);3VsmXtQzI@yX$DqykOF}}Yncb>Pw&D|@Tt&iu;Vukxnm7w(9-tZ~g~m;&&Rqkd z|NCzd#IbcK*nX=|5y2Bt7zFZnudzC@_sF0(Fm2(#&WSyiqh?mAQuyzG2pWvsw`qlz zj*42>SuU!qFyO(3Gi~jCYq8ey9Z8@9`TTcFE)tTFJzm2pYo`g%Yg(<;KQ{Dok$QIy zXYdLd?n++@*V*_(D0f74*^c?H*0+sZ_VUQBxW< zME=uZ$?Q0=+>%JYHW00n^J~K-YL3ZedKN$UrsdAv_y38W%rO}z4e}I277=Ju5pfCG zUg7~~dhI-+WIUhLJ_t`TQbl4gBzn>YUq>$?4Jp>F!S3HwE*tcV*dzk-$IMLCiu^Bl zi*>VyZ+W-EP$+{yvNO;LB5T=Aa|+bA<=#t0b)Wh+RCpjhQ(23^d(;PRQQNR=KHOCJ z!BkqR%S6Pi;>P5HmidxG_7^g=u+HTB_@{aLe(h#Fozq)9dYx^BQ@9E9b~IVuYj9)>^AD$MzT`IG?+&HvPo)@mbotYSymRwyv91LXE7* z1*uHr{@O5qpRmLz!*WriEz8`2u&vg-+4H71>2O=SH&tZdHV(GmP47DYy4lbX*5?*P zrW1_+@iumr_6L7wS2JHftS3^`5&_*zLU%;wEo5Er+86#Wd_(p&!GHhL->fk#W$pPh_Op_jG07%064`t3_;v#nbz}%xJ}(t!>(KjI8K% zxt3}->wk$XA82m+)r@#G4z}gk)#SM@Hp}Pq%OfojzTRR)j(E{i$KB)|$~W9p>ROO? 
z&kG;zri_)q-VH_kbPJb7~5JR8iT%}aI{xMB+jZ~T6O<7=1Z{jNj z#p8dWYsk9O=>+Y*8{+-wf!l*m)-!gf$EOE%scHYrUnyD?_xv71*>s$@`%OB@wmR%* z@0OG**+}GE^lS0qW~E-(BwbFZCH%b*SuLG4WGOwJf%DxX{86P#Lz zq*Siv=HpJ?Xr*!yAIbw<7P(2ZnU=j@!FV$Go^j#v--_$pgaq`Poa05DVbKgI{{P(z z;PffG-Kfy+#56s`Eq)bCZB8luoN|k-#N&?1lJuHwHMZHQvDbm*@TP;s6^7OVq$o*WmNrB?q0nY#NoD7*q^>j@gT9N`|-9&adx~!QOk!L zLNB<(kow8{VMS`X8LqBO2*TCvSU<-KRz1um?1KgzAoZeICWSQ5xJ^EL5?{b|rC ziEcxjIRsIK&2^p;-#;6p{jo_P*{ySxJ~)9hM<$!xV{^Ve0}3M_T!5i!g3KvonU_p= za93_@Rz{rh5X!f=mE)S zQ9kU>-r-fXugY9f10z@P3|>_4PdT#swUSgg=L5!8E}k~)Bc)$`@R}{v=Ik*^*dtc4 zRQvyRW88F`c?NrZe3)Ql9$Xur5PH&GYs$8YNI?4OySNV?cyV9+#kwGZO3<4^^P4Eg zGb#7~J{fkZ+lZDV4r?&aXnf+$OeA$ORJ1IB%ZX!QpQEyDif`6)X@2h2&>|_Jv*DGC zL1Hm@g!jP?(SUY(i0x!Ut9e;>5{2fYaB+3G$@}it>EzY%n&en)!bX*YcJyRc;Elsj zfA;UtIrAVg&g8+Kgj_BavqDG{fgByQ1M&X*oc4JYqnS>$fv1^iTtY&^;hCO}51m0B zePR3ANf?e%7xt3tqHC8^66=%=c8`71!g~$PPd?rJ;!FhktH0Iq{KQBd5+Bu^a-raL z)m-Kw?nrYHH!uzVu@#`t@eS9Cb`AL^EGkOEHQlRxok6?*l%5{T!c8PeD>TbvpDDjJ z_E?qwd~FFaw^vMRH=3O#aJ#xvVN8dZt1#{2z${D9*u!k~Tgi>Q4_zsKQd-q^^ahPD zxZM^zp9s95Dg1YCiR|w0I_;p7@$5H;{bx5qog=ICHY< z+5SzN22I_R_&e1vHa3Ol=U09RMz}wU(IBIvCh5u9wIstN>LaHFduB6p~Sdv z;E1=8Sk8MFo|BVTUEZpq|1-2jNuInB>;g1o9Jtq3YZe%YPLoOFD?l29no~N5%E924 zXV zhN6=)zd90*?8P-IPX^u)#&XUWOkVC6>6rGv-uz-GDdO=yJ5Bfv7aeQ@fACbNZ z%JMAzO1JAPwH~?lPGXQlcMfmVPZ!D)CUZ|IsHMEig->Ilt!`$tD!Jc?LMWEKqmFaa z2|cn^(ykHvJMkKiWF)iYt?dOSuPb}Y53d(JSFqnQ5{SYg-@4&0;C#X?;C^7$_1*34 z{*4b$SkJ}@8ZEq~O&1HzWd8`MIpqQ%i)HJxbI6RV5s80AZO3>PloguWj223SI1xe#WBC@_g}% zVBFDGWOcKx#cm1Pbmke7L*+X!5fTK}Ypo-Ql#$&zTGjVO>!HII{f=Rrj*8=X`YRjUHw? zdEYyBL!R19+NpPNs_7gLrN83muH&`4Ooj-O)A4x~1x?MT30{8UrzAEu|DEzf|K^&2 zTSY?+^TqcqJp82BE9v-r?uc*jL#d5}FKrs!Y2R|>*$vKyYig;sDTYHz1ITWeFhS$) zj|^@^?%%tIep6;5^GpIsVS<9kxLv&n-GFykWhXDD3VfGHOGEg1Ir_%)$c$|HGRs8N z?Usb}@k0e>r{uqyPG7DyTTVz{HO&XrxDUHX*$q}6`)w6k&$Op5V%2R1r~Y=N?7f%S zQF&1k{GDU(=U&0~iC@g#5}EZCG3DP3Lo%!H^e+Tw^LZIF%5(&K?3IEFb52Q7EUxwh zzOZ1#Sg%OMA01STLN-bdi&TpgGj@SqrR1y8<{1b6SPjHk(O()>l-$CUNKfw%G2Ict zGqjx02)iHo^_{_pVxr45M_`Sz(xU5mqiXzIja3_6VMbX%Ra)GRi(0@tN#2hr0`evjtoUPyNLKxG zD?yi|!-L`VnN)q8dR+|O#eKiqi|*oY|8V27>*g4&EuL%P) z(harg)wZxPyZq$8m=H`l$l=S&&z}GlDl;JI0Z7P$e(MI`?y?p$etZzU@h9}Y24@PU zc8}~m(#L~Am-Pev8Mx47)2gp~ygdUYIZ$J-eh{MbXeGDPjKylQY^kSm#moy-jVLY> zP3Fe*4bt)8<2nS;ru7`eYoLHbGFIz(lQ1U9yW*H1U+AV)|C z-}TpZ9+IiDrFfyW>drqy>+hLUwa}d>*s?u>W`yqOf$9aO_wV1o5B;Fc99i38hHj>u z7$?GzEX}&D9Hm#OHH`+VN|oY<&gek$g~FBa4hZ=bh^`@XS9^E&(moBIvHA|*EmvCq zePXG&Noo>K@1Rj%T_C21b_E53>rdiwLV4Y~@4*Mb*JWup#h$rL0TL&K45N)?454m2mzPnzvkrB$mphzKl!AIxLQ<3leQ>`tXZx| zLnI=6-XbUEu-^lBQnqd-l6uyXiwP*iC%T}eGGCdF_KUym3o`@ZcRId0maUDCBOw>- zg`=6mZbxw6J6$Ixn>KX6T${O-wp>L1aJV!mmNo$pU;E$=fq!k$Fuk!HG9)j&|4; z`_#+5q9#mQJKA*LzhY3#D&l|6OM<<-ySxAX!Tl9{A}9^{!Bht@>p-J~xVt8UTCtf? z^J&@f4ef3cDdj#Cx`;RE-MfQA1zC@z2kplfvhEcRL`FossT=5rZQlTHX{v&P0{^fu zrMhcNd>z!kq7POEy(8~6HQz}|Vez&($Q0>v3nbyFy(S&{`H)VgRry&YQ->mEmWEFx z_KnF$caOK(6=%$b{YtFJvIlvYyaZ++$kC8|m6DyE@f-$+8yWdVN@~!@?fRS~w_i(D zK!a~edHMI$)PE``UlS1%Xa3GisWyHr#mgzT^zrYOS>x6LF6Oa(>CMARr?4UNNVGzB z%)&v@m}g%8E#g0P*Z)}xp(qBwu4Csp8*JLm6LKL?rV$#RF?wUlrkZ{{1V!TJ{(v ztXas-jh`VJqhu@uu|^4q1@kt06QQl|KCWL59vD}}L@0a|Z;2DMgHR6e3fW{x4W6976-q0k~XgZ?!+Dr=n@aL_L`vKbd z{qM`aA5D2^iO7c8OZ$EA;G)nA2$6WoNFAB%gZ2+4WhPT!$KoMFCb^c>?>m2!W>uAfUK55o zeaHF1r0h51I@5_@WMzNq$qrK5c{`<1S+_9HK&qA~F1h}L5ed0}UL{DJ+xrY&jI&xiOTymPqx1sFZ*#Sg%(wRCna^kH;2uGk)~iI^K3 z&vced>bMwl&-g4oh~j&{=IP9)e-d{zH@Zt+cgn(0Gf#nzjxPTY+UTk+23H3wk1lVJ zpvmPMg)}nl7v|VExjp4P<70co__xC0S)rk3(0M-t&OiH z62{60v~x|t`b~dW&jwMajFRYQQlteab%zZ;Q?Ae@IRC*R9X?evR^KDKUngo_dZ`LS zp$gZ}+OT!BcOGOjJhn`2J^zj1Q3&fAdf53i#gN;sVn%u=AxmPCz(HBy5I(O#Zrnw? 
z=2L3Q?;}d;LyTSeomzR}3TvEr+=o0Jp@HKsQ_CYy=FE`G@Mt2U6FTe(w0=s`QK zw9&1N3oCkeQ=`44+UZ$uuWAw{+cGB(clyhxwKm-~;&xdHKbaBF=^T>tjk-7Y|IBq) ziK4CjD!iq5#|qCv8{y6TY-fotBvGQFq`^=oYE!~Fh(py0X$9JCkw*HMF zRQjc@QNF;6$8X5NPWnRnYRTz_=gTDZ;4p#H81}fg{scBpsIr8nOdh`#?|znLX`7M$ zg#Q8^|96m&QCLG1uaAeo3ERQJ0WE+KiRDwvUW&FX;s63>yotghV*9%h%nw78tj1S% z{iLa=Xf)8)tZ&Pt}PMPgp)o!3_%h zEZ*V>ZagMxtQp{cO-)M^L4$*Ek6*3Cd?9Ed%QCd`K|?Bax?OZvd@N5yGO@K+#E3rC z$9+={i>X%VI6UicRZ>u3c9>M-66@hGM{(QRCZP>3oaYldK=MTIMY%7Xxo zm0$Og+$(^puf-%JFmk&S#Qyrf_`|wsn9@~w9@D@gNmb~pXL}^t^XT2c&le~2g_JNg z%{srtiF_LZ*JDpJOQ=Qu{=;P1F@T+mL*RB>#;TR}J&v`0=#Sn( zNn>Lg^Ag_F#m)`;Lg!9Zx)Hg3k%K&DvC{1!-6H05OswYRH^rrNm|}Db3k&iJ3Yp0J zxN0uQpnbHyV6$X=k9YoAd}iMSv$MD7GZPX@UVB+hO;TB#V(W#sKl*~0M)pgmNw>NQ zm2}S$c;pz(-B0;gL>pIhbX4YsHC5jM{e2e;Oy%xZYBRL}%geoXf;8QIuLvUzh+QrV zm^&Y7xJ{|?&Id=x%daQr=QF2nUX+9UpjYPUK?dhvDuZxPBGJiR(V?trh)@-FGs7Yc z^BWyi#YbLyMjPX0cxuAGn?w)4Ks=8wnlO2J<=qtjMp)7dy&t3FQ2h7(e0V1r6&1n< zd-5tO0nKa8L@9TM_oIXuYm1c4f_e6=@za!KgV5AmIb4o54PZ9$mmjQdH!VGVBT-Vu z>-`@>-O3@omcg0^5+pC}?Ce_N>u70c$Z6TNv?LjcplkI|%ky>!f6(?%W&E^gWxU;= znI?bdcBT)U6>!0|Cl*%d;?#*JRIN(srFYje#WJb=4o>8dLtE*@(zE5UI|zX|Lr+gH zw_DoDiCfgxw>n1iVM6%Z%a&pchA_Jq;wzrDB;Q`U3E|aghnJW0T)+2{yn>QmfVe!l zvy8ke`bh(|9OT5hmL2g^IUQQ=2b6w~yLa20+^QS#>lbY$_5^bp0_UTVdO)kG&VUM0 zXm1={7uR^FnP*vbwI*6bqRN*14OB}`&bgk!qbhSqs|`0CaP4XeMrvbZ6b*V$ZQ%s? z#jtU(-#6&L$>PkZNI@ym4HWjY4jxlk=RZou+ndwJz`T!FUZKFKplGL5S2hY2-gTI+SK24v$d!#4kW}R=X$O zN%$}0E1uh&H8@H_{8(pMBl%8Gz7bE7^I1IT8V?|tAdJVli9J5Yj(hjBsE4lYSz; z>B5XW_Ajc_KN2Zc1vi)*%m(iTypfcVnJ<+z#E8h>5pHUVOys=lAa^rMK=Wm%*b9F` z8}_K5n`8(V+Vya6cSfwW$-hc+^*wLoT3c$=QB~gk6qK_@-*&1pRlTH13nHbo-Y(-} z5&bC+7Tc$ixC)&fRrTRkaxJ8MHyW7aA9B+`oJ*y}6uFyCtg^QvCma&~Ikukb?U0^WTcTQaq9Col=WOw{&J#DEi zG|)n_v<&LGqr+MCQ#s=MKxGrtu+69NgYq72Vp}$C%Nz7lz5%x#GV9$v0?v!qJ_aif zWOf218jXo)H8dPYt!hZezr9~o1BO!&AzZ;`$7%pvnB zHBprJt5v0HNzr%MK6=z^+KT008x8e#(O8+;m3s0n=|k87kVT|enQ4lf_u_sj+`si| zg*1S0WFDI^Aj?tUGD;MG{#I!_0@u)yfM*92U#Og#P0dWQY#LqFKDVLDB^w%rsk;GmX|d3T$6guzJ;7Wz3D1 z6KLtBUH`n_?z*PXyI!V_M_Ga76aS;O?~aD^>)Ixf5QIokk|0V*^e%{!L_!cGLiApu zLb{Mx864grK>HNlFQ1%x9GXK$> z`&pR1`giNXu;?o=q;)*IJ~uCwOXT0_koh``h)E|6dQGuL7i<=7EJWsHBvmM$LtLrC z%gI+)pWTib;`wy&WvBD4FN-AExs#juucMd2ar0_iwojJo!1Esdris@t1}7djp%p0JjQ!6&ANqhf>g75Ao3?r9ZYlq@>dpexM2%L1`Yi!fOkQt zZ{14uWC98K`Xh^t(~i+@pJQF)5RTE}jQ>lGIPlVVctXMoYKlnK^rQ?Qp4EbirqKw# zSE6P2jH^-;U=Z(C+XfGuTI$W4ZyoZM&*U46C+z?4Mr;p1oSmYBPOo2Pz0fqn09(7Z zFY7S+K+Rh8$+M`nA;|Gtn>=rOe?{C6{A)-U7*jXI@w{gW_R&+{<9--viFp?{`o195 z>8bz0spN@e4~w;-n->(9rYfi3d0Yfz68U$Z!As31uRrXjN|)c~$xZ(tQM!8xIKI_i z2M(PBRX{1RB|{eDgx^@44;jUN!DxXSUSH!C3)aIF%v#ClRlR3wJUbIj=BMxfg;p^> zL0R;s?cbiq!|H^nza!@hOe)f?V7_Duuk3E>d0nWNLP@zh@VA?nikQzR7U5StLZc#O zeyl;^rCFItFpSa}0h%rp`f{1uBiV62bGCjLJK~`GE3h5o-NM2CB~s%KZ}CT`IecR> z|GUCE&*r)U3|8w%8=afg5S90r@mUnCwGB~>+@c`=T3H+BVd4|I0^{cr=Lm-oR-h#5 z5;((GA|f*4^3!3GBHKBHl6NB)_4W0Yl~t@;m~32z5+=+MpGiw>XYCRN^eGf9-NfGb zZgM3;DwApFQdGNEBU@YVI*&;Qotj zf59Zf53+?Uv=F}q1(aq98j*!som3 zVVsX}o1}4|13zVVG@-M+TkdA(pXsyovM5z9AeO()aKF%?X0ym7R>t%SEw+)cl8WHg zs_>vC(W?Pj9=<<|CWP2|!XEdG|E3joIg32+#|&ud<{a#daz4|G1)xgDy7TeqSUh^2 zWpe=cgXPAl9S8wo4roq;4g3f31Yo~xaML?7RqxE^!Uzo`KXEJ|ye)ZuY*)euU2Y-q=<9>*9fok?yhXqG+dNh6IXfP?uNj`til$nHtS(nHQ z0GoBcHss=h-5;xj5WcQ47pf0TDQZ~E5ae1?G)2?jP7l>YF7mH{E9s36sw8K;DX5$l%(hfG%rj@$Y&f^uywCL-k{`(Vx9_{2-mZ)=@HC0Qz z0|Tyv6W#!Yk3B^-S$;OYgLmH;K}AJk2jtQR`rjEuok|6p2`#s%lK!r$GvqD7Ae1nO ze4p~hBe?f#;0VAGz}PF`{zyNY2oI)YLa=&?hZ@;5Hrm=!h05GPJ?m%}{_0Gj0QXOP zIJ5>uEoc-W+au(c&g5}@uuBM;DT2Hxm>go{9>1H<%b)t+;7tPjT91aJqX!YD;L$Pv%TA;uE_X%CaW5S2&uzGt=oT3IY}qQ!ygaElUTrv` 
zW-xX+yae$#SK(>fn9!_n(x_5{jIL0U{jRc+B)JsYm4Qk7`O8Q5WPBsuVOGuJiL5a+ z1W4SuzsNWraAXx07A3nRMm~_qobD8(xiL_*{u227a|mTv*`M#9#p(>PN2T1tYRnK$ zy_$n-nV5|rKEi>;kxqPEY@*9!7-Waiqj7$oe}px1ew%}l`dm5TSXVBSKqKF9YHw+Y za$YVZJh66UQMciw#bVX%e)2TF&!EkKE$@08RG6zMAKAP)%)@9AeXWMqMgl6uyZ?fk z>~RZ6gS>CACw~7}Y3IzB=o}jkajdXGl%)9R`75^={<-0p9g=RVbr$`tvUa_Cgs=)H z1i{<-f;1WwpQ#(4Z7lW|)tKaii?%4>ZL<;iyPX<$@{6Zw72MR)b<{3(;n6=?v>{>X zQ$&}2-2)N+bi1L=E6K=f)Yg1yDaCmzg@pSs`V%v`K(y4!gZZLNyzWJ6$6KztWmY4? z@cem`p1Ik&FHxbg4mLAm7ipL-DtuU(<7j~Fz~=&jLTp#!qtOFJ=zqV>o#ts*kW5fZ z@$VOg%N#e+)D>7xUo!ejoKki?$1#}+uA@{w4c!MFuJ{!Mspnc3ca#?cX?Po@f|eFX zVPWR7WNl61n8pEo(pA9L=k@k5RPOK29I##BNFV!}U$x+d6(wjrE+RVKF&J_G<=0{$ zC_ivOCD>+H!R%U#=x?m{ORW*a!?j>WCRH$U|D#4A>b-?IgaL7NBWXXIUd+@B>RYl= z!?A@kZl^WAWjnHV`T5S0b$L^DAz*p#&fF78kBI@!u628VcY%OQ-^;k`V2FsdVSyyS z*wFDfnJJIpBdK&W9GF^xE!)|dZ*LiOWWacxuoCCPf_K`f21>Aqw#{P6(FvDhS|^Yg z^>^FeF3~O2m%5vcLCAQ&#@&CvE)tmW%T57P;j>i}G8*nbW(BK4_4!mE$jGmk4fvek zO?KvP6>FB}UpGk3Kx5AdG_F1?Ivp;p79t-7J}>$Qs5l&&O>{*qyi*^ksBSTo4)Uu2C|%-T&ynji=pcC+P=nu zGF47FKT0!qb^v?YzVjzm|7J}@e&}Y~tx}nmE&;}mee~|V12Z*#ype|=EK#%%7Mab! z--kC1b}K7mlVerZ0}$p}j{{Ce(F1ctk8}=^uJ4vaEoD_vJ==Ph)7|sxn>Q~A327sC zIbWT3gyEMwo}~{~_oib>xn<=S`@aa(l)TKY!xVD+5KYQ%h|yZgD&sGd7bEdc;JUO@pwPbPJyDj}~I``kdJ-B}Pe}MZ<)QUG_^j0f8 zNCvq?AhoUtKO0dLx}wRJ-9d(xQ;_8WJq-~yK<-p{6OS%)I<(gLFFya}4Ce@2&AoWz zv3DsxDQh1w z#*4wvhn|4zNhnl%ZJgF5Kr^j#!<)L_uTD$YO);@1@mmR58H8(7tO;AS*_XCinb$Wq4mxrc6rCc75ZzY`V#7X9V)gCb#nN|zsqd0EU-GDPGbRds>o9EFqn*qBQKSRT(^b3SZD?LW zZa%-0fQ0@};sUClK@d2Gu-coZc1I)#Y66K$T;;l*utW&92He}*bE0NTI1;pBULA*OsXHk^iV^k|*wxy&Df# zn$}b4M^WWjj9gTz^FBf8Em_nzWtqZX9G54?Y3Vh@9jekVilTqJa#a|8vS$Pj*s_Gs zz5|9;D)GyskrG8tAiT)!pm%s-GAKAYR!ae>Tk9AkQ*tV;ygMA6(FMB&gZtC9iN%O_ zJ%1k=_U1gFl1=6Bl21}4id?4ZESMEft0tke(q}p5p(;9A4ea)6(l0rzmjq0ZnLmxt zbGuw~^bDj%kUJfwWxA!oQYBAYuYKum&Wt4?XVG#3#=<=G`zk2oFC|))qI-8tX!1VS z2uPeM*o(T)!u8bc1mCrkd@|GMY%7s|4K3oymyO+-xl}%$d-bjD9lk4?y)pg8@|#L! 
z7r~(M%iTh7=>KC1aZ`Evo4>UvLC9g*V{E}=;u`gdHlvi>_sxieHYVtOwdI7moK|aI zv(Qtrdj5B~NkgSO3tG;J>WA?Ay`gO>A1AZw#)!MH2{Idl2V4dZ#(%5bc*03W>+nK& zP1j;ro0@J&;^p1{K~=&3v;KgH#UXzFkFS_5OMu7zqgb7`u1A=Z*(9*i#VHJ(FaebQ zKbtkq>!aFR*2}1US|5IPu7E|j$*7rraYtX-u?c#|TW9et2whF=Wul)U?U3}Vrpv>} zdSa;JKQtL%nalFz|Fu2IYH_GoJg$6I=6@g7hx0hw@W8GZ1*=IPm)Ks8V**%`m7;g` zgkDz02i81KxM9#S`rH*9Wq}#ZGqqpWl@!IoSFzf>IsdeV%WM}?gs%9cvYs0A zAOUe5>W}bk99L2B6cT!|P=d?Y^L&!;vGMKxd+nKiH%5RK^B?phePg=9V(_mK5ItzVEeAQ1*PqLr@=O;0raB5+ z*ND*yl2*agKWCWjTz^`O@E+QDNT{1;0^J6H&F{SsQz@Z_omp+I`+I1fmMsnG8q zjRq&!Co?{M6adX8vbio(eUNyj+{M52c^(2iD`I_Nq9P&+Z5aFffft;53Y>qI zq5fE3{R~BYTQ=cOK|noz0%pc|(bnt9)O_0UN%6hu^1v)Dfra{8kjHATE)L+1bnpkW zwq)l6q7CPs^brPFYY6rhla+SDS^}qBNwY`x{nbk{tvQy=yYrr#F|a+9LGCq*lbVyd zV`t3DymeQahRgOCaikaPi)1t2crKL{LmDLs2bLG$u zxy;xPs-)7`+YXmqiSXDv3vlman!>#G0Y^O(ja+cb0duc^@G0cnM>EDN)PZIPYb(x* ziu;Fkus_WPHYN5wcB`^@UV%oOBy%p+#dD*qjI@6rTAs9KeWOYLFlC}Qho52+by_Wi zU|E3yysAf^r|fuPFYhV}#^T#jtc@JBNc{OmnHcZ@_BdfALMiHph?W*0tRX%N#IA!zAny7ME2*uVv(wFz=THT$Iqyv z1T#r&aT$5wdbH_#vSi{3SkIVug)96Ui$@riPH3fg|3pODw46@tc5M($hg6|NP&PoX z<~R{Twh1e|&?-83w&Xp%D*<(~Nv_G+`B)M1+b*@`ShlQ2`J)Qc(%+A*{Q*T6nSPq^WcvDm#Q`exC=Y{x9>E)L zASQn@R}s^o=x7`ZL!$qc{~it9Ry9>JiL`xZ+wK=H>{EoP^wI%RBJ6=*;dGcsrdwP1hcV#%el&zXs5C7jrWA)6 ztpO43{YwxFt42K*`{W>}nSH>`oU$IqEG9YrA(_i!fnuQtcAqfa43iV3RrApFZWaO~ z&T(aDf2aq#yPFSn9Y;??OM>#V+?|;E^Qw^mBx2=NDB*L4vw(Uu4?pUfJU^W~U)i8h zEI-$-!p<@6UnY^_4G~RAPmieZf?^QYaFdh7UKxS36bQ@`7Z#SJ)YQPmsgdHu`yPME z`p?=mpcwpbCIl&{EAS6FKHB2T{pl+Ma>~@VU9Cfo;YRy@ByygMW+aU z7pT8Xk9{m28{tm!_#3}Zsw=YHrQU}e?0Po>k=UN+_M@HFTqrKS!^OjHdCpZ||7LSA z<)IDt8A23T9=Z;6~YN zWps2j$;Y66T&g5looFm@REpf$BKt@4B6d@~-}`%7P^{+tFi0oP zDgFv9)vwjIvH<&~W2-)WH5jKbt-Wt!bSO(Qhe!EwTG0!w45vNV-thR0-b*Yn=oUTp zOAFgH!AIQsPXN`rO{(2|3p2MMEw)L1yxBg@^TVBZDCZGAxfz{UmyQYMcvaUY@G$B==DH5;=EeLyVFCNIxQv;m?V4~@YFvV=C- zy_>)8KSy3~#Pt~bs{4dV{q;$yJvLU$b3w;O31yd;Q(I5gIIjezt8honz5B1LD+Sow zf5idTBC}H6d-v`&t)X$@n?wnL@%YzfOui{#qHNgPfVFM13NvEVaM88x6;~u{c$X!+ zj7%C(@7!zx8**>;4Am(Eec2L2{N17$N1E{p6jrHIw z%;zvRIH$+j`)};&n`TmSa&nw4A|HW@(`YB1rz-tgc&yed`$L+D@n^Ad(=2j;qw_{0 zv0EKdHUmvYWuHD)J#CBDQW@NCtJt&%rWSC3@B@Cy>{S;)btv}c`m*cTI#rxjzI@Ol z!!(qZ*jtidGoUA|{QUV#Gq6eVRsw={e z=AHM6ShYeQN#W|hJ%}D4^vNbwY|#;Qcgk^Lu13)$J^d<{|FW24>Okk>lj#8gI2<0* z>rCXD(~2LnTj}N%fZJ}~I+^_daNza9RqDjpEV%seY}Oj#jrsCPht`gcdg*n>v!{zwSeWjukLGi4Z*S!R4?s)&rnf$#J;BvYlNYDI99$JzcIyucRlzE>y1KfK zCOY`P$zrzbLGMR?5SEz!g}hg7(!iD3JpWSi%6Xxg9K5&s`ub?k+ef?9j6xqp2a+Gp z#zL;WwH-aakBhS3U|0Jf!zlay0T>2=-+lg^?cqbiHZUJ9@h2I1Ut+oxGwO#Ido(Nc z1QYtTIHA^R;HuT3@cY)CBbOLJHsfb!^AON0wnwrw8Gg1iDS+nhq0{dHQzWW~QOh>)9mtS1^%Z+wuxfqEaOz0Z+036Og437Ao3)2MC zG7tN&@&F28OThz~Odwla`X~H`Vr0!td$rmI%e5Y#+o=)_;VQ`vDjO^plG1ax(G zuaERp`kuZA=ySFx?EOzD^<22D?(-2L9DorQZPpyZ zs->?V#$j`g?i3)sC2l->?4*P~UYgpK>XS(Xip0$e0jHbtYb>`B>Cw@H#fddVMNyww zeU*1t`9I}A?|fTuQ!`U9q`n$k|Dn!y>VSpb~G+>5q%Xs(aqVL`Qc<2h?mb>@x)i0OIej zUr9ocLxM?-05J?$>Py_0t=$t6%rtHVbJ$-WzbC7r`*=x{USmaKCdcH;+uy1~Ur8-3 zeNTBcAZIH|*OOOX&;Pj)6rYr&rK8h+R7Tm|)7@m(K@SVRD}7m2@m%7Lu9}WavRkl1 z?V~1*rs=?WqmbesspGY?p7|bE4r-ncbmrcXIV{oibyfmADdXE6Cl`Yr@$m9a*0^%t z%~^?0OrUFu5SL>xaJabog$&@2)aVc4Zp+?`J9Gz?3-&+Tr*%J9xpU8@W_*(2=G$#b*1^8({QufBvKdz$ARU@wNsS zqDO#k7wB0~c7rD1i6E3Zy_s8CnV6f)1)eNGVWA{4Tlr0~>!5p*X=!sm1Cl4Ci3EI} MKUG&OdTbi_UzDOi<^TWy diff --git a/doc/img/production.png b/doc/img/production.png deleted file mode 100644 index af9a5af7f6e81f2ee06a8664267cbb8bb8227480..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 47987 zcmXV1by$<%+uz23Q8Kz=gtU}&jvgJ--Cd%Tplo!9AcAzKbV#T)N`sVihXN9Us0i=F 
zSv5Fv6k*+C$;$q;QJ({B9J(?@;4%ap97J91_3mUa|9+S&HA*qTx)Cxnrct)>hn4IF zoA+x^sBuGJ&K-MOX_M0$LnO}O;%o}XZ%FM@=v!nFI%_;&yKO5!Ym>tZGX&$75!+G& z`-7yommr6u*U62wI+#aNh>ow5QlsFN)8@2*Vrt81O>idc~u`*<{)pF|CL; z5`<0Bpe8ITy-c?7*fe$doQQ5VEXEaFPzU`NFO&aoZS)nMLp`0@S}nhChO!eeuSSK@dAcgu?#7gQ}r)R@}U>h1wFL>y6Voa9^(w!=VJBL|GeG8+8;R zxR~Fla&QHq5O$WuawoHV@KPL>6nZj`jMegr!+Zy?2NH>fzf&y}25(*%hOafmV^g|7 zzd!t9s`^?ff{dz$4b0W7RR3QZys?>Ulq!!*UA95PK7ie-i)Z)2|1_%#xnqau z9ANePOdrK+pn_lh6=Z?j5GY)o=(95@Tf$*T?6*@88G|uLBWUBmRE6t0=;?oq~(Vj --image --key-name default --nic net-id= --security-groups= worker - -2. Create a floating ip - -:: - - $ neutron floatingip-create - -3. Assign a floating ip to the instance - -.. note:: - - You can view all the ports by issuing `neutron port-list`. - -:: - - $ neutron floatingip-associate - -4. Login to the instance - -:: - - $ ssh root@ - -5. Do steps in 'Common steps' - -6. Install HAProxy - -:: - - $ apt-get install -qy haproxy socat - - -7. Install python-gearman - -.. note:: - - This is a custom version with patches commited upstream but not release yet. - -:: - - $ pip install https://launchpad.net/~libra-core/+archive/ppa/+files/gearman_2.0.2.git3.orig.tar.gz - -8. Install dependencies using pip - -:: - - $ pip install -r requirements.txt -r test-requirements.txt - -9. Install Libra in development mode - -:: - - $ python setup.py develop - -10. Install an Upstart job - -.. note:: - - You will also need to copy your libra.cnf to the worker machine, and update libra-worker.conf to use it (the default is /etc/libra/libra.cnf). - There is also an additional logging configuration file to install. - You may want to test that the service starts up appropriately before moving to the next step. - -:: - - $ mkdir /etc/libra - $ wget https://raw2.github.com/pcrews/lbaas-salt/master/lbaas-haproxy-base/logging_worker.cfg -O /etc/libra/logging_worker.cfg - $ wget https://raw2.github.com/pcrews/lbaas-salt/master/lbaas-haproxy-base/libra-worker.conf -O /etc/init/libra_worker.conf - -11. Make a snapshot of the worker image - -:: - - $ nova image-create worker libra-worker - -12. At the libra-poo-mgm node change the 'nova_image' setting to the value of your newly created snapshot - -.. note:: - - To get the ID of the snapshot do - nova image-show libra-worker | grep -w id | cut -d '|' -f3 - -:: - - $ sudo vi /etc/libra.cfg - -13. Restart libra_pool_mgm - -:: - - $ killall -9 libra_pool_mgm - $ libra_pool_mgm --config-file /etc/libra.cfg --log-dir /var/log/libra/ - -Verifying that it works -======================= - -If you have done all correctly you should be able to do something like the -below command on the node that has the :ref:`libra-pool-mgm` - -:: - - $ less +F /var/log/libra/libra_pool_mgm.log diff --git a/doc/install/diskimage-builder.rst b/doc/install/diskimage-builder.rst deleted file mode 100644 index 4b253fe9..00000000 --- a/doc/install/diskimage-builder.rst +++ /dev/null @@ -1,113 +0,0 @@ -Diskimage Builder -================= - -Building Libra Images using Diskimage Builder. - - -Setup the builder - Manual way ------------------------------- - -1. Set DIB path - -:: - - $ echo 'export DIB_PATH=$HOME/diskimage-builder' >> ~/.bashrc - -2. Clone the repository "git://github.com:openstack/diskimage-builder" locally. - -:: - - $ git clone git://github.com:openstack/diskimage-builder $DIB_PATH - -3. Add DIB bin to PATH and DIB directory to your directory to your env. - -:: - - $ echo 'export PATH=$PATH:$DIB_PATH/bin' >> ~/.bashrc - $ . ~/.bashrc - - -4. 
Set up some variables
-
-::
-
-   $ echo 'export LIBRA_ELEMENTS=$HOME/libra-elements' >> ~/.bashrc
-   $ . ~/.bashrc
-
-5. Clone the 'libra-elements' repository
-
-::
-
-   $ git clone git://github.com/LBaaS/libra-elements $LIBRA_ELEMENTS
-
-
-6. Export the following variable to your .bashrc. Then source it.
-
-::
-
-   $ export ELEMENTS_PATH=$DIB_PATH/elements:$LIBRA_ELEMENTS/elements
-
-Setup DIB using bootstrap.sh
-----------------------------
-
-bootstrap.sh is a script to bootstrap your environment for DIB and libra-elements.
-
-
-It does:
-#. Install deps
-#. Add some vars to ~/.dib_profile and your ~/.bashrc
-#. Clone / update the repos.
-
-Simply run:
-
-::
-
-   $ curl https://raw.github.com/LBaaS/libra-elements/master/bootstrap.sh | bash
-
-
-Supported distros
------------------
-
-Currently the supported distributions for DIB are:
-
-.. note::
-
-   There is no support in the elements or in the packages for anything else at this time.
-
-* precise
-
-
-Worker image
-------------
-
-To generate a worker image, do
-
-::
-
-   DIB_RELEASE=precise disk-image-create "libra-worker" -o libra-worker.qcow2
-
-
-API node
---------
-
-To generate an API image, do
-
-::
-
-   DIB_RELEASE=precise disk-image-create "libra-api" -o libra-api.qcow2
-
-Or to put both the API and Admin API on the same image
-
-::
-
-   DIB_RELEASE=precise disk-image-create "libra-api libra-admin-api" -o libra-api.qcow2
-
-
-Pool Manager image
-------------------
-
-To generate a Pool Manager image, do
-
-::
-
-   DIB_RELEASE=precise disk-image-create "libra-pool-mgr" -o libra-pool-mgr.qcow2
diff --git a/doc/install/index.rst b/doc/install/index.rst
deleted file mode 100644
index 6768a83f..00000000
--- a/doc/install/index.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-.. _install:
-
-Installing Libra LBaaS
-======================
-
-.. toctree::
-   :maxdepth: 2
-
-   development
-   production
-   ppa
-   diskimage-builder
-   verify
\ No newline at end of file
diff --git a/doc/install/openstack.rst b/doc/install/openstack.rst
deleted file mode 100644
index c9a3735d..00000000
--- a/doc/install/openstack.rst
+++ /dev/null
@@ -1,298 +0,0 @@
-.. _install-openstack:
-
-=============================
-Installing Libra on OpenStack
-=============================
-
-Libra can utilize OpenStack as its platform to provide LBaaS, either for instances
-that run inside of an OpenStack environment or outside of it.
-
-
-Architecture
-^^^^^^^^^^^^
-
-Please see :ref:`architecture-production` for understanding the general
-production architecture.
-
-
-Requirements
-^^^^^^^^^^^^
-
-* OpenStack cloud to provide the underlying IaaS functions for Libra.
-* User and Tenant with required privileges / resources.
-* Ubuntu 12.04 Precise x86_64 image for instances.
-
-Instance flavors
-----------------
-* :ref:`libra-api` / :ref:`libra-admin-api` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
-* :ref:`libra-pool-mgm` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
-* :ref:`libra-worker` / :term:`haproxy` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
-* :term:`gearman` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
-* MySQL Galera (:term:`database`) - m1.medium (2 cpu, 4gb memory, 10gb root disk, 40gb ephemeral disk)
-
-.. note::
-
-   The worker flavor needs to have unlimited or high bandwidth capabilities;
-   if not, traffic might not get through and it will suffer from network
-   congestion.
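The boot flow performed by the ``nova`` commands below can also be scripted against
the Nova API. A minimal sketch, assuming python-novaclient's v1_1 interface is
available (this guide itself only uses the CLI); the image, flavor and instance
names mirror the examples in this guide, and the credentials are placeholders:

.. code-block:: python

    # A minimal sketch, assuming python-novaclient's v1_1 API. The image,
    # flavor and instance names mirror this guide; the credentials and auth
    # URL are placeholders you must replace with your own.
    from novaclient.v1_1 import client

    nova = client.Client('username', 'password', 'tenant',
                         'https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/',
                         region_name='region')

    image = nova.images.find(name='ubuntu-precise-amd64')
    flavor = nova.flavors.find(name='m1.small')

    # Boot the three Gearman instances described below, one call per server;
    # key name and security group follow the example configuration defaults.
    for name in ('libra-gearman-1', 'libra-gearman-2', 'libra-gearman-3'):
        nova.servers.create(name=name, image=image, flavor=flavor,
                            key_name='default', security_groups=['default'])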
-
-
-Commands / Tools
-================
-
-Nova Boot
----------
-
-::
-
-   $ nova boot --image --flavor --availability-zone
-
-   Example: nova boot --image ubuntu-precise-amd64 --flavor m1.small --availability-zone az1 libra-gearman-az1
-
-PDSH
-----
-
-Use PDSH if you don't want to write for loops around SSH or do a lot of manual
-SSHs into boxes to perform steps.
-
-1. Add the following to your ~/.ssh/config
-
-.. note:: If you don't do this, pdsh will fail due to host keys that are not known.
-
-::
-
-   Host *
-       StrictHostKeyChecking no
-
-2. Create a file for the group of instances you want PDSH to target
-
-   Example contents: gearman
-
-::
-
-   10.0.0.4
-   10.0.0.5
-   10.0.0.6
-
-3. Run pdsh with ssh
-
-::
-
-   $ WCOLL= pdsh -R ssh
-
-   Example: WCOLL=gearman pdsh -R ssh uptime
-
-
-Installing pre-requisite services
-=================================
-
-We want to set up services like Gearman and the database instances before
-installing the actual Libra system.
-
-Gearman
--------
-
-1. Create 3 instances for Gearman using the command in `Commands`
-
-2. You will end up with something like
-
-::
-
-   | aff72090-6f5e-44c7-9d35-674d92f0ba82 | libra-gearman-1 | ACTIVE | None | Running | os-net=10.255.255.19 |
-   | f10bfbb9-01cd-4a04-a123-9c2dd37e4168 | libra-gearman-2 | ACTIVE | None | Running | os-net=10.255.255.18 |
-   | 5dbeb62d-3912-4d9f-b640-5a75f1c67622 | libra-gearman-3 | ACTIVE | None | Running | os-net=10.255.255.15 |
-
-3. Log in or script the next actions
-
-4. Do steps in :doc:`ppa` for each instance
-
-5. Install the Gearman job server
-
-::
-
-   $ sudo apt-get install -qy gearman-job-server
-
-6. Change Gearman to listen on all addresses
-
-::
-
-   $ sudo sed 's/127.0.0.1/0.0.0.0/g' -i /etc/default/gearman-job-server
-   $ sudo service gearman-job-server restart
-
-
-Database
-========
-
-http://www.percona.com/doc/percona-xtradb-cluster/howtos/ubuntu_howto.html
-
-1. Create 3 instances for the database
-
-2. You will end up with something like
-
-::
-
-   | 60b2d90a-a5a6-457b-8d4f-4b5575033c44 | libra-db-1 | ACTIVE | None | Running | os-net=10.255.255.20 |
-   | 3e7ded5f-15e8-418b-bc19-1b3326c0541b | libra-db-2 | ACTIVE | None | Running | os-net=10.255.255.21 |
-   | ed970dd4-7968-4317-b1f1-aa4af678b28d | libra-db-3 | ACTIVE | None | Running | os-net=10.255.255.22 |
-
-3. Add the Percona PPA
-
-::
-
-   $ sudo apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A
-   $ sudo sh -c 'echo "deb http://repo.percona.com/apt precise main" >> /etc/apt/sources.list.d/percona.list'
-
-4. Install the Percona server on each instance
-
-::
-
-   $ sudo debconf-set-selections <<< 'percona-xtradb-cluster-server-5.5 percona-server-server/root_password password your_password'
-   $ sudo debconf-set-selections <<< 'percona-xtradb-cluster-server-5.5 percona-server-server/root_password_again password your_password'
-   $ sudo DEBIAN_FRONTEND=noninteractive apt-get install -qy percona-xtradb-cluster-server-5.5
-
-5. To set up the Percona Cluster, follow the guide linked above on the www.percona.com pages.
-
-6. Create the Libra database and a user with grants to it
-
-::
-
-   mysql > CREATE DATABASE lbaas CHARACTER SET utf8 COLLATE utf8_general_ci;
-   mysql > GRANT ALL ON lbaas.* TO 'lbaas'@'10.255.255.%' IDENTIFIED BY 'lbaas';
-   mysql > FLUSH PRIVILEGES;
-
-
-Worker image
-============
-
-1. Create an instance that will become our template for workers.
-
-::
-
-   $ nova boot ... worker
-
-2. Login to the server
-
-3. Do the steps in :doc:`ppa`.
-
-4. 
Install the :ref:`libra-worker` package and dependencies.
-
-::
-
-   $ sudo apt-get install -qy libra-worker socat haproxy
-
-5. Configure the [worker] section in the configuration file.
-
-.. note:: See :ref:`configuration` for information about options
-
-::
-
-   $ sudo cp /usr/share/libra/sample_libra.cfg /etc/libra.cfg
-   $ sudo vi /etc/libra.cfg
-
-6. Make a snapshot of the image and take note of the ID (we'll need it later)
-
-::
-
-   $ nova image-create worker libra-worker
-   $ nova image-show libra-worker
-
-7. Shut down the instance
-
-::
-
-   $ nova delete worker
-
-
-Pool Manager instances
-======================
-
-1. Create 3 instances that will run the :ref:`libra-pool-mgm`
-
-2. You will end up with something like
-
-::
-
-   | d4e21f7b-aa1b-4132-83e7-6cd5281adfb3 | libra-pool-mgm-1 | ACTIVE | None | Running | os-net=10.255.255.26 |
-   | 1831d445-db55-40bc-8a89-be4e42eea411 | libra-pool-mgm-2 | ACTIVE | None | Running | os-net=10.255.255.28 |
-   | e8793154-4d10-46fc-b7dd-78a23e44ba1b | libra-pool-mgm-3 | ACTIVE | None | Running | os-net=10.255.255.27 |
-
-3. Log in or script the next actions
-
-4. Do steps in :doc:`ppa` for each instance
-
-5. Install :ref:`libra-pool-mgm`
-
-::
-
-   $ sudo apt-get install -qy libra-pool-mgm
-
-6. On the first instance, configure the settings for your environment.
-
-.. note::
-
-   We'll create a configuration file on the first :ref:`libra-pool-mgm`
-   instance and copy it to the rest of the API instances and later
-   :ref:`libra-pool-mgm` instances so we do less work :).
-
-::
-
-   $ sudo cp /usr/share/libra/sample_libra.cfg /etc/libra.cfg
-   $ sudo vi /etc/libra.cfg
-
-.. note::
-
-   See :ref:`configuration` for configuration options.
-
-7. Copy the configuration file over to the rest of the instances.
-
-8. Restart the :ref:`libra-pool-mgm` service on each instance.
-
-9. Check the logs for errors.
-
-
-API nodes
-=========
-
-1. Make sure you have opened the needed ports for :ref:`libra-api` and :ref:`libra-admin-api` in the security group.
-
-2. Create 3 instances that will run the :ref:`libra-api` and :ref:`libra-admin-api`
-
-3. Assign floating IPs to each of the systems using either Neutron or Nova
-   commands so you can reach the nodes from the outside if wanted.
-
-4. You will end up with something like
-
-::
-
-   | 27ae4d83-792a-4458-bdb0-4e13e8970a48 | libra-api-1 | ACTIVE | None | Running | os-net=10.255.255.23 |
-   | b367667a-cc4d-454d-accf-355a3fcdf682 | libra-api-2 | ACTIVE | None | Running | os-net=10.255.255.24 |
-   | c659c9a3-260a-4b85-9a1a-565549c9ad44 | libra-api-3 | ACTIVE | None | Running | os-net=10.255.255.25 |
-
-5. Log in or script the next actions
-
-6. Install python-keystoneclient
-
-::
-
-   $ sudo apt-get install -qy python-keystoneclient
-
-7. Do steps in :doc:`ppa` for each instance
-
-8. Install the latest version of Libra
-
-::
-
-   $ sudo apt-get install -qy libra-api libra-admin-api
-
-9. Copy the configuration file from one of the :ref:`libra-pool-mgm` instances
-   to each instance.
-
-10. Restart :ref:`libra-api` and :ref:`libra-admin-api` on each instance.
-
-::
-
-   $ for i in api admin-api; do sudo service libra-$i restart; done
-
-11. Now you're done with the API services.
-
-12. Check the logs for errors.
-
-13. See :ref:`install-verify` to verify that the system works!
diff --git a/doc/install/ppa.rst b/doc/install/ppa.rst
deleted file mode 100644
index 2b345f10..00000000
--- a/doc/install/ppa.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-.. 
_ppa:
-
-=========
-Libra PPA
-=========
-
-Currently we require a PPA that is provided by the HPCS LBaaS / Libra team in order
-to get the right versions of the dependencies. So we'll need to set up the PPA.
-
-To add it to your Ubuntu node follow the instructions below.
-
-
-Adding the PPA
-==============
-
-1. Install a utility package
-
-::
-
-   $ sudo apt-get install -qy python-software-properties
-
-2. Add the PPA
-
-::
-
-   $ sudo apt-add-repository ppa:libra-core/ppa
-
-3. Update package indexes
-
-::
-
-   $ sudo apt-get update -q
diff --git a/doc/install/production.rst b/doc/install/production.rst
deleted file mode 100644
index 85ac68af..00000000
--- a/doc/install/production.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-.. _install-production:
-
-======================
-Production environment
-======================
-
-Libra is a system to provide Load Balancing as a Service on top of
-various platforms. It is comprised of four components: :ref:`libra-api`,
-:ref:`libra-admin-api`, :ref:`libra-pool-mgm` and :ref:`libra-worker`,
-supported by a few other open source components. For more information see
-:doc:`/architecture/index`.
-
-These guides will help you through the installation of a production setup of Libra.
-
-Below you see the different systems that Libra can be installed / run upon.
-
-
-.. toctree::
-   :maxdepth: 2
-
-   openstack
\ No newline at end of file
diff --git a/doc/install/verify.rst b/doc/install/verify.rst
deleted file mode 100644
index 00e603e9..00000000
--- a/doc/install/verify.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _install-verify:
-
-=======================
-Verifying functionality
-=======================
-
-.. note::
-
-   In order to do the following you need libra_client installed on your system.
-
-libra_client --service_type=compute --insecure --bypass_url=http:///v1.1 list
\ No newline at end of file
diff --git a/doc/pool_mgm/about.rst b/doc/pool_mgm/about.rst
deleted file mode 100644
index 97e6f3f8..00000000
--- a/doc/pool_mgm/about.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-Description
-===========
-
-Purpose
--------
-
-The Libra Node Pool Manager is designed to communicate with OpenStack Nova or
-any other compute API to provide nodes and floating IPs to the Libra system
-for use. It does this by providing a gearman worker interface to the Nova
-API. This means you can have multiple pool managers running and gearman will
-decide on the next available pool manager to take a job.
-
-Design
-------
-
-It is designed to accept requests from the Libra components to manipulate Nova
-instances and floating IPs. It is a daemon which is a gearman worker. Any
-commands sent to that worker are converted into Nova commands and the results
-are sent back to the client.
diff --git a/doc/pool_mgm/commands.rst b/doc/pool_mgm/commands.rst
deleted file mode 100644
index 667600ae..00000000
--- a/doc/pool_mgm/commands.rst
+++ /dev/null
@@ -1,154 +0,0 @@
-Gearman Commands
-================
-
-The Pool Manager registers as the worker name ``libra_pool_mgm`` on the gearman
-servers. Using this it accepts the JSON requests outlined in this document.
-
-In all cases it will return the original message along with the following for
-success:
-
-.. code-block:: json
-
-    {
-        "response": "PASS"
-    }
-
-And this for failure:
-
-.. code-block:: json
-
-    {
-        "response": "FAIL"
-    }
-
-BUILD_DEVICE
-------------
-
-This command sends the Nova ``boot`` command using the Nova API and returns
-details about the resulting new Nova instance. 
Details about which image and
-other Nova settings to use are configured using the options or config file for
-Pool Manager.
-
-Example:
-
-.. code-block:: json
-
-    {
-        "action": "BUILD_DEVICE"
-    }
-
-Response:
-
-.. code-block:: json
-
-    {
-        "action": "BUILD_DEVICE",
-        "response": "PASS",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
-        "addr": "15.185.175.81",
-        "type": "basename: libra-stg-haproxy, image: 12345",
-        "az": "3"
-    }
-
-DELETE_DEVICE
--------------
-
-This command requests that a Nova instance be deleted.
-
-Example:
-
-.. code-block:: json
-
-    {
-        "action": "DELETE_DEVICE",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9"
-    }
-
-Response:
-
-.. code-block:: json
-
-    {
-        "action": "DELETE_DEVICE",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
-        "response": "PASS"
-    }
-
-BUILD_IP
---------
-
-This command requests a floating IP from Nova.
-
-Example:
-
-.. code-block:: json
-
-    {
-        "action": "BUILD_IP"
-    }
-
-Response:
-
-.. code-block:: json
-
-    {
-        "action": "BUILD_IP",
-        "response": "PASS",
-        "id": "12345",
-        "ip": "15.185.234.125"
-    }
-
-ASSIGN_IP
----------
-
-This command assigns floating IP addresses to Nova instances (by name of
-instance).
-
-Example:
-
-.. code-block:: json
-
-    {
-        "action": "ASSIGN_IP",
-        "ip": "15.185.234.125",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9"
-    }
-
-Response:
-
-.. code-block:: json
-
-    {
-        "action": "ASSIGN_IP",
-        "ip": "15.185.234.125",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
-        "response": "PASS"
-    }
-
-REMOVE_IP
----------
-
-This command removes a floating IP address from a Nova instance, preserving
-the IP address to be used another time.
-
-Example:
-
-.. code-block:: json
-
-    {
-        "action": "REMOVE_IP",
-        "ip": "15.185.234.125",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9"
-    }
-
-Response:
-
-.. code-block:: json
-
-    {
-        "action": "REMOVE_IP",
-        "ip": "15.185.234.125",
-        "name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
-        "response": "PASS"
-    }
-
diff --git a/doc/pool_mgm/config.rst b/doc/pool_mgm/config.rst
deleted file mode 100644
index 499656b8..00000000
--- a/doc/pool_mgm/config.rst
+++ /dev/null
@@ -1,135 +0,0 @@
-Pool Manager Configuration
-==========================
-
-These options are specific to the pool manager in addition to the
-:doc:`common options `.
-
-Configuration File
-------------------
-
-   The ``[mgm]`` section is specific to the libra_pool_mgm utility. Below is an
-   example:
-
-   .. code-block:: ini
-
-      [mgm]
-      pid = /var/run/libra/libra_mgm.pid
-      logfile = /var/log/libra/libra_mgm.log
-      datadir = /etc/libra/
-      nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
-      nova_user = username
-      nova_pass = password
-      nova_tenant = tenant
-      nova_region = region
-      nova_keyname = default
-      nova_secgroup = default
-      nova_image = 12345
-      nova_image_size = standard.medium
-      gearman=127.0.0.1:4730
-      node_basename = 'libra'
-
-Command Line Options
---------------------
-   .. program:: libra_pool_mgm
-
-   .. option:: --datadir
-
-      The data directory used to store things such as the failed node list.
-
-   .. option:: -n, --nodaemon
-
-      Do not run as a daemon. This option is useful for debugging purposes
-      only as the worker is intended to be run as a daemon normally.
-
-   .. option:: --node_basename
-
-      A name to prefix the UUID name given to the nodes the pool manager
-      generates.
-
-   .. 
option:: --nova_auth_url
-
-      The URL used to authenticate for the Nova API
-
-   .. option:: --nova_user
-
-      The username to authenticate for the Nova API
-
-   .. option:: --nova_pass
-
-      The password to authenticate for the Nova API
-
-   .. option:: --nova_tenant
-
-      The tenant to use for the Nova API
-
-   .. option:: --nova_region
-
-      The region to use for the Nova API
-
-   .. option:: --nova_keyname
-
-      The key name to use when spinning up nodes in the Nova API
-
-   .. option:: --nova_secgroup
-
-      The security group to use when spinning up nodes in the Nova API
-
-   .. option:: --nova_image
-
-      The image ID or name to use on new nodes spun up in the Nova API
-
-   .. option:: --nova_net_id
-
-      Specify which Neutron Network ID workers should be started with.
-
-   .. option:: --nova_image_size
-
-      The flavor ID (image size ID) or name to use for new nodes spun up in
-      the Nova API
-
-   .. option:: --gearman_keepalive
-
-      Use TCP KEEPALIVE to the Gearman job server. Not supported on all
-      systems.
-
-   .. option:: --gearman_keepcnt
-
-      Maximum number of TCP KEEPALIVE probes to send before killing the
-      connection to the Gearman job server.
-
-   .. option:: --gearman_keepidle
-
-      Seconds of idle time on the Gearman job server connection before
-      sending TCP KEEPALIVE probes.
-
-   .. option:: --gearman_keepintvl
-
-      Seconds between TCP KEEPALIVE probes.
-
-   .. option:: --gearman_ssl_ca
-
-      The path for the Gearman SSL Certificate Authority.
-
-   .. option:: --gearman_ssl_cert
-
-      The path for the Gearman SSL certificate.
-
-   .. option:: --gearman_ssl_key
-
-      The path for the Gearman SSL key.
-
-   .. option:: --gearman
-
-      Used to specify the Gearman job server hostname and port. This option
-      can be used multiple times to specify multiple job servers.
-
-   .. option:: --rm_fip_ignore_500
-
-      When removing a floating IP, ignore the HTTP 500 error and treat it as
-      a successful response.
-
-   .. option:: --tcp_check_port
-
-      After a floating IP has been assigned, use this port to do a TCP connect
-      test to see if the assignment was successful. If not specified the check
-      will not take place.
diff --git a/doc/pool_mgm/index.rst b/doc/pool_mgm/index.rst
deleted file mode 100644
index a3c9d61e..00000000
--- a/doc/pool_mgm/index.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _libra-pool-mgm:
-
-Libra Node Pool Manager
-=======================
-
-.. toctree::
-   :maxdepth: 2
-
-   about
-   config
-   commands
diff --git a/doc/sources/libralayout.odg b/doc/sources/libralayout.odg
deleted file mode 100644
index 579f46bd815765804ab0c8f928b33a4fc3d00115..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
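The ``libra_pool_mgm`` JSON protocol documented above can also be exercised from a
script with the python-gearman library that the development guide installs. A
minimal sketch, assuming a job server on 127.0.0.1:4730 as in the example
configuration:

.. code-block:: python

    import json

    import gearman  # python-gearman 2.x, installed earlier in this guide

    # The job server address is an assumption taken from the example config.
    client = gearman.GearmanClient(['127.0.0.1:4730'])

    def mgm_request(payload):
        # Submit a JSON job to the libra_pool_mgm worker and decode the reply.
        request = client.submit_job('libra_pool_mgm', json.dumps(payload))
        response = json.loads(request.result)
        if response.get('response') != 'PASS':
            raise RuntimeError('pool manager job failed: %r' % response)
        return response

    # Boot a device, request a floating IP, then attach it, mirroring the
    # BUILD_DEVICE / BUILD_IP / ASSIGN_IP commands documented above.
    device = mgm_request({'action': 'BUILD_DEVICE'})
    ip = mgm_request({'action': 'BUILD_IP'})
    mgm_request({'action': 'ASSIGN_IP', 'ip': ip['ip'], 'name': device['name']})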
ztO8YVdCn;^s*+m|_yRJ_S|Uu389lACG3kP(2cY5y5^LFy)PkE!Ps9_B@ZflKF0!zT zUKKd(Fd1at__gvpMTg{sV>H?JEBxS=?Ov*=Ac0EgTIf&-ZSN-4C!&YWnBtvrJ>0qoY+0@z`~@AXq($tsvA{<9T~-63m>FtKh~3y?@eDLCOKKM8muwKC zx*3iRS|dEk{>9-8rDV0ShKg3KyzX`a|m6Q767s64Z;H(JX{dn!bL^F8aw*11u z3*K@5XE2!9xQZT2*e0J>GN18F8{4h{m)8kF7;s-kw2Zy;7Z9N_c*R8dQUeAqX!l zXwTz}eX*?@@f>%8msIe;*3tKpp?nQY>tH*g6GM;CejcHRdIlReU;{Nm8yh^V?=q%$ zjZwI>#e*JwX~fYEqvFACB^9UCrm`Q6nSfA++x>Mwkjkhwcg?aA4*v&3l5nS)JyQ(;tu=BvH5dv zzc0NTPN+nbMa;N)99$-!i!zU6_(X}vU~d`Q0+1+;@13pa@R@`Qhd=ND&EeAAwd6ib zuDh*hOp(I1x6GpkyQ#SDe6a4HfBbe-a6XFQY(6uj>WqIlDu2{0e)=Z=E1y+{0D@MQ zh9*W1w*RPBv}2&Px3V&~*0D4&r~TK`#Q#JK8%+}n z9U}ufS^*P#3mt2_|55(Cj4$cmDC_9y8JHVu=~e;c12vbWiQ^>zzdJiV7DJ9CIV8f z#r33zINtE2!a#mq3gauZA+sbi@g{S&y^HT3u)sk(<4fC+^ypE@ZhN%l0J9zRi;AZ^ zuUZ{7dhMKS0fhlV*BxYmJd#n>y6m~P6e&b|at zm#UaiMsj%X?Htl7sQq#!`ukFc+Rn1+OY?d7`V}DNjfDkMnss|ge{}5bYAS5$+Ustk zN8S+slu;NU;eQ~8U97N1E8>9DdnZQ)&wr#FLRR#}455FEHwMbxYMj~cStg7?T*FFs zg<9Fk>N52yxe+t-V(&R5`k$+eGD_i{z zp8galy1#}}f9U-uF8fojp6&n6>Ay1Dzdoct_5R6ifA&xR2IpUy?mr{_=DI(Hg#G_s z(f?t-e}et-{C+F{L0`YQ?@!VD8?gVtg#XQEzuD+dY5g0}KY8#!-{-%gT>cHppS<}0 z6@`T3|61!$p8U@!zZvgO(fAvbzw+k)SDg62!TBqX{uAeq$M#n`_|2q$%Gf8)-}0)g X1n6gK5dZ-0^N;28cndz)@816daiH(Y diff --git a/doc/worker/about.rst b/doc/worker/about.rst deleted file mode 100644 index 030a7079..00000000 --- a/doc/worker/about.rst +++ /dev/null @@ -1,86 +0,0 @@ -Description -=========== - -Purpose -------- - -A Python-based Gearman worker that handles messages for the Gearman job queue -sharing the same name as the local hostname. The messages that it receives are -JSON objects describing a load balancer, and returns this same JSON object, but -with status fields added to describe the state of the LB. - -Installation ------------- - -Installing the Required Tools -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -You must have Python setuptools installed. On Ubuntu:: - - $ sudo apt-get install python-setuptools - -Now you may install the Libra toolset:: - - $ sudo python setup.py install - -The worker also needs some packages installed in order to be used with -HAProxy. The commands below will install them on Ubuntu:: - - $ sudo apt-get install haproxy - $ sudo apt-get install socat - -The Ubuntu default is to have HAProxy disabled. You will need to edit the -file */etc/default/haproxy* and set *ENABLED* to 1 if you want HAProxy to -actually start (hint: you do). - -Edit /etc/sudoers -^^^^^^^^^^^^^^^^^ - -The worker needs to be able to run some commands as root without being -prompted for a password. It is suggested that you run the worker as -the `haproxy` user and `haproxy` group on Ubuntu systems. Then add the -following line to /etc/sudoers:: - - %haproxy ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /bin/chown - -The above lets everyone in the *haproxy* group run those commands -as root without being prompted for a password. - -Configuration File ------------------- - -It can be easier to give options via a configuration file. See the sample -configuration file etc/sample_libra.cfg for an example and further -documentation. Use the :option:`--config ` option -to specify the configuration file to read. - -Running the Worker ------------------- - -The worker can run in either daemon or non-daemon mode. Daemon mode requires -escalated privileges so that it can behave like a proper daemon. Non-daemon -mode (:option:`--nodaemon ` option) is useful for testing. 
 - -Basic commands:: - - # Getting help - $ libra_worker -h - - # Start up as a daemon running as the `haproxy` user and - # connecting to the local Gearman job server. - $ sudo libra_worker --user haproxy --group haproxy --server 127.0.0.1:4730 - - # Start up with debugging output in non-daemon mode - $ libra_worker --debug --nodaemon -
-NOTE: When running the worker in daemon mode, you must make sure that the -directory where the PID file will be (:option:`--pid ` -option) and the directory where the log files will be written -(:option:`--logfile ` option) exist and are writable -by the user/group specified with the :option:`--user ` -and :option:`--group ` options. -
-You can verify that the worker is running by using the sample Gearman -client in the bin/ directory:: - - $ bin/client.py -
diff --git a/doc/worker/code.rst b/doc/worker/code.rst deleted file mode 100644 index ae0ec531..00000000 --- a/doc/worker/code.rst +++ /dev/null @@ -1,111 +0,0 @@ -Code Walkthrough ================ - -Here we'll highlight some of the more important code aspects. -
-Gearman Worker Thread --------------------- -.. py:module:: libra.worker.worker - -.. py:function:: config_thread(logger, driver, servers, reconnect_sleep) - - This function encapsulates the functionality for the Gearman worker thread - that will be started by the :py:class:`~libra.worker.main.EventServer` - class. It should never exit. - - This function connects to the Gearman job server(s) and runs the Gearman - worker task, which itself is another function that is called for each - message retrieved from the Gearman job servers. - - If all Gearman job servers become unavailable, the worker would - normally exit. This function identifies that situation and periodically - attempts to restart the worker in an endless loop. - -
-EventServer Class ----------------- - -.. py:module:: libra.worker.main - -.. py:class:: EventServer(logger) - - This class encapsulates the server activity once it starts in either - daemon or non-daemon mode and all configuration options are read. It - uses the `eventlet `_ Python module to start - the tasks that it is supplied. - - .. py:method:: main(tasks) - - The one and only method in the class; it represents the primary - function of the program. A list of functions and their parameters - is supplied as the only argument. Each function will be started in - its own Green Thread. - -
-LBaaSController Class --------------------- - -.. py:module:: libra.worker.controller - -.. py:class:: LBaaSController(logger, driver, json_msg) - - This class is used by the Gearman task started within the worker thread - (the :py:func:`~libra.worker.worker.config_thread` function) to drive the - Gearman message handling. - - .. py:method:: run() - - This is the only method that should be called directly. It parses the - JSON message given during object instantiation and determines the action - to perform based on the contents. It returns another JSON message that - should then be returned to the Gearman client.
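To make the flow concrete, here is a minimal sketch of how a Gearman task function might hand a received message to the controller. Only the ``LBaaSController(logger, driver, json_msg)`` signature and ``run()`` come from the documentation above; the surrounding task function and names are placeholders:

.. code-block:: python

    # Sketch only: wire a received Gearman job into LBaaSController.run().
    import logging

    from libra.worker.controller import LBaaSController

    def lbaas_task(worker, job, driver):
        logger = logging.getLogger(__name__)
        # The controller parses the JSON message itself and returns the
        # response message to send back through Gearman to the client.
        controller = LBaaSController(logger, driver, job.data)
        return controller.run()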
 - -LoadBalancerDriver Class ------------------------ - -See :ref:`libra-worker-driver` for information. - -
-Relationship Diagram -------------------- - -Below is a conceptual diagram that shows the basic relationships between -the items described above:: - - +-------------+ JSON request +-------------------+ - | Gearman | --------------------> | | - | worker | | LBaaSController | - | task | <-------------------- | | - +-------------+ JSON response +-------------------+ - | ^ - | | - API call | | (Optional Exception) - | | - V | - +----------------------+ - | | - | LoadBalancerDriver | - | | - +----------------------+ -
-The steps shown above are: - -.. py:module:: libra.worker - -* The Gearman worker task used in the worker thread (see the - :py:func:`~worker.config_thread` function) is run when the worker - receives a message from the Gearman job server (not represented above). -* This task then uses the :py:class:`~controller.LBaaSController` to process - the message that it received. -* Based on the contents of the message, the controller then makes the relevant - driver API calls using the :py:class:`~drivers.LoadBalancerDriver` driver - that was selected via the :option:`--driver ` - option. -* The driver executes the API call. If the driver encounters an error during - execution, an exception is thrown that should be handled by the - :py:class:`~controller.LBaaSController` object. Otherwise, nothing is - returned, indicating success. -* The :py:class:`~controller.LBaaSController` object then creates a response - message and returns this message back to the Gearman worker task. -* The Gearman worker task sends the response message back through the Gearman - job server to the originating client (not represented above).
diff --git a/doc/worker/config.rst b/doc/worker/config.rst deleted file mode 100644 index 137269d8..00000000 --- a/doc/worker/config.rst +++ /dev/null @@ -1,40 +0,0 @@ -Worker Configuration ==================== - -These options are specific to the worker in addition to the -:doc:`common options `. -
-Configuration File ------------------ - - The ``[worker]`` section is specific to the libra_worker utility. Below - is an example: - - .. code-block:: ini - - [worker] - driver = haproxy - pid = /var/run/libra/libra_worker.pid - - Note that drivers supported by the worker may add additional subsections - to the configuration file for their configuration needs. See the - :doc:`haproxy driver documentation ` for an example. - - Options supported in this section: - - .. option:: driver - - Load balancer driver to use. Valid driver options are: - - * *haproxy* - `HAProxy `_ software load balancer. - This is the default driver. - - .. option:: pid - - Location for the process PID file. -
-Command Line Options -------------------- - - Some options can be specified via the command line. Run with the - -h or --help option for a full listing.
diff --git a/doc/worker/driver.rst b/doc/worker/driver.rst deleted file mode 100644 index 8104aced..00000000 --- a/doc/worker/driver.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. _libra-worker-driver: - -Drivers ======= - -The driver is the part of the worker responsible for performing actions -against the underlying service, such as HAProxy. - -It is a plugin-based Python class that provides a generic API for configuring -a :term:`device`. -
-LoadBalancerDriver Class ------------------------ - -The abstract driver API is described below. - -.. py:module:: libra.worker.drivers - -.. py:class:: LoadBalancerDriver - - This defines the API for interacting with various load balancing - appliances. Drivers for these appliances should inherit from this - class and implement the relevant API methods that they can support. - `This is an abstract class and is not meant to be instantiated directly.` - - Generally, an appliance driver should queue up any configuration changes - made via these API calls until the :py:meth:`create` method is called. - The :py:meth:`suspend`, :py:meth:`enable`, :py:meth:`delete`, - :py:meth:`get_stats` and :py:meth:`archive` methods should take - immediate action. - - .. py:method:: init() - - .. py:method:: add_server(host, port) - - .. py:method:: set_protocol(protocol, port) - - .. py:method:: set_algorithm(algorithm) - - .. py:method:: create() - - .. py:method:: suspend() - - .. py:method:: enable() - - .. py:method:: delete() - - .. py:method:: get_stats() - - .. py:method:: archive() -
-Known Load Balancer Drivers Dictionary -------------------------------------- - -.. py:data:: known_drivers - - This is the dictionary that maps values for the - :option:`--driver ` option - to a class implementing the driver :py:class:`~LoadBalancerDriver` API - for that appliance. After implementing a new driver class, you simply add - a new entry to this dictionary to plug in the new driver (a brief sketch - follows below). - -.. note:: - - See below for driver-specific documentation. - -.. toctree:: - :maxdepth: 2 - :glob: - - drivers/* \ No newline at end of file
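To make the plug-in mechanism concrete, here is a minimal sketch of a hypothetical driver. The ``LoadBalancerDriver`` method names come from the API above; the class body, the import layout, and the ``mydriver`` dictionary entry are invented for illustration:

.. code-block:: python

    # Hypothetical plug-in driver: queues configuration calls and applies
    # them when create() is called, per the documented contract.
    from libra.worker.drivers import LoadBalancerDriver, known_drivers


    class NullDriver(LoadBalancerDriver):

        def init(self):
            self._servers = []
            self._protocol = None

        def add_server(self, host, port):
            self._servers.append((host, port))   # queued until create()

        def set_protocol(self, protocol, port):
            self._protocol = (protocol, port)    # queued until create()

        def create(self):
            # A real driver would write out its configuration and
            # (re)start the underlying service here.
            pass

    # Plug the driver in so it can be selected with --driver mydriver.
    known_drivers['mydriver'] = NullDriver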
diff --git a/doc/worker/drivers/haproxy.rst b/doc/worker/drivers/haproxy.rst deleted file mode 100644 index 397dcaf9..00000000 --- a/doc/worker/drivers/haproxy.rst +++ /dev/null @@ -1,64 +0,0 @@ - -.. _libra-worker-driver-haproxy: - -HAProxy driver ============== -
-Configuration File ------------------ - - The ``[worker:haproxy]`` section is read by the HAProxy driver. - - .. code-block:: ini - - [worker:haproxy] - service = ubuntu - logfile = /var/log/haproxy.log - - Options supported in this section: - - .. option:: logfile - - Path where HAProxy will store its logs. Note that this file is not - created by the worker, but rather by the haproxy process itself. Its - contents will be delivered in response to an ARCHIVE request from the - API server. - - .. note:: - - See :ref:`libra-worker-driver-haproxy-archiving` for information on - archiving. - - .. option:: statsfile - - Location of the HAProxy statistics cache file. This file needs to be - placed in a location where the worker has write access and where it - will not be deleted by external processes (so don't place it in /tmp). - This is used to deliver usage reports to the API server in response to - a STATS request. - - .. option:: service - - The underlying OS Service implementation to use. Default is 'ubuntu'. -
-.. _libra-worker-driver-haproxy-archiving: - -Log archiving ------------- - -To support log archiving with HAProxy, you need to redirect -the rsyslog feed from local0 to a dedicated file. - -.. note:: - - Change /var/log/haproxy.log to the path you have set in the worker - section of the config. - -:: - - cat >/etc/rsyslog.d/10-haproxy.conf<
 - -A **release** field will also be returned in the JSON message. It contains -more complete versioning information as returned from a 'git describe'. - -Required Fields ^^^^^^^^^^^^^^^ - -* hpcs_action - -Example Request ^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "DISCOVER" - } - -Example Response ^^^^^^^^^^^^^^^^ -
-.. code-block:: json - - { - "hpcs_action": "DISCOVER", - "version": "1.0", - "release": "1.0.alpha.3.gca84083", - "hpcs_response": "PASS" - } - -
-ARCHIVE Message --------------- - -The ARCHIVE message requests that the load balancer send any available logs -to a destination defined within the request. Currently, the only supported -destination is a Swift account. - -If the request fails, **hpcs_response** will be set to *FAIL* and a field -named **hpcs_error** will be added with an error message explaining the -failure. - -Required Fields ^^^^^^^^^^^^^^^ - -* hpcs_action -* hpcs_object_store_type -* hpcs_object_store_basepath -* hpcs_object_store_endpoint -* hpcs_object_store_token -* loadBalancers -* loadBalancers.protocol -
-Example Request ^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "ARCHIVE", - "hpcs_object_store_basepath": "lbaaslogs", - "hpcs_object_store_endpoint": "https://example.com/v1/100", - "hpcs_object_store_token": "MY_AUTH_TOKEN", - "hpcs_object_store_type": "swift", - "loadBalancers": [ - { - "id": "15", - "name": "lb #1", - "protocol": "HTTP" - } - ] - } -
-Example Response ^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "ARCHIVE", - "hpcs_object_store_basepath": "lbaaslogs", - "hpcs_object_store_endpoint": "https://example.com/v1/100", - "hpcs_object_store_token": "MY_AUTH_TOKEN", - "hpcs_object_store_type": "swift", - "loadBalancers": [ - { - "id": "15", - "name": "lb #1", - "protocol": "HTTP" - } - ], - "hpcs_response": "FAIL", - "hpcs_error": "Some error string explaining the failure." - } - -
-STATS Message ------------- - -The STATS message queries the worker for general availability (i.e., a ping). -Currently, this does no more than verify that the HAProxy process is -running and that its statistics socket can be queried successfully. - -Required Fields ^^^^^^^^^^^^^^^ - -* hpcs_action - -Example Request ^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "STATS" - } - -Example Response ^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "STATS", - "hpcs_response": "PASS" - } - -
-METRICS Message --------------- - -The METRICS message queries the worker for load balancer usage metrics. -The number of bytes out for each load balancer defined on the device -is returned in the response. - -Required Fields ^^^^^^^^^^^^^^^ - -* hpcs_action - -Example Request ^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "METRICS" - } - -Example Response ^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "hpcs_action": "METRICS", - "utc_start": "2014-01-09 15:11:45.704754", - "utc_end": "2014-01-09 16:10:00.72683", - "loadBalancers": [ - { - "protocol": "HTTP", - "bytes_out": "12345" - }, - { - "protocol": "TCP", - "bytes_out": "5678" - } - ], - "hpcs_response": "PASS" - } -
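Taken together, a worker-side handler for these messages is essentially a dispatch on ``hpcs_action`` that always sets ``hpcs_response``. A condensed sketch (the handler table is a stand-in for the real worker logic; only the hpcs_* field names come from the messages documented above):

.. code-block:: python

    # Illustrative dispatcher for the worker message protocol above.
    def handle_message(msg, handlers):
        action = msg.get('hpcs_action')
        try:
            # e.g. handlers = {'DISCOVER': do_discover, 'STATS': do_stats}
            msg = handlers[action](msg)
            msg['hpcs_response'] = 'PASS'
        except Exception as exc:
            # Any failure is reported as FAIL plus an explanatory message.
            msg['hpcs_response'] = 'FAIL'
            msg['hpcs_error'] = str(exc)
        return msg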
-#----------------------------------------------------------------------- - -[DEFAULT] - -# Options to enable more verbose output -#verbose = false -#debug = false - -# Daemon process options -#daemon = true -#user = libra -#group = libra -#billing_enable = False - -# Openstack -#notification_driver = openstack.common.notifier.rpc_notifier -#default_notification_level = INFO -#default_publisher_id = id -#host = localhost - -# Kombu -rabbit_use_ssl = True -#kombu_ssl_version = '' -#kombu_ssl_keyfile = '' -#kombu_ssl_certfile = '' -#kombu_ssl_ca_certs = '' -#rabbit_host = localhost -#rabbit_port = 5672 -#rabbit_userid = guest -#rabbit_password = guest -#rabbit_hosts = -#rabbit_host = localhost -#rabbit_port = -#rabbit_virtual_host = / -#rabbit_retry_interval = 1 -#rabbit_retry_backoff = 2 -#rabbit_max_retries = 0 -#rabbit_ha_queues = False -#control_exchange = openstack -#amqp_durable_queues = False - - -#----------------------------------------------------------------------- -# Options for utilities that are Gearman workers or clients. -#----------------------------------------------------------------------- - -[gearman] - -#servers = localhost:4730, HOST:PORT -#keepalive = false -#keepcnt = COUNT -#keepidle = SECONDS -#keepintvl = SECONDS -#poll = 1 -#reconnect_sleep = 60 -#ssl_ca = /path/to/ssl_ca -#ssl_cert = /path/to/ssl_cert -#ssl_key = /path/to/ssl_key - - -#----------------------------------------------------------------------- -# [worker] and [worker:*] sections are specific to the Libra worker. -#----------------------------------------------------------------------- - -[worker] - -#driver = haproxy -#pid = /var/run/libra/libra_worker.pid - -# HAProxy driver options for the worker -[worker:haproxy] -#service = ubuntu -#statsfile = /var/log/haproxy.stats - - -#----------------------------------------------------------------------- -# The [mgm] section is specific to the libra_mgm utility. -#----------------------------------------------------------------------- - -[mgm] - -# Options with defaults -#pid = /var/run/libra/libra_mgm.pid -#threads = 8 -#rm_fip_ignore_500 = false -#nova_insecure = false -#build_diag_timeout = 10 - -# Required options -az = 1 -nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ -nova_keyname = default -nova_region = region -nova_secgroup = default -nova_user = username -nova_pass = password -nova_image = 12345 -nova_image_size = standard.medium - -# Others -node_basename = BASENAME -nova_az_name = NAME -nova_bypass_url = URL -nova_net_id = ID -nova_tenant = TENANT -nova_tenant_id = TENANTID - - -#----------------------------------------------------------------------- -# The [admin_api] section is specific to the libra_admin_api utility. -#----------------------------------------------------------------------- - -[admin_api] - -# Options with defaults -#host = 0.0.0.0 -#port = 8889 -pid = /var/run/libra/libra_admin_api.pid -#stats_device_error_limit = 5 -#stats_offline_ping_limit = 10 -#stats_poll_timeout = 5 -#stats_poll_timeout_retry = 30 -#exists_freq = 60 -#usage_freq = 60 -#stats_freq = 5 -#server_id = 0 -#number_of_servers = 1 -#expire_days = 0 -#vip_pool_size = 10 -#node_pool_size = 10 -#stats_driver = dummy -#stats_enable = False -#stats_purge_enable = False - -# The following are the seconds of each minute -# that the timers will run. The defaults should -# not need to be changed.. 
-#stats_purge_days = 5 -#delete_timer_seconds = 5 -#ping_timer_seconds = 15 -#stats_timer_seconds = 20 -#usage_timer_seconds = 25 -#probe_timer_seconds = 30 -#offline_timer_seconds = 45 -#vips_timer_seconds = 50 -#exists_timer_seconds = 55 - -# Required options -db_sections = mysql1 -ssl_certfile = certfile.crt -ssl_keyfile = keyfile.key - -# Datadog plugin options -#datadog_env = unknown -datadog_api_key = KEY -datadog_app_key = KEY2 -datadog_message_tail = MSG -datadog_tags = service:lbaas - -# Others - -#----------------------------------------------------------------------- -# The [api] section is specific to the libra_api utility. -#----------------------------------------------------------------------- - -[api] - -# Options with defaults -#disable_keystone=False -#host = 0.0.0.0 -#port = 443 -#keystone_module = keystoneclient.middleware.auth_token:AuthProtocol -#pid = /var/run/libra/libra_api.pid - -# Required options -db_sections = mysql1 -swift_basepath = lbaaslogs -swift_endpoint = https://host.com:443/v1/ - -# Others -ssl_certfile = certfile.crt -ssl_keyfile = keyfile.key -ip_filters = 192.168.0.0/24 - -#----------------------------------------------------------------------- -# The [mysql*] sections are referenced by admin_api and api by the -# db_sections values. -#----------------------------------------------------------------------- - -[mysql1] - -username = root -password = -schema = lbaas -host = localhost -port = 3306 - - -#----------------------------------------------------------------------- -# The API will reference keystone options here -#----------------------------------------------------------------------- - -[keystone] diff --git a/etc/logging.conf b/etc/logging.conf deleted file mode 100644 index 44df3e57..00000000 --- a/etc/logging.conf +++ /dev/null @@ -1,35 +0,0 @@ -[loggers] -keys=root - -[logger_root] -level=DEBUG -handlers=screen,rotating_file - -[formatters] -keys=simple,ts,newline - -[formatter_simple] -format=%(name)s - %(levelname)s - %(message)s - -[formatter_ts] -format=%(asctime)s - %(name)s - %(levelname)s - %(message)s - -[formatter_newline] -format=%(asctime)s - %(name)s - %(levelname)s - %(message)s -class=libra.common.log.NewlineFormatter - -[handlers] -keys=rotating_file,screen - -[handler_rotating_file] -formatter=newline -class=libra.common.log.CompressedTimedRotatingFileHandler -level=DEBUG -args=('/var/log/libra/libra.log',) - -[handler_screen] -class=StreamHandler -formatter=ts -level=AUDIT -args=(sys.stdout,) - diff --git a/etc/mnb.cfg b/etc/mnb.cfg deleted file mode 100644 index 5ac9305a..00000000 --- a/etc/mnb.cfg +++ /dev/null @@ -1,39 +0,0 @@ -######################################################################## -# Config for oslo notifier -######################################################################## - -[DEFAULT] -# Options to enable more verbose output -verbose = true -debug = true -use_stderr = true -publish_errors = true -logfile = /tmp/libra.log - -# Openstack -notification_driver = drivername -default_notification_level = INFO -default_publisher_id = lbaas -host = apiTest - -# Kombu -rabbit_use_ssl = True -rabbit_host = localhost -rabbit_port = 5671 -rabbit_userid = user -rabbit_password = password -#rabbit_hosts = -rabbit_virtual_host = vhost -rabbit_retry_interval = 1 -rabbit_retry_backoff = 2 -rabbit_max_retries = 0 -rabbit_ha_queues = False -fake_rabbit = False -control_exchange = exchange -amqp_durable_queues = True - -[admin_api] -billing_enable = True -exists_freq = 20 -logfile = /tmp/libra_admin.log 
-db_sections = ''
diff --git a/libra/__init__.py b/libra/__init__.py deleted file mode 100644 index 2015e9e8..00000000 --- a/libra/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -__version__ = pbr.version.VersionInfo('libra').version_string() -__release__ = pbr.version.VersionInfo('libra').release_string()
diff --git a/libra/admin_api/__init__.py b/libra/admin_api/__init__.py deleted file mode 100644 index bdf64fe0..00000000 --- a/libra/admin_api/__init__.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -
-from oslo.config import cfg - - -adminapi_group = cfg.OptGroup('admin_api', 'Libra Admin API options') - -cfg.CONF.register_group(adminapi_group) - -cfg.CONF.register_opts( - [ - cfg.BoolOpt('disable_keystone', - default=False, - help='Unauthenticated server, for testing only'), - cfg.StrOpt('keystone_module', - default='keystoneclient.middleware.auth_token:AuthProtocol', - help='A colon separated module and class for keystone ' - 'middleware'), - cfg.StrOpt('datadog_api_key', - help='API key for datadog alerting'), - cfg.StrOpt('datadog_app_key', - help='Application key for datadog alerting'), - cfg.StrOpt('datadog_env', - default='unknown', - help='Server environment'), - cfg.StrOpt('datadog_message_tail', - help='Text to add at the end of a Datadog alert'), - cfg.StrOpt('datadog_tags', - help='A space separated list of tags for Datadog alerts'), - cfg.ListOpt('db_sections', - required=True, - help='MySQL config sections in the config file'), - cfg.IntOpt('expire_days', - default=0, - help='Number of days until deleted load balancers ' - 'are expired'), - cfg.StrOpt('host', - default='0.0.0.0', - help='IP address to bind to, 0.0.0.0 for all IPs'), - cfg.IntOpt('node_pool_size', - default=10, - help='Number of hot spare devices to keep in the pool'), - cfg.IntOpt('number_of_servers', - default=1, - help='Number of Admin API servers, used to calculate ' - 'which Admin API server should stats ping next'), - cfg.StrOpt('pid', - default='/var/run/libra/libra_admin_api.pid', - help='PID file'), - cfg.IntOpt('port', - default=8889, - help='Port number for API server'), - cfg.IntOpt('server_id', - default=0, - help='Server ID of this server, used to calculate which ' - 'Admin API server should stats ping next ' - '(start at 0)'), - cfg.StrOpt('ssl_certfile', - help='Path to an SSL certificate file'),
 - cfg.StrOpt('ssl_keyfile', - help='Path to an SSL key file'), - cfg.IntOpt('stats_device_error_limit', - default=5, - help='Max number of simultaneous device failures to allow ' - 'recovery on'), - cfg.ListOpt('stats_driver', - default=['dummy'], - help='Type of stats device to use'), - cfg.IntOpt('stats_offline_ping_limit', - default=10, - help='Number of failed pings to an OFFLINE device before ' - 'deleting it'), - cfg.IntOpt('stats_poll_timeout', - default=5, - help='Gearman timeout value for initial ping request ' - '(in seconds)'), - cfg.IntOpt('stats_poll_timeout_retry', - default=30, - help='Gearman timeout value for retry ping request ' - '(in seconds)'), - cfg.IntOpt('vip_pool_size', - default=10, - help='Number of hot spare vips to keep in the pool'), - cfg.BoolOpt('stats_enable', - default=False, - help='Enable / Disable usage statistics gathering'), - cfg.IntOpt('exists_freq', - metavar='MINUTES', - default=60, - help='Minutes between sending of billing exists messages'), - cfg.IntOpt('usage_freq', - metavar='MINUTES', - default=60, - help='Minutes between sending of billing usage messages'), - cfg.IntOpt('stats_freq', - metavar='MINUTES', - default=5, - help='Minutes between collecting usage statistics'), - cfg.BoolOpt('stats_purge_enable', - default=False, - help='Enable / Disable purging of usage statistics'), - cfg.IntOpt('stats_purge_days', - metavar='DAYS', - default=5, - help='Number of days to keep usage statistics'), - cfg.IntOpt('delete_timer_seconds', - default=5, - help='Which second of each minute delete timer should run'), - cfg.IntOpt('ping_timer_seconds', - default=15, - help='Which second of each minute ping timer should run'), - cfg.IntOpt('stats_timer_seconds', - default=20, - help='Which second of each minute statistics timer should run'), - cfg.IntOpt('usage_timer_seconds', - default=25, - help='Which second of each minute usage timer should run'), - cfg.IntOpt('probe_timer_seconds', - default=30, - help='Which second of each minute probe timer should run'), - cfg.IntOpt('offline_timer_seconds', - default=45, - help='Which second of each minute offline timer should run'), - cfg.IntOpt('vips_timer_seconds', - default=50, - help='Which second of each minute vips timer should run'), - cfg.IntOpt('exists_timer_seconds', - default=55, - help='Which second of each minute exists timer should run'), - ], - group=adminapi_group -)
diff --git a/libra/admin_api/acl.py b/libra/admin_api/acl.py deleted file mode 100644 index f4742a68..00000000 --- a/libra/admin_api/acl.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
 - -import ConfigParser -import importlib - -from oslo.config import cfg -from pecan import request - -from libra.openstack.common import log -from libra.common.api.lbaas import db_session, AdminAuth - -LOG = log.getLogger(__name__) - -
-def get_limited_to_project(headers): - """Return the tenant the request should be limited to.""" - tenant_id = headers.get('X-Tenant-Id') - LOG.info( - 'Admin API {0} request {1} ({2}) from {3} tenant {4}'.format( - request.environ.get('REQUEST_METHOD'), - request.environ.get('PATH_INFO'), - request.environ.get('QUERY_STRING'), - request.environ.get('REMOTE_ADDR'), - tenant_id - ) - ) - - return tenant_id - -
-def tenant_is_type(headers, tenant_types): - """ Check that the tenant ID is a user of the Admin API and is allowed - to use the specified API command. - """ - tenant_id = get_limited_to_project(headers) - if not tenant_id: - return False - with db_session() as session: - is_auth = session.query(AdminAuth).\ - filter(AdminAuth.tenant_id == tenant_id).\ - filter(AdminAuth.level.in_(tenant_types)).count() - if is_auth > 0: - session.commit() - return True - session.commit() - return False - -
-def tenant_is_user(headers): - return tenant_is_type(headers, ['USER', 'ADMIN']) - -
-def tenant_is_admin(headers): - return tenant_is_type(headers, ['ADMIN']) - -
-class AuthDirector(object): - """ There are some paths we want to leave unauthenticated. This class - will direct intentionally unauthenticated requests to the relevant - controllers. """ - - def __init__(self, app): - self.unauthed_app = app - if not cfg.CONF['admin_api']['disable_keystone']: - self.app = self._install() - else: - self.app = app - - def __call__(self, env, start_response): - uri = env['PATH_INFO'] - if uri in ['/', '/v1', '/v1/', '/v2.0', '/v2.0/']: - return self.unauthed_app(env, start_response) - else: - return self.app(env, start_response) - - def _install(self): - """Install ACL check on application.""" - config = ConfigParser.SafeConfigParser() - config.read(cfg.CONF['config_file']) - module_details = cfg.CONF['admin_api']['keystone_module'].split(':') - keystone = importlib.import_module(module_details[0]) - auth_class = getattr(keystone, module_details[1]) - return auth_class(self.unauthed_app, config._sections['keystone'])
diff --git a/libra/admin_api/app.py b/libra/admin_api/app.py deleted file mode 100644 index 69c1a64b..00000000 --- a/libra/admin_api/app.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -import eventlet -eventlet.monkey_patch() -import daemon -import daemon.pidfile -import daemon.runner -import grp -import logging as std_logging -import pwd -import pecan -import sys -import signal - -from eventlet import wsgi - -from libra import __version__ -from libra.common.api import server -from libra.admin_api.stats.drivers.base import known_drivers -from libra.admin_api.stats.ping_sched import PingStats -from libra.admin_api.stats.offline_sched import OfflineStats -from libra.admin_api.stats.billing_sched import BillingStats -from libra.admin_api.stats.stats_sched import UsageStats -from libra.admin_api.device_pool.manage_pool import Pool -from libra.admin_api.expunge.expunge import ExpungeScheduler -from libra.admin_api import config as api_config -from libra.admin_api import model -from libra.admin_api import acl -from libra.openstack.common import importutils -from libra.openstack.common import log as logging -from libra.common.log import get_descriptors -from libra.common.options import CONF -from libra.common.options import add_common_opts -from libra.common.options import check_gearman_ssl_files - - -LOG = logging.getLogger(__name__) - - -def get_pecan_config(): - # Set up the pecan configuration - filename = api_config.__file__.replace('.pyc', '.py') - return pecan.configuration.conf_from_file(filename) - - -def setup_app(pecan_config): - - model.init_model() - - if not pecan_config: - pecan_config = get_pecan_config() - config = dict(pecan_config) - config['database'] = CONF['admin_api']['db_sections'] - config['gearman'] = { - 'server': CONF['gearman']['servers'], - 'ssl_key': CONF['gearman']['ssl_key'], - 'ssl_cert': CONF['gearman']['ssl_cert'], - 'ssl_ca': CONF['gearman']['ssl_ca'], - 'keepalive': CONF['gearman']['keepalive'], - 'keepcnt': CONF['gearman']['keepcnt'], - 'keepidle': CONF['gearman']['keepidle'], - 'keepintvl': CONF['gearman']['keepintvl'] - } - if CONF['debug']: - config['wsme'] = {'debug': True} - config['app']['debug'] = True - - pecan.configuration.set_config(config, overwrite=True) - - app = pecan.make_app( - pecan_config.app.root, - static_root=pecan_config.app.static_root, - template_path=pecan_config.app.template_path, - debug=getattr(pecan_config.app, 'debug', False), - force_canonical=getattr(pecan_config.app, 'force_canonical', True), - guess_content_type_from_ext=getattr( - pecan_config.app, - 'guess_content_type_from_ext', - True) - ) - - final_app = acl.AuthDirector(app) - - return final_app - - -class MaintThreads(object): - def __init__(self, drivers): - self.classes = [] - self.drivers = drivers - signal.signal(signal.SIGINT, self.exit_handler) - signal.signal(signal.SIGTERM, self.exit_handler) - self.run_threads() - - def run_threads(self): - - pool = Pool() - self.classes.append(pool) - - expunge = ExpungeScheduler() - self.classes.append(expunge) - - pings = PingStats(self.drivers) - self.classes.append(pings) - - offline = OfflineStats(self.drivers) - self.classes.append(offline) - - if CONF['admin_api'].stats_enable: - usage = UsageStats(self.drivers) - self.classes.append(usage) - - if CONF['billing_enable']: - billing = BillingStats(self.drivers) - self.classes.append(billing) - - def exit_handler(self, signum, frame): - signal.signal(signal.SIGINT, signal.SIG_IGN) - signal.signal(signal.SIGTERM, signal.SIG_IGN) - for function in self.classes: - function.shutdown() - sys.exit() - - -class LogStdout(object): - def write(self, data): - if data.strip() != '': - LOG.info(data) - - -def main(): - add_common_opts() - CONF(project='libra', 
version=__version__) - - logging.setup('libra') - - LOG.debug('Configuration:') - CONF.log_opt_values(LOG, std_logging.DEBUG) - - drivers = [] - - pc = get_pecan_config() - - sock = server.make_socket(CONF['admin_api']['host'], - CONF['admin_api']['port'], - CONF['admin_api']['ssl_keyfile'], - CONF['admin_api']['ssl_certfile']) - - if CONF['daemon']: - pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['admin_api']['pid'], - 10) - if daemon.runner.is_pidfile_stale(pidfile): - pidfile.break_lock() - - descriptors = get_descriptors() - descriptors.append(sock.fileno()) - context = daemon.DaemonContext( - working_directory='/', - umask=0o022, - pidfile=pidfile, - files_preserve=descriptors - ) - if CONF['user']: - context.uid = pwd.getpwnam(CONF['user']).pw_uid - if CONF['group']: - context.gid = grp.getgrnam(CONF['group']).gr_gid - context.open() - - try: - check_gearman_ssl_files() - except Exception as e: - LOG.critical(str(e)) - return - - # Use the root logger due to lots of services using logger - LOG.info('Starting on %s:%d', CONF.admin_api.host, CONF.admin_api.port) - api = setup_app(pc) - - for driver in CONF['admin_api']['stats_driver']: - drivers.append(importutils.import_class(known_drivers[driver])) - - MaintThreads(drivers) - sys.stderr = LogStdout() - - wsgi.server(sock, api, keepalive=False) - - return 0 diff --git a/libra/admin_api/config.py b/libra/admin_api/config.py deleted file mode 100644 index 9ad91482..00000000 --- a/libra/admin_api/config.py +++ /dev/null @@ -1,26 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Pecan Application Configurations -app = { - 'root': 'libra.admin_api.controllers.root.RootController', - 'modules': ['libra.admin_api'], - 'static_root': '%(confdir)s/public', - 'template_path': '%(confdir)s/admin_api/templates', - 'errors': { - 404: '/notfound', - '__force_dict__': True - } -} diff --git a/libra/admin_api/controllers/__init__.py b/libra/admin_api/controllers/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/controllers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/libra/admin_api/controllers/root.py b/libra/admin_api/controllers/root.py deleted file mode 100644 index 534db4dc..00000000 --- a/libra/admin_api/controllers/root.py +++ /dev/null @@ -1,49 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -
-from pecan import expose, response -from v1.v1 import V1Controller -from v2.v2_0 import V2Controller -from libra.admin_api.model.responses import Responses - -
-class RootController(object): - """Root controller object.""" - - @expose('json') - def index(self): - response.status = 200 - return Responses.versions - - @expose('json') - def _default(self): - """Default route; acts as a catch-all for any unknown URLs. - Returns a 404 because no action is defined for /.""" - response.status = 404 - return Responses._default - - @expose() - def _lookup(self, primary_key, *remainder): - if primary_key == 'v1': - return V1Controller(), remainder - if primary_key == 'v2.0': - return V2Controller(), remainder - else: - response.status = 404 - return Responses._default - - @expose('json') - def notfound(self): - return Responses._default
diff --git a/libra/admin_api/controllers/v1/__init__.py b/libra/admin_api/controllers/v1/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/controllers/v1/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
diff --git a/libra/admin_api/controllers/v1/devices.py b/libra/admin_api/controllers/v1/devices.py deleted file mode 100644 index df65622d..00000000 --- a/libra/admin_api/controllers/v1/devices.py +++ /dev/null @@ -1,335 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
 - -# pecan imports -from pecan import expose, response, abort -from pecan.rest import RestController -import wsmeext.pecan as wsme_pecan -from wsme.exc import ClientSideError -from libra.admin_api.model.validators import DeviceResp, DevicePost, DevicePut -from libra.common.api.lbaas import LoadBalancer, Device, db_session -from libra.common.api.lbaas import loadbalancers_devices -from libra.openstack.common import log - -LOG = log.getLogger(__name__) - -
-class DevicesController(RestController): - def __init__(self, devid=None): - # Required for PUT requests. See _lookup() below - self.devid = devid - - @expose('json') - def get(self, device_id=None, marker=None, limit=None): - """ - Gets either a list of all devices or the details of a single device. - device_id is supplied if we are getting details of a single device - marker and limit are used to paginate when device_id is not - supplied. Currently this just supplies "LIMIT marker, limit" to - MySQL, which is fine. - - :param device_id: id of device (unless getall) - Url: - GET /devices - List all configured devices - Url: - GET /devices/{device_id} - List details of a particular device - Returns: dict - """ - with db_session() as session: - # if we don't have an id then we want a list of all devices - if not device_id: - # return all devices - device = {'devices': []} - - if marker is None: - marker = 0 - if limit is None: - limit = 100 - - devices = session.query( - Device.id, Device.az, Device.updated, Device.created, - Device.status, Device.publicIpAddr, Device.name, - Device.type, Device.floatingIpAddr).\ - offset(marker).limit(limit) - - for item in devices: - dev = item._asdict() - dev['loadBalancers'] = [] - if dev['status'] != "OFFLINE": - # Find loadbalancers using device - lbids = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter( - loadbalancers_devices.c.device == dev['id']).\ - all() - - lblist = [i[0] for i in lbids] - if len(lblist) > 0: - lbs = session.query( - LoadBalancer.id, LoadBalancer.tenantid).\ - filter(LoadBalancer.id.in_(lblist)).all() - - if lbs: - for lb_row in lbs: - lb = lb_row._asdict() - lb['hpcs_tenantid'] = lb['tenantid'] - del lb['tenantid'] - dev['loadBalancers'].append(lb) - - device['devices'].append(dev) - - elif device_id == 'usage': - return self.usage() - else: - # return device detail - device = session.query( - Device.id, Device.az, Device.updated, Device.created, - Device.status, Device.publicIpAddr, Device.name, - Device.type, Device.floatingIpAddr - ).filter(Device.id == device_id).first() - - if not device: - response.status = 404 - session.rollback() - return dict( - status=404, - message="device id " + device_id + " not found" - ) - - device = device._asdict() - device['loadBalancers'] = [] - - if device['status'] != "OFFLINE": - lbids = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter( - loadbalancers_devices.c.device == device['id']).\ - all() - - lblist = [i[0] for i in lbids] - lbs = session.query( - LoadBalancer.id, LoadBalancer.tenantid).\ - filter(LoadBalancer.id.in_(lblist)).all() - - if lbs: - for item in lbs: - lb = item._asdict() - device['loadBalancers'].append(lb) - - session.commit() - response.status = 200 - return device - - @wsme_pecan.wsexpose(DeviceResp, body=DevicePost) - def post(self, body=None): - """ Creates a new device entry in devices table.
- :param None - Url: - POST /devices - JSON Request Body - { - "name":"device name", - "publicIpAddr":"15.x.x.x", - "floatingIpAddr":"15.x.x.x", - "az":2, - "type":"type descr" - } - - Returns: dict - { - "status": "OFFLINE", - "updated": "2013-06-06T10:17:19", - "name": "device name", - "created": "2013-06-06T10:17:19", - "loadBalancers": [], - "floatingIpAddr": "192.1678.98.99", - "publicIpAddr": "192.1678.98.99", - "az": 2, - "type": "type descr", - "id": 67 - } - """ - - # Get a new device object - device = Device() - device.name = body.name - device.publicIpAddr = body.publicIpAddr - device.floatingIpAddr = body.floatingIpAddr - device.az = body.az - device.type = body.type - device.pingCount = 0 - device.status = 'OFFLINE' - device.created = None - - with db_session() as session: - # write to database - session.add(device) - session.flush() - - # refresh the device record so we get the id back - session.refresh(device) - - try: - return_data = DeviceResp() - return_data.id = device.id - return_data.name = device.name - return_data.floatingIpAddr = device.floatingIpAddr - return_data.publicIpAddr = device.publicIpAddr - return_data.az = device.az - return_data.type = device.type - return_data.created = device.created - return_data.updated = device.updated - return_data.status = device.status - return_data.loadBalancers = [] - session.commit() - return return_data - except: - LOG.exception('Error communicating with load balancer pool') - errstr = 'Error communicating with load balancer pool' - session.rollback() - raise ClientSideError(errstr) - - @wsme_pecan.wsexpose(None, body=DevicePut) - def put(self, body=None): - """ Updates a device entry in devices table with new status. - Also, updates status of loadbalancers using this device - with ERROR or ACTIVE and the errmsg field - :param - NOTE the _lookup() hack used to get the device id - Url: - PUT /devices/ - JSON Request Body - { - "status": - "statusDescription": "Error Description" - } - - Returns: None - """ - - if not self.devid: - raise ClientSideError('Device ID is required') - - with db_session() as session: - device = session.query(Device).\ - filter(Device.id == self.devid).first() - - if not device: - session.rollback() - raise ClientSideError('Device ID is not valid') - - device.status = body.status - session.flush() - - lb_status = 'ACTIVE' if body.status == 'ONLINE' else body.status - lb_descr = body.statusDescription - - # Now find LB's associated with this Device and update their status - lbs = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter(loadbalancers_devices.c.device == self.devid).\ - all() - - for lb in lbs: - session.query(LoadBalancer).\ - filter(LoadBalancer.id == lb[0]).\ - update({"status": lb_status, "errmsg": lb_descr}, - synchronize_session='fetch') - - session.flush() - - session.commit() - return - - @expose('json') - def delete(self, device_id): - """ Deletes a given device - :param device_id: id of device to delete - Urls: - DELETE /devices/{device_id} - Returns: None - """ - with db_session() as session: - # check for the device - device = session.query(Device.id).\ - filter(Device.id == device_id).first() - - if device is None: - session.rollback() - response.status = 400 - return dict( - faultcode="Client", - faultstring="Device ID is not valid" - ) - - # Is the device is attached to a LB - lb = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter(loadbalancers_devices.c.device == device_id).\ - all() - - if lb: - session.rollback() - response.status = 400 - 
return dict( - faultcode="Client", - faultstring="Device belongs to a loadbalancer" - ) - try: - session.query(Device).filter(Device.id == device_id).delete() - session.flush() - session.commit() - return None - except: - session.rollback() - LOG.exception('Error deleting device from pool') - response.status = 500 - return dict( - faultcode="Server", - faultstring="Error deleting device from pool" - ) - - # Kludge to get to here because Pecan has a hard time with URL params - # and paths - def usage(self): - """Reports the device usage statistics for total, taken, and free - :param None - Url: - GET /devices/usage - Returns: dict - """ - with db_session() as session: - total = session.query(Device).count() - free = session.query(Device).filter(Device.status == 'OFFLINE').\ - count() - session.commit() - response.status = 200 - - return dict( - total=total, - free=free, - taken=total - free - ) - - @expose('json') - def _lookup(self, devid, *remainder): - """Routes more complex url mapping for PUT - Raises: 404 - """ - # Kludgy fix for PUT since WSME doesn't like IDs on the path - if devid: - return DevicesController(devid), remainder - abort(404) diff --git a/libra/admin_api/controllers/v1/v1.py b/libra/admin_api/controllers/v1/v1.py deleted file mode 100644 index 84d95d35..00000000 --- a/libra/admin_api/controllers/v1/v1.py +++ /dev/null @@ -1,36 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import expose, response -from devices import DevicesController -from libra.admin_api.model.responses import Responses - - -class V1Controller(object): - """v1 control object.""" - - @expose('json') - def index(self): - response.status = 200 - return Responses.versions_v1 - - @expose('json') - def _default(self): - """default route.. acts as catch all for any wrong urls. - For now it returns a 404 because no action is defined for /""" - response.status = 404 - return Responses._default - - devices = DevicesController() diff --git a/libra/admin_api/controllers/v2/__init__.py b/libra/admin_api/controllers/v2/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/controllers/v2/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
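# Example client calls against the v1 devices controller deleted above,
# using python-requests. The base URL, port and device id are
# placeholders, not values from this repository; real deployments take
# them from the admin API configuration:

import requests

BASE = 'http://localhost:8889/v1'  # hypothetical admin API endpoint

# GET /devices paginates by handing marker/limit straight to MySQL
# as OFFSET/LIMIT.
devices = requests.get(BASE + '/devices',
                       params={'marker': 0, 'limit': 100}).json()

# GET /devices/usage returns the total/taken/free counters.
usage = requests.get(BASE + '/devices/usage').json()

# PUT carries the device id on the path; WSME dislikes path IDs, so
# the controller recovered it through the _lookup() kludge shown above.
requests.put(BASE + '/devices/42',
             json={'status': 'ONLINE', 'statusDescription': ''})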
diff --git a/libra/admin_api/controllers/v2/devices.py b/libra/admin_api/controllers/v2/devices.py deleted file mode 100644 index d5e50440..00000000 --- a/libra/admin_api/controllers/v2/devices.py +++ /dev/null @@ -1,250 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# pecan imports -import ipaddress -from pecan import expose, request, response -from pecan.rest import RestController -from libra.admin_api.library.rebuild import rebuild_device -from libra.common.api.lbaas import LoadBalancer, Device, db_session -from libra.common.api.lbaas import loadbalancers_devices, Vip -from libra.openstack.common import log -from libra.admin_api.stats.stats_gearman import GearJobs -from libra.admin_api.acl import tenant_is_admin, tenant_is_user - -LOG = log.getLogger(__name__) - - -class DevicesController(RestController): - @expose('json') - def get( - self, device_id=None, status=None, name=None, ip=None, vip=None - ): - """ - Gets either a list of all devices or a single device details. - - :param device_id: id of device (unless getall) - Url: - GET /devices - List all configured devices - Url: - GET /devices/{device_id} - List details of a particular device - Returns: dict - """ - - # Work around routing issue in Pecan, doesn't work as a separate class - # due to this get accepting more than one parameter - if status == 'discover': - return self.discover(device_id) - - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - with db_session() as session: - # if we don't have an id then we want a list of all devices - if not device_id: - # return all devices - device = {'devices': []} - - devices = session.query( - Device.id, Device.az, Device.updated, Device.created, - Device.status, Device.name, Device.type, - Device.floatingIpAddr.label('ip'), Vip.id.label('vipid'), - Vip.ip.label('vip')).outerjoin(Device.vip) - - if vip is not None: - # Search devices by vip, should only return one - vip_num = int(ipaddress.IPv4Address(unicode(vip))) - devices = devices.filter(Vip.ip == vip_num) - - if status is not None: - # Search devices by status - status = status.upper() - if status not in ['OFFLINE', 'ONLINE', 'ERROR']: - # Invalid status specified - response.status = 400 - return dict( - faultcode="Client", - faultstring="Invalid status: " + status - ) - devices = devices.filter(Device.status == status) - if name is not None: - # Search devices by name, should only return one - devices = devices.filter(Device.name == name) - if ip is not None: - # Search devices by IP, should only return one - devices = devices.filter(Device.floatingIpAddr == ip) - - devices.all() - - for item in devices: - dev = item._asdict() - if dev['vip']: - dev['vip'] = [{ - "id": dev['vipid'], - "address": str(ipaddress.IPv4Address(dev['vip'])) - }] - else: - dev['vip'] = [] - del(dev['vipid']) - 
device['devices'].append(dev) - else: - # return device detail - device = session.query( - Device.id, Device.az, Device.updated, Device.created, - Device.status, Device.floatingIpAddr.label('ip'), - Device.name, Device.type, Vip.id.label('vipid'), - Vip.ip.label('vip') - ).outerjoin(Device.vip).filter(Device.id == device_id).first() - - if not device: - response.status = 404 - session.rollback() - return dict( - faultcode="Client", - faultstring="device id " + device_id + "not found" - ) - - device = device._asdict() - if device['vip']: - device['vip'] = [{ - "id": device['vipid'], - "address": str(ipaddress.IPv4Address(device['vip'])) - }] - else: - device['vip'] = [] - del(device['vipid']) - - device['loadBalancers'] = [] - - if device['status'] != "OFFLINE": - lbids = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter( - loadbalancers_devices.c.device == device['id']).\ - all() - - lblist = [i[0] for i in lbids] - lbs = session.query( - LoadBalancer.id, LoadBalancer.tenantid).\ - filter(LoadBalancer.id.in_(lblist)).all() - - if lbs: - for item in lbs: - lb = item._asdict() - device['loadBalancers'].append(lb) - - session.commit() - response.status = 200 - return device - - @expose('json') - def delete(self, device_id): - """ Deletes a given device - :param device_id: id of device to delete - Urls: - DELETE /devices/{device_id} - Returns: None - """ - - if not tenant_is_admin(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - with db_session() as session: - # check for the device - device = session.query(Device.id).\ - filter(Device.id == device_id).first() - - if device is None: - session.rollback() - response.status = 404 - return dict( - faultcode="Client", - faultstring="Device " + device_id + " not found" - ) - - # Is the device is attached to a LB - lb = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter(loadbalancers_devices.c.device == device_id).\ - all() - - if lb: - # Rebuild device - resp = rebuild_device(device_id) - response.status = resp[0] - return resp[1] - # If we get here there are no load balancers so delete device - response.status = 204 - try: - device = session.query(Device).\ - filter(Device.id == device_id).first() - device.status = 'DELETED' - session.commit() - return None - except: - session.rollback() - LOG.exception('Error deleting device from pool') - response.status = 500 - return dict( - faultcode="Server", - faultstring="Error deleting device from pool" - ) - return None - - def discover(self, device_id): - """ - Discovers information about a given libra worker based on device ID - """ - - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - with db_session() as session: - device = session.query(Device.name).\ - filter(Device.id == device_id).scalar() - device_name = str(device) - session.commit() - if device_name is None: - response.status = 404 - return dict( - faultcode="Client", - faultstring="Device " + device_id + " not found" - ) - gearman = GearJobs() - discover = gearman.get_discover(device_name) - if discover is None: - response.status = 500 - return dict( - faultcode="Server", - faultstring="Could not discover device" - ) - return dict( - id=device_id, version=discover['version'], - release=discover['release'] - ) diff --git a/libra/admin_api/controllers/v2/loadbalancers.py 
b/libra/admin_api/controllers/v2/loadbalancers.py deleted file mode 100644 index e361c798..00000000 --- a/libra/admin_api/controllers/v2/loadbalancers.py +++ /dev/null @@ -1,193 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# pecan imports -import ipaddress -from pecan import expose, request, response -from pecan.rest import RestController -from libra.common.api.lbaas import LoadBalancer, Device, db_session -from libra.common.api.lbaas import Vip, Node, HealthMonitor -from libra.openstack.common import log -from libra.admin_api.acl import tenant_is_user - -LOG = log.getLogger(__name__) - - -class LoadBalancersController(RestController): - - @expose('json') - def get( - self, lb_id=None, status=None, tenant=None, name=None, ip=None, - vip=None - ): - """ - Gets either a list of all loadbalancers or a details for a single - loadbalancer. - - :param lb_id: id of the loadbalancer (unless getall) - Url: - GET /loadbalancers - List all loadbalancers - Url: - GET /loadbalancers/{lb_id} - List details of a particular device - Returns: dict - """ - - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - with db_session() as session: - # if there is no lb_id then we want a list of loadbalancers - if not lb_id: - loadbalancers = {'loadBalancers': []} - - lbs = session.query( - LoadBalancer.id, LoadBalancer.name, LoadBalancer.status, - LoadBalancer.tenantid, Vip.id.label('vipid'), - Vip.ip.label('vip'), - Device.floatingIpAddr.label('ip'), - LoadBalancer.protocol, LoadBalancer.algorithm, - LoadBalancer.port, LoadBalancer.created, - LoadBalancer.updated - ).join(LoadBalancer.devices).join(Device.vip) - - if status is not None: - if status not in ('ACTIVE', 'BUILD', 'DEGRADED', 'ERROR'): - response.status = 400 - return dict( - faultcode="Client", - faultstring="Invalid status: " + status - ) - lbs = lbs.filter(LoadBalancer.status == status) - - if tenant is not None: - lbs = lbs.filter(LoadBalancer.tenantid == tenant) - - if name is not None: - lbs = lbs.filter(LoadBalancer.name == name) - - if ip is not None: - lbs = lbs.filter(Device.floatingIpAddr == ip) - - if vip is not None: - vip_num = int(ipaddress.IPv4Address(unicode(vip))) - lbs = lbs.filter(Vip.ip == vip_num) - - lbs.all() - - for item in lbs: - lb = item._asdict() - if lb['vip']: - lb['vip'] = [{ - "id": lb['vipid'], - "address": str(ipaddress.IPv4Address(lb['vip'])) - }] - del(lb['vip']) - del(lb['vipid']) - else: - lb['vip'] = [None] - del(lb['vipid']) - loadbalancers['loadBalancers'].append(lb) - - else: - lbs = session.query( - LoadBalancer.name, LoadBalancer.id, LoadBalancer.protocol, - LoadBalancer.port, LoadBalancer.algorithm, - LoadBalancer.status, LoadBalancer.created, - LoadBalancer.updated, LoadBalancer.errmsg, - Device.id.label('device'), - Vip.id.label('vipid'), Vip.ip.label('vip') - 
).join(LoadBalancer.devices).\ - outerjoin(Device.vip).\ - filter(LoadBalancer.id == lb_id).\ - first() - - if not lbs: - response.status = 404 - return dict( - faultcode="Client", - faultstring="Loadbalancer " + lb_id + " not found" - ) - loadbalancers = lbs._asdict() - nodes = session.query( - Node.id, Node.address, Node.port, Node.status, - Node.enabled, Node.weight - ).filter(Node.lbid == lb_id).all() - loadbalancers['nodes'] = [] - - for item in nodes: - node = item._asdict() - if node['enabled'] == 1: - node['condition'] = 'ENABLED' - else: - node['condition'] = 'DISABLED' - del node['enabled'] - node['port'] = str(node['port']) - node['id'] = str(node['id']) - if node['weight'] == 1: - del node['weight'] - loadbalancers['nodes'].append(node) - - if loadbalancers['vip']: - loadbalancers['vip'] = [{ - "id": loadbalancers['vipid'], - "address": str( - ipaddress.IPv4Address(loadbalancers['vip']) - ) - }] - del(loadbalancers['vip']) - del(loadbalancers['vipid']) - else: - loadbalancers['vip'] = [None] - del(loadbalancers['vipid']) - if not loadbalancers['errmsg']: - loadbalancers['statusDescription'] = None - else: - loadbalancers['statusDescription'] =\ - loadbalancers['errmsg'] - del(loadbalancers['errmsg']) - - monitor = session.query( - HealthMonitor.type, HealthMonitor.delay, - HealthMonitor.timeout, HealthMonitor.attempts, - HealthMonitor.path - ).join(LoadBalancer.monitors).\ - filter(LoadBalancer.id == lb_id).first() - - if monitor is None: - monitor_data = {} - else: - monitor_data = { - 'type': monitor.type, - 'delay': monitor.delay, - 'timeout': monitor.timeout, - 'attemptsBeforeDeactivation': monitor.attempts - } - if monitor.path: - monitor_data['path'] = monitor.path - - loadbalancers['monitor'] = monitor_data - - session.commit() - - return loadbalancers - -# TODO: we should be able to delete loadbalancers, require lb_id, name, -# tenant and a confirm flag for verification diff --git a/libra/admin_api/controllers/v2/status.py b/libra/admin_api/controllers/v2/status.py deleted file mode 100644 index bb57aaf5..00000000 --- a/libra/admin_api/controllers/v2/status.py +++ /dev/null @@ -1,260 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
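# Both v2 controllers above keep a VIP as an integer column (Vip.ip)
# and convert to and from dotted-quad form at the API edge with the
# ipaddress module. The round trip, written here for Python 3 (the
# retired code ran on Python 2, hence its unicode() casts):

import ipaddress

vip_num = int(ipaddress.IPv4Address('192.0.2.5'))  # 3221225989, as stored
dotted = str(ipaddress.IPv4Address(vip_num))       # back to '192.0.2.5'
assert dotted == '192.0.2.5'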
- -# pecan imports -import ConfigParser -import socket -import json -from pecan import expose, response, request, conf -from pecan.rest import RestController -from libra.common.api.lbaas import Device, db_session -from libra.common.api.lbaas import Vip, Limits, Counters, TenantLimits -from libra.openstack.common import log -from libra.admin_api.acl import tenant_is_admin, tenant_is_user - -from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker -from oslo.config import cfg - -LOG = log.getLogger(__name__) - - -class LimitsController(RestController): - """ a sub-controller for StatusController """ - @expose('json') - def get_one(self, tenant_id): - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - ret = {} - with db_session() as session: - limit = session.query(TenantLimits.loadbalancers).\ - filter(TenantLimits.tenantid == tenant_id).scalar() - - ret['maxLoadBalancers'] = limit - session.commit() - return ret - - @expose('json') - def get_all(self): - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - ret = {} - with db_session() as session: - limits = session.query(Limits.name, Limits.value).all() - if limits is None: - response.status = 500 - return dict( - faultcode="Server", - faultstring="Error obtaining limits" - ) - for limit in limits: - ret[limit.name] = limit.value - session.commit() - return ret - - @expose('json') - def put(self, tenant_id=None): - if not tenant_is_admin(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - try: - data = json.loads(request.body) - except: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Invalid JSON received" - ) - with db_session() as session: - if tenant_id is None: - for key, value in data.iteritems(): - limit = session.query(Limits).filter(Limits.name == key).\ - first() - if limit is None: - session.rollback() - response.status = 400 - return dict( - faultcode="Client", - faultstring="Limit not found: {0}".format(key) - ) - limit.value = value - else: - if 'maxLoadBalancers' in data: - limit = session.query(TenantLimits).\ - filter(TenantLimits.tenantid == tenant_id).first() - if limit is not None: - limit.loadbalancers = data['maxLoadBalancers'] - else: - new_limit = TenantLimits() - new_limit.tenantid = tenant_id - new_limit.loadbalancers = data['maxLoadBalancers'] - session.add(new_limit) - else: - session.rollback() - response.status = 400 - return dict( - faultcode="Client", - faultstring="No user settable limit in json" - ) - session.commit() - - -class PoolController(RestController): - @expose('json') - def get(self): - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - NULL = None # For pep8 - with db_session() as session: - dev_use = session.query(Device).\ - filter(Device.status == 'ONLINE').count() - dev_free = session.query(Device).\ - filter(Device.status == 'OFFLINE').count() - dev_error = session.query(Device).\ - filter(Device.status == 'ERROR').count() - dev_pending = session.query(Device).\ - filter(Device.status == 'DELETED').count() - vips_use = session.query(Vip).\ - filter(Vip.device > 0).count() - vips_free = session.query(Vip).\ 
- filter(Vip.device == NULL).count() - vips_bad = session.query(Vip).\ - filter(Vip.device == 0).count() - status = { - "devices": { - "used": dev_use, - "available": dev_free, - "error": dev_error, - "pendingDelete": dev_pending - }, - "vips": { - "used": vips_use, - "available": vips_free, - "bad": vips_bad - } - } - session.commit() - return status - - -class ServiceController(RestController): - @expose('json') - def get(self): - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - ret = { - 'mysql': [], - 'gearman': [] - } - config = ConfigParser.SafeConfigParser() - config.read(cfg.CONF['config_file']) - - # Connect to all MySQL servers and test - for section in conf.database: - db_conf = config._sections[section] - conn_string = '''mysql+mysqlconnector://%s:%s@%s:%s/%s''' % ( - db_conf['username'], - db_conf['password'], - db_conf['host'], - db_conf['port'], - db_conf['schema'] - ) - - if 'ssl_key' in db_conf: - ssl_args = {'ssl': { - 'cert': db_conf['ssl_cert'], - 'key': db_conf['ssl_key'], - 'ca': db_conf['ssl_ca'] - }} - - engine = create_engine( - conn_string, isolation_level="READ COMMITTED", - pool_size=1, connect_args=ssl_args, pool_recycle=3600 - ) - else: - engine = create_engine( - conn_string, isolation_level="READ COMMITTED", - pool_size=1, pool_recycle=3600 - ) - session = sessionmaker(bind=engine)() - try: - session.execute("SELECT 1") - session.close() - ret['mysql'].append( - {"ip": db_conf['host'], "status": 'ONLINE'} - ) - except: - ret['mysql'].append( - {"ip": db_conf['host'], "status": 'OFFLINE'} - ) - - # Socket connect to all gearman servers, TODO: a better gearman test - for server in conf.gearman.server: - ghost, gport = server.split(':') - try: - sock = socket.socket() - sock.settimeout(5) - sock.connect((ghost, int(gport))) - sock.close() - ret['gearman'].append({"ip": ghost, "status": 'ONLINE'}) - except socket.error: - ret['gearman'].append({"ip": ghost, "status": 'OFFLINE'}) - try: - sock.close() - except: - pass - - return ret - - -class CountersController(RestController): - @expose('json') - def get(self): - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - with db_session() as session: - counters = session.query(Counters.name, Counters.value).all() - return counters - - -class StatusController(RestController): - pool = PoolController() - service = ServiceController() - counters = CountersController() - limits = LimitsController() diff --git a/libra/admin_api/controllers/v2/user.py b/libra/admin_api/controllers/v2/user.py deleted file mode 100644 index 0d9616fe..00000000 --- a/libra/admin_api/controllers/v2/user.py +++ /dev/null @@ -1,191 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
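# ServiceController above declares a Gearman job server ONLINE when a
# plain TCP connect succeeds within five seconds (its own TODO admits
# this is a shallow test). The same probe as a standalone helper,
# stdlib only; 4730 is Gearman's default port:

import socket


def gearman_alive(server, timeout=5):
    """Return True if 'host:port' accepts a TCP connection."""
    host, port = server.split(':')
    sock = socket.socket()
    sock.settimeout(timeout)
    try:
        sock.connect((host, int(port)))
        return True
    except socket.error:
        return False
    finally:
        sock.close()


# gearman_alive('127.0.0.1:4730')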
- -import json -# pecan imports -from pecan import expose, request, response -from pecan.rest import RestController -from libra.openstack.common import log -from libra.admin_api.acl import tenant_is_user, tenant_is_admin -from libra.common.api.lbaas import db_session, AdminAuth - -LOG = log.getLogger(__name__) - - -class UserController(RestController): - @expose('json') - def get_all(self): - """ - Get a list of users - """ - if not tenant_is_admin(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - with db_session() as session: - user = session.query( - AdminAuth.tenant_id.label('tenant'), AdminAuth.level - ).all() - session.commit() - return user - - @expose('json') - def get_one(self, tenant_id=None): - """ - Get a single Admin API user or details about self - """ - if not tenant_is_user(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - - with db_session() as session: - user = session.query(AdminAuth).\ - filter(AdminAuth.tenant_id == tenant_id).first() - if user is None: - response.status = 404 - return dict( - faultcode="Client", - faultstatus="User not found" - ) - ret = { - "tenant": user.tenant_id, - "level": user.level - } - session.commit() - return ret - - @expose('json') - def delete(self, tenant_id): - """ Delete a given user from the Admin API """ - if not tenant_is_admin(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - with db_session() as session: - user_test = session.query(AdminAuth).\ - filter(AdminAuth.tenant_id == tenant_id).count() - if user_test == 0: - response.status = 404 - return dict( - faultcode="Client", - faultstring="Tenant not found" - ) - session.query(AdminAuth).\ - filter(AdminAuth.tenant_id == tenant_id).delete() - session.commit() - response.status = 204 - return None - - @expose('json') - def post(self): - """ Add a new user to the Admin API """ - if not tenant_is_admin(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - try: - data = json.loads(request.body) - except: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Invalid JSON received" - ) - if data['tenant'] is None: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Tenant ID required" - ) - tenant_id = data['tenant'] - if 'level' not in data: - level = 'USER' - elif data['level'] not in ['USER', 'ADMIN']: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Only USER or ADMIN levels allowed" - ) - else: - level = data['level'] - with db_session() as session: - user_test = session.query(AdminAuth).\ - filter(AdminAuth.tenant_id == tenant_id).count() - if user_test > 0: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Tenant already has an account" - ) - user = AdminAuth() - user.tenant_id = tenant_id - user.level = level - session.add(user) - session.commit() - - @expose('json') - def put(self, tenant_id): - """ Change the leve for an Admin API user """ - if not tenant_is_admin(request.headers): - response.status = 401 - return dict( - faultcode="Client", - faultstring="Client not authorized to access this function" - ) - try: - data = json.loads(request.body) - except: - response.status = 400 - return dict( - 
faultcode="Client", - faultstring="Invalid JSON received" - ) - if tenant_id is None: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Tenant ID required" - ) - if not data['level']: - level = 'USER' - elif data['level'] not in ['USER', 'ADMIN']: - response.status = 400 - return dict( - faultcode="Client", - faultstring="Only USER or ADMIN levels allowed" - ) - else: - level = data['level'] - with db_session() as session: - user = session.query(AdminAuth).\ - filter(AdminAuth.tenant_id == tenant_id).first() - if not user: - response.status = 404 - return dict( - faultcode="Client", - faultstring="Tenant does not have an account" - ) - user.level = level - session.commit() diff --git a/libra/admin_api/controllers/v2/v2_0.py b/libra/admin_api/controllers/v2/v2_0.py deleted file mode 100644 index 7d129a8d..00000000 --- a/libra/admin_api/controllers/v2/v2_0.py +++ /dev/null @@ -1,42 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import expose, response -from devices import DevicesController -from loadbalancers import LoadBalancersController -from status import StatusController -from user import UserController -from libra.admin_api.model.responses import Responses - - -class V2Controller(object): - """v2 control object.""" - - @expose('json') - def index(self): - response.status = 200 - return Responses.versions_v2_0 - - @expose('json') - def _default(self): - """default route.. acts as catch all for any wrong urls. - For now it returns a 404 because no action is defined for /""" - response.status = 404 - return Responses._default - - devices = DevicesController() - loadbalancers = LoadBalancersController() - status = StatusController() - user = UserController() diff --git a/libra/admin_api/device_pool/__init__.py b/libra/admin_api/device_pool/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/device_pool/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/device_pool/manage_pool.py b/libra/admin_api/device_pool/manage_pool.py deleted file mode 100644 index ce21e9e2..00000000 --- a/libra/admin_api/device_pool/manage_pool.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ipaddress -import threading -import uuid - -from datetime import datetime -from gearman.constants import JOB_UNKNOWN -from oslo.config import cfg -from sqlalchemy import func - -from libra.common.api.lbaas import Device, PoolBuilding, Vip, db_session -from libra.common.api.lbaas import Counters -from libra.common.json_gearman import JSONGearmanClient -from libra.openstack.common import log - -# TODO: Lots of duplication of code here, need to cleanup - -LOG = log.getLogger(__name__) - - -class Pool(object): - - DELETE_SECONDS = cfg.CONF['admin_api'].delete_timer_seconds - PROBE_SECONDS = cfg.CONF['admin_api'].probe_timer_seconds - VIPS_SECONDS = cfg.CONF['admin_api'].vips_timer_seconds - - def __init__(self): - self.probe_timer = None - self.delete_timer = None - self.vips_time = None - self.server_id = cfg.CONF['admin_api']['server_id'] - self.number_of_servers = cfg.CONF['admin_api']['number_of_servers'] - self.vip_pool_size = cfg.CONF['admin_api']['vip_pool_size'] - self.node_pool_size = cfg.CONF['admin_api']['node_pool_size'] - - self.start_delete_sched() - self.start_probe_sched() - self.start_vips_sched() - - def shutdown(self): - if self.probe_timer: - self.probe_timer.cancel() - if self.delete_timer: - self.delete_timer.cancel() - if self.vips_timer: - self.vips_timer.cancel() - - def delete_devices(self): - """ Searches for all devices in the DELETED state and removes them """ - minute = datetime.now().minute - if self.server_id != minute % self.number_of_servers: - LOG.info('Not our turn to run delete check, sleeping') - self.start_delete_sched() - return - LOG.info('Running device delete check') - try: - message = [] - with db_session() as session: - devices = session.query(Device).\ - filter(Device.status == 'DELETED').all() - - for device in devices: - job_data = { - 'action': 'DELETE_DEVICE', - 'name': device.name - } - unique_uuid = str(uuid.uuid4()) - message.append(dict(task='libra_pool_mgm', - data=job_data, - unique=unique_uuid)) - - counter = session.query(Counters).\ - filter(Counters.name == 'devices_deleted').first() - counter.value += len(devices) - session.commit() - if not message: - LOG.info("No devices to delete") - else: - gear = GearmanWork() - gear.send_delete_message(message) - except: - LOG.exception("Exception when deleting devices") - - self.start_delete_sched() - - def probe_vips(self): - minute = datetime.now().minute - if self.server_id != minute % self.number_of_servers: - LOG.info('Not our turn to run vips check, sleeping') - self.start_vips_sched() - return - LOG.info('Running vips count probe check') - try: - with db_session() as session: - NULL = None # For pep8 - vip_count = session.query(Vip).\ - filter(Vip.device == NULL).count() - if vip_count >= self.vip_pool_size: - LOG.info("Enough vips exist, no work to do") - session.commit() - self.start_vips_sched() - return - - build_count = self.vip_pool_size - vip_count - self._build_vips(build_count) - except: - LOG.exception( - "Uncaught 
exception during vip pool expansion" - ) - self.start_vips_sched() - - def probe_devices(self): - minute = datetime.now().minute - if self.server_id != minute % self.number_of_servers: - LOG.info('Not our turn to run probe check, sleeping') - self.start_probe_sched() - return - LOG.info('Running device count probe check') - try: - with db_session() as session: - # Double check we have no outstanding builds assigned to us - session.query(PoolBuilding).\ - filter(PoolBuilding.server_id == self.server_id).\ - delete() - session.flush() - dev_count = session.query(Device).\ - filter(Device.status == 'OFFLINE').count() - if dev_count >= self.node_pool_size: - LOG.info("Enough devices exist, no work to do") - session.commit() - self.start_probe_sched() - return - - build_count = self.node_pool_size - dev_count - built = session.query(func.sum(PoolBuilding.qty)).first() - if not built[0]: - built = 0 - else: - built = built[0] - if build_count - built <= 0: - LOG.info( - "Other servers are building enough nodes" - ) - session.commit() - self.start_probe_sched() - return - build_count -= built - building = PoolBuilding() - building.server_id = self.server_id - building.qty = build_count - session.add(building) - session.commit() - - # Closed the DB session because we don't want it hanging around - # for a long time locking tables - self._build_nodes(build_count) - with db_session() as session: - session.query(PoolBuilding).\ - filter(PoolBuilding.server_id == self.server_id).\ - delete() - session.commit() - except: - LOG.exception("Uncaught exception during pool expansion") - self.start_probe_sched() - - def _build_nodes(self, count): - message = [] - it = 0 - job_data = {'action': 'BUILD_DEVICE'} - while it < count: - unique_uuid = str(uuid.uuid4()) - message.append(dict(task='libra_pool_mgm', - data=job_data, - unique=unique_uuid)) - it += 1 - gear = GearmanWork() - gear.send_create_message(message) - - def _build_vips(self, count): - message = [] - it = 0 - job_data = {'action': 'BUILD_IP'} - while it < count: - unique_uuid = str(uuid.uuid4()) - message.append(dict(task='libra_pool_mgm', - data=job_data, - unique=unique_uuid)) - it += 1 - gear = GearmanWork() - gear.send_vips_message(message) - - def start_probe_sched(self): - seconds = datetime.now().second - if seconds < self.PROBE_SECONDS: - sleeptime = self.PROBE_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.PROBE_SECONDS) - - LOG.info('Pool probe check timer sleeping for %d seconds', sleeptime) - self.probe_timer = threading.Timer(sleeptime, self.probe_devices, ()) - self.probe_timer.start() - - def start_vips_sched(self): - seconds = datetime.now().second - if seconds < self.VIPS_SECONDS: - sleeptime = self.VIPS_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.VIPS_SECONDS) - - LOG.info('Pool vips check timer sleeping for %d seconds', sleeptime) - self.vips_timer = threading.Timer(sleeptime, self.probe_vips, ()) - self.vips_timer.start() - - def start_delete_sched(self): - seconds = datetime.now().second - if seconds < self.DELETE_SECONDS: - sleeptime = self.DELETE_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.DELETE_SECONDS) - - LOG.info('Pool delete check timer sleeping for %d seconds', sleeptime) - self.delete_timer = threading.Timer(sleeptime, self.delete_devices, ()) - self.delete_timer.start() - - -class GearmanWork(object): - - def __init__(self): - server_list = [] - for server in cfg.CONF['gearman']['servers']: - host, port = server.split(':') - server_list.append({'host': host, - 
'port': int(port), - 'keyfile': cfg.CONF['gearman']['ssl_key'], - 'certfile': cfg.CONF['gearman']['ssl_cert'], - 'ca_certs': cfg.CONF['gearman']['ssl_ca'], - 'keepalive': cfg.CONF['gearman']['keepalive'], - 'keepcnt': cfg.CONF['gearman']['keepcnt'], - 'keepidle': cfg.CONF['gearman']['keepidle'], - 'keepintvl': cfg.CONF['gearman']['keepintvl'] - }) - self.gearman_client = JSONGearmanClient(server_list) - - def send_delete_message(self, message): - LOG.info("Sending %d gearman messages", len(message)) - job_status = self.gearman_client.submit_multiple_jobs( - message, background=False, wait_until_complete=True, - max_retries=10, poll_timeout=30.0 - ) - delete_count = 0 - for status in job_status: - if status.state == JOB_UNKNOWN: - LOG.error('Gearman Job server fail') - continue - if status.timed_out: - LOG.error('Gearman timeout whilst deleting device') - continue - if status.result['response'] == 'FAIL': - LOG.error( - 'Pool manager failed to delete a device, removing from DB' - ) - - delete_count += 1 - with db_session() as session: - session.query(Device).\ - filter(Device.name == status.result['name']).delete() - session.commit() - - LOG.info('%d freed devices delete from pool', delete_count) - - def send_vips_message(self, message): - # TODO: make this gearman part more async, not wait for all builds - LOG.info("Sending %d gearman messages", len(message)) - job_status = self.gearman_client.submit_multiple_jobs( - message, background=False, wait_until_complete=True, - max_retries=10, poll_timeout=3600.0 - ) - built_count = 0 - for status in job_status: - if status.state == JOB_UNKNOWN: - LOG.error('Gearman Job server fail') - continue - if status.timed_out: - LOG.error('Gearman timeout whilst building vip') - continue - if status.result['response'] == 'FAIL': - LOG.error('Pool manager failed to build a vip') - continue - - built_count += 1 - try: - self._add_vip(status.result) - except: - LOG.exception( - 'Could not add vip to DB, node data: {0}' - .format(status.result) - ) - LOG.info( - '{vips} vips built and added to pool'.format(vips=built_count) - ) - - def send_create_message(self, message): - # TODO: make this gearman part more async, not wait for all builds - LOG.info("Sending {0} gearman messages".format(len(message))) - job_status = self.gearman_client.submit_multiple_jobs( - message, background=False, wait_until_complete=True, - max_retries=10, poll_timeout=3600.0 - ) - built_count = 0 - for status in job_status: - if status.state == JOB_UNKNOWN: - LOG.error('Gearman Job server fail') - continue - if status.timed_out: - LOG.error('Gearman timeout whilst building device') - continue - if status.result['response'] == 'FAIL': - LOG.error('Pool manager failed to build a device') - if 'name' in status.result: - self._add_bad_node(status.result) - continue - - built_count += 1 - try: - self._add_node(status.result) - except: - LOG.exception( - 'Could not add node to DB, node data: {0}' - .format(status.result) - ) - LOG.info( - '{nodes} devices built and added to pool'.format(nodes=built_count) - ) - - def _add_vip(self, data): - LOG.info('Adding vip {0} to DB'.format(data['ip'])) - vip = Vip() - vip.ip = int(ipaddress.IPv4Address(unicode(data['ip']))) - with db_session() as session: - session.add(vip) - counter = session.query(Counters).\ - filter(Counters.name == 'vips_built').first() - counter.value += 1 - session.commit() - - def _add_node(self, data): - LOG.info('Adding device {0} to DB'.format(data['name'])) - device = Device() - device.name = data['name'] - 
device.publicIpAddr = data['addr'] - # TODO: kill this field, make things use publicIpAddr instead - device.floatingIpAddr = data['addr'] - device.az = data['az'] - device.type = data['type'] - device.pingCount = 0 - device.status = 'OFFLINE' - device.created = None - with db_session() as session: - session.add(device) - counter = session.query(Counters).\ - filter(Counters.name == 'devices_built').first() - counter.value += 1 - session.commit() - - def _add_bad_node(self, data): - LOG.info( - 'Adding bad device {0} to DB to be deleted'.format(data['name']) - ) - device = Device() - device.name = data['name'] - device.publicIpAddr = data['addr'] - # TODO: kill this field, make things use publicIpAddr instead - device.floatingIpAddr = data['addr'] - device.az = data['az'] - device.type = data['type'] - device.pingCount = 0 - device.status = 'DELETED' - device.created = None - with db_session() as session: - session.add(device) - counter = session.query(Counters).\ - filter(Counters.name == 'devices_bad_built').first() - counter.value += 1 - session.commit() diff --git a/libra/admin_api/expunge/__init__.py b/libra/admin_api/expunge/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/expunge/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/expunge/expunge.py b/libra/admin_api/expunge/expunge.py deleted file mode 100644 index 91c633ad..00000000 --- a/libra/admin_api/expunge/expunge.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
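# The pool manager above and the expunge scheduler that follows share
# one coordination trick: every admin server runs the same timers, but
# a wake-up only does work when (time unit) % number_of_servers equals
# its own server_id -- minutes for the pool checks, day of month for
# expunge. A condensed sketch of the turn test and the second-aligned
# timer restart:

import threading
from datetime import datetime


def my_turn(server_id, number_of_servers):
    # manage_pool shards work by minute; expunge shards by day.
    return server_id == datetime.now().minute % number_of_servers


def start_sched(target_second, callback):
    # Sleep until the next time the wall clock reads target_second.
    now = datetime.now().second
    if now < target_second:
        sleeptime = target_second - now
    else:
        sleeptime = 60 - (now - target_second)
    timer = threading.Timer(sleeptime, callback)
    timer.start()
    return timer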
- -import threading - -from datetime import datetime, timedelta -from oslo.config import cfg - -from libra.common.api.lbaas import LoadBalancer, db_session, Counters -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class ExpungeScheduler(object): - def __init__(self): - self.expunge_timer = None - self.expire_days = cfg.CONF['admin_api']['expire_days'] - if not self.expire_days: - LOG.info('Expunge not configured, disabled') - return - self.server_id = cfg.CONF['admin_api']['server_id'] - self.number_of_servers = cfg.CONF['admin_api']['number_of_servers'] - self.run_expunge() - - def shutdown(self): - if self.expunge_timer: - self.expunge_timer.cancel() - - def run_expunge(self): - day = datetime.now().day - if self.server_id != day % self.number_of_servers: - LOG.info('Not our turn to run expunge check, sleeping') - self.expunge_timer = threading.Timer( - 24 * 60 * 60, self.run_expunge, () - ) - with db_session() as session: - try: - exp = datetime.now() - timedelta( - days=int(self.expire_days) - ) - exp_time = exp.strftime('%Y-%m-%d %H:%M:%S') - LOG.info( - 'Expunging deleted loadbalancers older than {0}' - .format(exp_time) - ) - count = session.query( - LoadBalancer.status - ).filter(LoadBalancer.updated < exp_time).\ - filter(LoadBalancer.status == 'DELETED').delete() - counter = session.query(Counters).\ - filter(Counters.name == 'loadbalancers_expunged').first() - counter.value += count - session.commit() - LOG.info( - '{0} deleted load balancers expunged'.format(count) - ) - except: - LOG.exception('Exception occurred during expunge') - LOG.info('Expunge thread sleeping for 24 hours') - self.expunge_timer = threading.Timer( - 24 * 60 * 60, self.run_expunge, ()) diff --git a/libra/admin_api/library/__init__.py b/libra/admin_api/library/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/library/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/library/rebuild.py b/libra/admin_api/library/rebuild.py deleted file mode 100644 index c86ea7d4..00000000 --- a/libra/admin_api/library/rebuild.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
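# The expunge pass above purges load balancers that have sat in
# DELETED state for more than expire_days. The cutoff it feeds to
# "updated < :exp_time" is computed like this (expire_days shown as a
# literal here; the real value comes from the admin_api config group):

from datetime import datetime, timedelta

expire_days = 90  # illustrative only

exp = datetime.now() - timedelta(days=int(expire_days))
exp_time = exp.strftime('%Y-%m-%d %H:%M:%S')
# e.g. '2015-07-19 16:03:27', compared as a string against the
# DATETIME column; that works because the two formats sort identically.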
- -import ipaddress -from libra.common.api.lbaas import loadbalancers_devices, Vip, Counters -from libra.common.api.lbaas import Device, LoadBalancer, db_session -from libra.common.api.gearman_client import submit_job, submit_vip_job -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -def rebuild_device(device_id): - new_device_id = None - new_device_name = None - with db_session() as session: - new_device = session.query(Device).\ - filter(~Device.id.in_( - session.query(loadbalancers_devices.c.device) - )).\ - filter(Device.status == "OFFLINE").\ - filter(Device.pingCount == 0).\ - with_lockmode('update').\ - first() - if new_device is None: - session.rollback() - LOG.error( - 'No spare devices when trying to rebuild device {0}' - .format(device_id) - ) - return ( - 500, - dict( - faultcode="Server", - faultstring='No spare devices when trying to rebuild ' - 'device {0}'.format(device_id) - ) - ) - new_device_id = new_device.id - new_device_name = new_device.name - LOG.info( - "Moving device {0} to device {1}" - .format(device_id, new_device_id) - ) - lbs = session.query(LoadBalancer).\ - join(LoadBalancer.devices).\ - filter(Device.id == device_id).all() - for lb in lbs: - lb.devices = [new_device] - lb.status = "ERROR(REBUILDING)" - new_device.status = 'BUILDING' - lbid = lbs[0].id - session.commit() - submit_job( - 'UPDATE', new_device_name, new_device_id, lbid - ) - with db_session() as session: - new_device = session.query(Device).\ - filter(Device.id == new_device_id).first() - vip = session.query(Vip).filter(Vip.device == device_id).first() - if vip: - vip.device = new_device_id - device = session.query(Device).\ - filter(Device.id == device_id).first() - device.status = 'DELETED' - lbs = session.query(LoadBalancer).\ - join(LoadBalancer.devices).\ - filter(Device.id == new_device_id).all() - for lb in lbs: - lb.errmsg = "Load Balancer rebuild on new device" - if vip: - LOG.info( - "Moving IP {0} and marking device {1} for deletion" - .format(str(ipaddress.IPv4Address(vip.ip)), device_id) - ) - submit_vip_job( - 'ASSIGN', new_device_name, vip.id - ) - new_device.status = 'ONLINE' - counter = session.query(Counters).\ - filter(Counters.name == 'loadbalancers_rebuild').first() - counter.value += 1 - session.commit() - return ( - 200, - dict(oldId=device_id, newId=new_device_id) - ) diff --git a/libra/admin_api/model/__init__.py b/libra/admin_api/model/__init__.py deleted file mode 100644 index 554a28fe..00000000 --- a/libra/admin_api/model/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def init_model(): - """ - This is a stub method which is called at application startup time. - - If you need to bind to a parse database configuration, set up tables or - ORM classes, or perform any database initialization, this is the - recommended place to do it. 
- - For more information working with databases, and some common recipes, - see http://pecan.readthedocs.org/en/latest/databases.html - """ - pass diff --git a/libra/admin_api/model/responses.py b/libra/admin_api/model/responses.py deleted file mode 100644 index f1799804..00000000 --- a/libra/admin_api/model/responses.py +++ /dev/null @@ -1,70 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the 'License'); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Class Responses -responder objects for framework. -""" - - -class Responses(object): - """404 - not found""" - _default = {'status': '404', 'message': 'Object not Found'} - - """not found """ - not_found = {'message': 'Object not Found'} - - """service_unavailable""" - service_unavailable = {'message': 'Service Unavailable'} - - versions = { - "versions": [ - { - "id": "v1", - "updated": "2014-01-13T16:55:25Z", - "status": "DEPRECATED" - }, - { - "id": "v2.0", - "updated": "2014-01-13T16:55:25Z", - "status": "CURRENT" - } - ] - } - - versions_v1 = { - "version": { - "id": "v1", - "updated": "2014-01-13T16:55:25Z", - "status": "DEPRECATED", - "media-types": [ - { - "base": "application/json" - } - ] - } - } - - versions_v2_0 = { - "version": { - "id": "v2", - "updated": "2014-01-13T16:55:25Z", - "status": "CURRENT", - "media-types": [ - { - "base": "application/json" - } - ] - } - } diff --git a/libra/admin_api/model/validators.py b/libra/admin_api/model/validators.py deleted file mode 100644 index e8281f74..00000000 --- a/libra/admin_api/model/validators.py +++ /dev/null @@ -1,49 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
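# The validators in the hunk that follows are WSME types: declaring an
# attribute with wsattr(..., mandatory=True) makes wsmeext.pecan reject
# request bodies that omit it, and Enum restricts a field to a fixed
# value set. A minimal type of the same shape (DeviceStub itself is
# illustrative, not part of the retired code):

from wsme import types as wtypes
from wsme import wsattr
from wsme.types import Base, Enum


class DeviceStub(Base):
    name = wsattr(wtypes.text, mandatory=True)
    az = wsattr(int, mandatory=True)
    status = Enum(wtypes.text, 'ONLINE', 'ERROR')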
- -from wsme import types as wtypes -from wsme import wsattr -from wsme.types import Base, Enum - - -class LB(Base): - id = wsattr(int, mandatory=True) - tenantid = wsattr(wtypes.text, mandatory=True) - - -class DevicePost(Base): - name = wsattr(wtypes.text, mandatory=True) - publicIpAddr = wsattr(wtypes.text, mandatory=True) - floatingIpAddr = wsattr(wtypes.text, mandatory=True) - az = wsattr(int, mandatory=True) - type = wsattr(wtypes.text, mandatory=True) - - -class DeviceResp(Base): - id = int - name = wtypes.text - floatingIpAddr = wtypes.text - publicIpAddr = wtypes.text - az = int - type = wtypes.text - created = wtypes.text - updated = wtypes.text - status = wtypes.text - loadBalancers = wsattr(['LB']) - - -class DevicePut(Base): - status = Enum(wtypes.text, 'ONLINE', 'ERROR') - statusDescription = wsattr(wtypes.text, mandatory=True) diff --git a/libra/admin_api/stats/__init__.py b/libra/admin_api/stats/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/stats/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/stats/billing_sched.py b/libra/admin_api/stats/billing_sched.py deleted file mode 100644 index 49704da3..00000000 --- a/libra/admin_api/stats/billing_sched.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
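The billing scheduler that follows, like the other admin API schedulers in this patch, shares timer work across several admin API servers by comparing the current minute against its own server index. A minimal sketch of that turn-taking check (is_my_turn is a name invented here for illustration):

    from datetime import datetime

    def is_my_turn(server_id, number_of_servers):
        # With 3 servers, server 0 runs on minutes 0, 3, 6, ...,
        # server 1 on minutes 1, 4, 7, ..., and so on; servers whose
        # turn it is not simply re-arm their timers and wait.
        return server_id == datetime.now().minute % number_of_servers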
- -import threading -import datetime - -from oslo.config import cfg -from libra.common.api.lbaas import Billing, db_session -from libra.common.api.mnb import update_mnb, test_mnb_connection -from libra.openstack.common import timeutils -from libra.openstack.common import log as logging -from sqlalchemy.sql import func - - -LOG = logging.getLogger(__name__) - - -class BillingStats(object): - - EXISTS_SECONDS = cfg.CONF['admin_api'].exists_timer_seconds - USAGE_SECONDS = cfg.CONF['admin_api'].usage_timer_seconds - - def __init__(self, drivers): - self.drivers = drivers - self.usage_timer = None - self.exists_timer = None - self.server_id = cfg.CONF['admin_api']['server_id'] - self.number_of_servers = cfg.CONF['admin_api']['number_of_servers'] - self.exists_freq = cfg.CONF['admin_api'].exists_freq - self.usage_freq = cfg.CONF['admin_api'].usage_freq - self.start_usage_sched() - self.start_exists_sched() - - def shutdown(self): - if self.usage_timer: - self.usage_timer.cancel() - if self.exists_timer: - self.exists_timer.cancel() - - def update_usage(self): - # Work out if it is our turn to run - minute = datetime.datetime.now().minute - if self.server_id != minute % self.number_of_servers: - self.start_usage_sched() - return - - # Send periodic usage notifications - try: - self._exec_usage() - except Exception: - LOG.exception('Uncaught exception during billing usage update') - - # Need to restart timer after every billing cycle - self.start_usage_sched() - - def update_exists(self): - # Work out if it is our turn to run - minute = datetime.datetime.now().minute - if self.server_id != minute % self.number_of_servers: - self.start_exists_sched() - return - - # Send periodic exists notifications - try: - self._exec_exists() - except Exception: - LOG.exception('Uncaught exception during billing exists update') - - # Need to restart timer after every billing cycle - self.start_exists_sched() - - def _exec_exists(self): - with db_session() as session: - # Check if it's time to send exists notifications - delta = datetime.timedelta(minutes=self.exists_freq) - exp = timeutils.utcnow() - delta - exp_time = exp.strftime('%Y-%m-%d %H:%M:%S') - - updated = session.query( - Billing.last_update - ).filter(Billing.name == "exists").\ - filter(Billing.last_update > exp_time).\ - first() - - if updated is not None: - # Not time yet - LOG.info('Not time to send exists notifications yet {0}'. - format(exp_time)) - session.rollback() - return - - # Check the connection before sending the notifications - if not test_mnb_connection(): - # Abort the exists notifications - LOG.info("Aborting exists notifications. Could not connect") - session.rollback() - return - - # Update the exists timestamp now - session.query(Billing).\ - filter(Billing.name == "exists").\ - update({"last_update": func.now()}, - synchronize_session='fetch') - session.commit() - - # Send the notifications - update_mnb('lbaas.instance.exists', None, None) - - def _exec_usage(self): - with db_session() as session: - # Next check if it's time to send bandwidth usage notifications - delta = datetime.timedelta(minutes=self.usage_freq) - exp = timeutils.utcnow() - delta - - start, = session.query( - Billing.last_update - ).filter(Billing.name == "usage").\ - first() - - if start and start > exp: - # Not time yet - LOG.info('Not time to send usage statistics yet {0}'. 
- format(exp)) - session.rollback() - return - - # Check the connection before sending the notifications - if not test_mnb_connection(): - # Abort the exists notifications - LOG.info("Aborting usage notifications. Could not connect") - session.rollback() - return - - # Calculate the stopping point by rounding backward to the nearest - # N minutes. i.e. if N = 60, this will round us back to HH:00:00, - # or if N = 15, it will round us back to HH:15:00, HH:30:00, - # HH:45:00, or HH:00:00, whichever is closest. - N = cfg.CONF['admin_api'].usage_freq - now = timeutils.utcnow() - stop = now - datetime.timedelta(minutes=now.minute % N, - seconds=now.second, - microseconds=now.microsecond) - - # Release the lock - session.query(Billing).\ - filter(Billing.name == "usage").\ - update({"last_update": stop}, - synchronize_session='fetch') - session.commit() - - # Send the usage notifications. Pass the timestamps to save - # queries. - update_mnb('lbaas.bandwidth.usage', start, stop) - - def start_usage_sched(self): - # Always try to hit the expected second mark for usage - seconds = datetime.datetime.now().second - if seconds < self.USAGE_SECONDS: - sleeptime = self.USAGE_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.USAGE_SECONDS) - - LOG.info('LB usage timer sleeping for {secs} seconds' - .format(secs=sleeptime)) - self.usage_timer =\ - threading.Timer(sleeptime, self.update_usage, ()) - self.usage_timer.start() - - def start_exists_sched(self): - # Always try to hit the expected second mark for exists - seconds = datetime.datetime.now().second - if seconds < self.EXISTS_SECONDS: - sleeptime = self.EXISTS_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.EXISTS_SECONDS) - - LOG.info('LB exists timer sleeping for {secs} seconds' - .format(secs=sleeptime)) - self.exists_timer =\ - threading.Timer(sleeptime, self.update_exists, ()) - self.exists_timer.start() diff --git a/libra/admin_api/stats/drivers/__init__.py b/libra/admin_api/stats/drivers/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/stats/drivers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/stats/drivers/base.py b/libra/admin_api/stats/drivers/base.py deleted file mode 100644 index 02a53335..00000000 --- a/libra/admin_api/stats/drivers/base.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations - -known_drivers = { - 'dummy': 'libra.admin_api.stats.drivers.dummy.driver.DummyDriver', - 'datadog': 'libra.admin_api.stats.drivers.datadog.driver.DatadogDriver', - 'database': 'libra.admin_api.stats.drivers.database.driver.DbDriver' -} - - -class AlertDriver(object): - def send_alert(self, message, device_id, device_ip, device_name, device_tenant): - raise NotImplementedError() - - def send_delete(self, message, device_id, device_ip, device_name): - raise NotImplementedError() - - def send_node_change(self, message, lbid, degraded): - raise NotImplementedError() diff --git a/libra/admin_api/stats/drivers/database/__init__.py b/libra/admin_api/stats/drivers/database/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/stats/drivers/database/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/stats/drivers/database/driver.py b/libra/admin_api/stats/drivers/database/driver.py deleted file mode 100644 index a6a7272e..00000000 --- a/libra/admin_api/stats/drivers/database/driver.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations - -from libra.admin_api.stats.drivers.base import AlertDriver -from libra.common.api.lbaas import Device, LoadBalancer, db_session -from libra.common.api.lbaas import loadbalancers_devices -from libra.admin_api.library.rebuild import rebuild_device -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class DbDriver(AlertDriver): - def send_alert(self, message, device_id, device_ip, device_name, device_tenant): - with db_session() as session: - device = session.query(Device).\ - filter(Device.id == device_id).first() - - device.status = "ERROR" - errmsg = "Load Balancer has failed, attempting rebuild" - - lbs = session.query( - loadbalancers_devices.c.loadbalancer).\ - filter(loadbalancers_devices.c.device == device_id).\ - all() - - # TODO: make it so that we don't get stuck in LB ERROR here when - # a rebuild fails due to something like a bad device. Maybe have - # an attempted rebuild count? 
- for lb in lbs: - session.query(LoadBalancer).\ - filter(LoadBalancer.id == lb[0]).\ - update({"status": "ERROR", "errmsg": errmsg}, - synchronize_session='fetch') - - session.flush() - - session.commit() - self._rebuild_device(device_id) - - def send_delete(self, message, device_id, device_ip, device_name): - with db_session() as session: - session.query(Device).\ - filter(Device.id == device_id).\ - update({"status": "DELETED"}, synchronize_session='fetch') - session.commit() - - def send_node_change(self, message, lbid, degraded): - with db_session() as session: - lb = session.query(LoadBalancer).\ - filter(LoadBalancer.id == lbid).first() - - if lb.status == 'ERROR': - lb.errmsg = "Load balancer has failed" - elif lb.status == 'ACTIVE' and degraded: - lb.errmsg = "A node on the load balancer has failed" - lb.status = 'DEGRADED' - elif lb.status == 'DEGRADED' and not degraded: - lb.errmsg = "A node on the load balancer has recovered" - lb.status = 'ACTIVE' - - session.commit() - - def _rebuild_device(self, device_id): - rebuild_device(device_id) diff --git a/libra/admin_api/stats/drivers/datadog/__init__.py b/libra/admin_api/stats/drivers/datadog/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/stats/drivers/datadog/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/stats/drivers/datadog/driver.py b/libra/admin_api/stats/drivers/datadog/driver.py deleted file mode 100644 index 82a078e4..00000000 --- a/libra/admin_api/stats/drivers/datadog/driver.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations - -from dogapi import dog_http_api as api -from oslo.config import cfg - -from libra.admin_api.stats.drivers.base import AlertDriver -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class DatadogDriver(AlertDriver): - def __init__(self): - super(DatadogDriver, self).__init__() - api.api_key = cfg.CONF['admin_api']['datadog_api_key'] - api.application_key = cfg.CONF['admin_api']['datadog_app_key'] - self.dd_env = cfg.CONF['admin_api']['datadog_env'] - self.dd_tags = cfg.CONF['admin_api']['datadog_tags'] - self.dd_message_tail = cfg.CONF['admin_api']['datadog_message_tail'] - - def send_alert(self, message, device_id, device_ip, device_name, device_tenant): - title = 'Load balancer failure in {0}: {1} {2} {3} {4}'.format( - self.dd_env, device_id, device_ip, device_name, device_tenant) - text = 'Load balancer failed with message {0} {1}'.format( - message, self.dd_message_tail - ) - tags = self.dd_tags.split() - resp = api.event_with_response( - title, text, tags=tags, alert_type='error' - ) - LOG.info('Datadog alert response: {0}'.format(resp)) - - def send_delete(self, message, device_id, device_ip, device_name): - title = 'Load balancer unreachable in {0}: {1} {2}'.\ - format(self.dd_env, device_ip, device_name) - text = 'Load balancer unreachable with message {0} {1}'.format( - message, self.dd_message_tail - ) - tags = self.dd_tags.split() - resp = api.event_with_response( - title, text, tags=tags, alert_type='success' - ) - LOG.info('Datadog alert response: {0}'.format(resp)) diff --git a/libra/admin_api/stats/drivers/dummy/__init__.py b/libra/admin_api/stats/drivers/dummy/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/admin_api/stats/drivers/dummy/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/admin_api/stats/drivers/dummy/driver.py b/libra/admin_api/stats/drivers/dummy/driver.py deleted file mode 100644 index c40e6d9f..00000000 --- a/libra/admin_api/stats/drivers/dummy/driver.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations - -from libra.admin_api.stats.drivers.base import AlertDriver -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class DummyDriver(AlertDriver): - def send_alert(self, message, device_id, device_ip, device_name, device_tenant): - LOG.info('Dummy alert of: {0}'.format(message)) - - def send_delete(self, message, device_id, device_ip, device_name): - LOG.info('Dummy delete of: {0}'.format(message)) - - def send_node_change(self, message, lbid, degraded): - LOG.info('Dummy node change of: {0}'.format(message)) diff --git a/libra/admin_api/stats/offline_sched.py b/libra/admin_api/stats/offline_sched.py deleted file mode 100644 index 0e3436a1..00000000 --- a/libra/admin_api/stats/offline_sched.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading - -from datetime import datetime -from oslo.config import cfg - -from libra.common.api.lbaas import Counters, Device, db_session -from libra.admin_api.stats.stats_gearman import GearJobs -from libra.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -class OfflineStats(object): - - OFFLINE_SECONDS = cfg.CONF['admin_api'].offline_timer_seconds - - def __init__(self, drivers): - self.drivers = drivers - self.offline_timer = None - self.ping_limit = cfg.CONF['admin_api']['stats_offline_ping_limit'] - self.error_limit = cfg.CONF['admin_api']['stats_device_error_limit'] - self.server_id = cfg.CONF['admin_api']['server_id'] - self.number_of_servers = cfg.CONF['admin_api']['number_of_servers'] - - self.start_offline_sched() - - def shutdown(self): - if self.offline_timer: - self.offline_timer.cancel() - - def check_offline_lbs(self): - # Work out if it is our turn to run - minute = datetime.now().minute - if self.server_id != minute % self.number_of_servers: - LOG.info('Not our turn to run OFFLINE check, sleeping') - self.start_offline_sched() - return - tested = 0 - failed = 0 - try: - tested, failed = self._exec_offline_check() - except Exception: - LOG.exception('Uncaught exception during OFFLINE check') - # Need to restart timer after every ping cycle - LOG.info( - '{tested} OFFLINE loadbalancers tested, {failed} failed' - .format(tested=tested, failed=failed) - ) - self.start_offline_sched() - - def _exec_offline_check(self): - tested = 0 - failed = 0 - node_list = [] - LOG.info('Running OFFLINE check') - with db_session() as session: - # Join to ensure device is in-use - devices = session.query( - Device.id, Device.name - ).filter(Device.status == 'OFFLINE').all() - - tested = len(devices) - if tested == 0: - LOG.info('No OFFLINE Load Balancers to check') - return (0, 0) - for lb in devices: - node_list.append(lb.name) - gearman = GearJobs() - failed_lbs = gearman.offline_check(node_list) - failed = len(failed_lbs) - if failed > self.error_limit: - LOG.error( - 'Too many simultaneous Load Balancer Failures.' 
- ' Aborting deletion attempt' - ) - return tested, failed - - if failed > 0: - self._send_delete(failed_lbs) - - # Clear the ping counts for all devices not in - # the failed list - succeeded = list(set(node_list) - set(failed_lbs)) - session.query(Device.name, Device.pingCount).\ - filter(Device.name.in_(succeeded)).\ - update({"pingCount": 0}, synchronize_session='fetch') - - session.commit() - - return tested, failed - - def _send_delete(self, failed_nodes): - with db_session() as session: - for lb in failed_nodes: - # Get the current ping count - data = session.query( - Device.id, Device.pingCount, Device.name, Device.floatingIpAddr).\ - filter(Device.name == lb).first() - - if not data: - LOG.error( - 'Device {0} no longer exists'.format(lb) - ) - continue - - if data.pingCount < self.ping_limit: - # Query rows are immutable named tuples, so track the - # incremented count locally - ping_count = data.pingCount + 1 - LOG.error( - 'Offline Device {0} has failed {1} ping attempts'. - format(lb, ping_count) - ) - session.query(Device).\ - filter(Device.name == lb).\ - update({"pingCount": ping_count}, - synchronize_session='fetch') - session.flush() - continue - - message = ( - 'Load balancer {0} unreachable and marked for deletion'. - format(lb) - ) - for driver in self.drivers: - instance = driver() - LOG.info( - 'Sending delete request for {0} to {1}'.format( - lb, instance.__class__.__name__ - ) - ) - instance.send_delete(message, data.id, data.floatingIpAddr, data.name) - counter = session.query(Counters).\ - filter(Counters.name == 'devices_offline_failed').first() - counter.value += 1 - session.commit() - - def start_offline_sched(self): - # Always try to hit the expected second mark for offline checks - seconds = datetime.now().second - if seconds < self.OFFLINE_SECONDS: - sleeptime = self.OFFLINE_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.OFFLINE_SECONDS) - - LOG.info('LB offline check timer sleeping for {secs} seconds' - .format(secs=sleeptime)) - self.offline_timer = threading.Timer( - sleeptime, self.check_offline_lbs, () - ) - self.offline_timer.start() diff --git a/libra/admin_api/stats/ping_sched.py b/libra/admin_api/stats/ping_sched.py deleted file mode 100644 index 64b2680e..00000000 --- a/libra/admin_api/stats/ping_sched.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
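The ping scheduler that follows re-arms its threading.Timer so that each cycle fires near a fixed second-of-minute mark, the same pattern used by start_offline_sched above. A worked sketch of the sleep computation (sleep_until_mark is an illustrative name):

    def sleep_until_mark(current_second, mark=30):
        # e.g. with mark=30: at HH:MM:12 sleep 18s to land on HH:MM:30;
        # at HH:MM:45 sleep 45s to land on the next minute's :30 mark.
        if current_second < mark:
            return mark - current_second
        return 60 - (current_second - mark)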
- -import threading - -from datetime import datetime -from oslo.config import cfg -from libra.common.api.lbaas import LoadBalancer, Device, Node, db_session -from libra.openstack.common import log as logging -from libra.admin_api.stats.stats_gearman import GearJobs - -LOG = logging.getLogger(__name__) - - -class PingStats(object): - - PING_SECONDS = cfg.CONF['admin_api'].ping_timer_seconds - - def __init__(self, drivers): - self.drivers = drivers - self.ping_timer = None - self.error_limit = cfg.CONF['admin_api']['stats_device_error_limit'] - self.server_id = cfg.CONF['admin_api']['server_id'] - self.number_of_servers = cfg.CONF['admin_api']['number_of_servers'] - self.stats_driver = cfg.CONF['admin_api']['stats_driver'] - LOG.info("Selected stats drivers: %s", self.stats_driver) - - self.start_ping_sched() - - def shutdown(self): - if self.ping_timer: - self.ping_timer.cancel() - - def ping_lbs(self): - # Work out if it is our turn to run - minute = datetime.now().minute - if self.server_id != minute % self.number_of_servers: - LOG.info('Not our turn to run ping check, sleeping') - self.start_ping_sched() - return - pings = 0 - failed = 0 - try: - pings, failed = self._exec_ping() - except Exception: - LOG.exception('Uncaught exception during LB ping') - # Need to restart timer after every ping cycle - LOG.info('{pings} loadbalancers pinged, {failed} failed' - .format(pings=pings, failed=failed)) - self.start_ping_sched() - - def _exec_ping(self): - pings = 0 - failed = 0 - node_list = [] - LOG.info('Running ping check') - with db_session() as session: - devices = session.query( - Device.id, Device.name - ).filter(Device.status == 'ONLINE').all() - pings = len(devices) - if pings == 0: - LOG.info('No LBs to ping') - return (0, 0) - for lb in devices: - node_list.append(lb.name) - gearman = GearJobs() - failed_lbs, node_status = gearman.send_pings(node_list) - failed = len(failed_lbs) - if failed > self.error_limit: - LOG.error( - 'Too many simultaneous Load Balancer Failures.' - ' Aborting recovery attempt' - ) - return pings, failed - - if failed > 0: - self._send_fails(failed_lbs) - - # Process node status after lb status - self._update_nodes(node_status) - session.commit() - - return pings, failed - - def _send_fails(self, failed_lbs): - with db_session() as session: - for lb in failed_lbs: - data = self._get_lb(lb, session) - if not data: - LOG.error( - 'Device {0} has no Loadbalancer attached'. - format(lb) - ) - continue - message = ( - 'Load balancer failed\n' - 'ID: {0}\n' - 'IP: {1}\n' - 'name: {2}\n' - 'tenant: {3}\n'.format( - data.id, data.floatingIpAddr, data.name, - data.tenantid - ) - ) - for driver in self.drivers: - instance = driver() - LOG.info( - 'Sending failure of {0} to {1}'.format( - lb, instance.__class__.__name__ - ) - ) - instance.send_alert(message, data.id, data.floatingIpAddr, data.name, data.tenantid) - session.commit() - - def _get_lb(self, lb, session): - lb = session.query( - LoadBalancer.tenantid, Device.floatingIpAddr, Device.id, Device.name - ).join(LoadBalancer.devices).\ - filter(Device.name == lb).first() - - return lb - - def _update_nodes(self, node_status): - lbids = [] - degraded = [] - failed_nodes = dict() - repaired_nodes = dict() - errormsg = dict() - with db_session() as session: - for lb, nodes in node_status.iteritems(): - data = self._get_lb(lb, session) - if not data: - LOG.error( - 'Device {0} has no Loadbalancer attached'. 
- format(lb) - ) - continue - - # Iterate the list of nodes returned from the worker - # and track any status changes - for node in nodes: - # Get the last known status from the nodes table - node_data = session.query(Node).\ - filter(Node.id == int(node['id'])).first() - - if node_data is None: - LOG.error( - 'DB error getting node {0} to set status {1}' - .format(node['id'], node['status']) - ) - continue - - # Note all degraded LBs - if (node['status'] == 'DOWN' and - node_data.lbid not in degraded): - degraded.append(node_data.lbid) - - new_status = None - # Compare node status to the workers status - if (node['status'] == 'DOWN' and - node_data.status == 'ONLINE'): - new_status = 'ERROR' - if node_data.lbid not in failed_nodes: - failed_nodes[node_data.lbid] = [] - failed_nodes[node_data.lbid].append(node['id']) - elif (node['status'] == 'UP' and - node_data.status == 'ERROR'): - new_status = 'ONLINE' - if node_data.lbid not in repaired_nodes: - repaired_nodes[node_data.lbid] = [] - repaired_nodes[node_data.lbid].append(node['id']) - else: - # No change - continue - - # Note all LBs with node status changes - if node_data.lbid not in lbids: - lbids.append(node_data.lbid) - errormsg[node_data.lbid] =\ - 'Node status change ID:'\ - ' {0}, IP: {1}, tenant: {2}'.\ - format( - node_data.lbid, - data.floatingIpAddr, - data.tenantid) - - # Change the node status in the node table - session.query(Node).\ - filter(Node.id == int(node['id'])).\ - update({"status": new_status}, - synchronize_session='fetch') - session.flush() - session.commit() - - # Generate a status message per LB for the alert. - for lbid in lbids: - message = errormsg[lbid] - if lbid in failed_nodes: - message += ' failed:' - message += ','.join(str(x) for x in failed_nodes[lbid]) - message += '\n' - - if lbid in repaired_nodes: - message += ' repaired: ' - message += ','.join(str(x) for x in repaired_nodes[lbid]) - - # Send the LB node change alert - if lbid in degraded: - is_degraded = True - else: - is_degraded = False - for driver in self.drivers: - instance = driver() - LOG.info( - 'Sending change of node status on LB {0} to {1}'.format( - lbid, instance.__class__.__name__) - ) - - try: - instance.send_node_change(message, lbid, is_degraded) - except NotImplementedError: - pass - - def start_ping_sched(self): - # Always try to hit the expected second mark for pings - seconds = datetime.now().second - if seconds < self.PING_SECONDS: - sleeptime = self.PING_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.PING_SECONDS) - - LOG.info('LB ping check timer sleeping for %d seconds', sleeptime) - self.ping_timer = threading.Timer(sleeptime, self.ping_lbs, ()) - self.ping_timer.start() diff --git a/libra/admin_api/stats/stats_gearman.py b/libra/admin_api/stats/stats_gearman.py deleted file mode 100644 index f180048b..00000000 --- a/libra/admin_api/stats/stats_gearman.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
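In the offline_check method that follows, a device is only marked failed when more than a third of its gearman health checks fail, i.e. roughly two thirds must pass. Since this code ran on Python 2, the plain / on integers is floor division; a worked example with invented counts:

    gearman_count = 3                      # checks performed (example)
    gearman_fail = 2                       # checks that failed (example)
    max_fail_count = gearman_count // 3    # floor division: 1
    device_failed = gearman_fail > max_fail_count   # True: 2 of 3 failed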
- -from gearman.constants import JOB_UNKNOWN -from oslo.config import cfg -from libra.common.json_gearman import JSONGearmanClient -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class GearJobs(object): - def __init__(self): - self.poll_timeout = cfg.CONF['admin_api']['stats_poll_timeout'] - self.poll_retry = cfg.CONF['admin_api']['stats_poll_timeout_retry'] - - server_list = [] - for server in cfg.CONF['gearman']['servers']: - host, port = server.split(':') - server_list.append({'host': host, - 'port': int(port), - 'keyfile': cfg.CONF['gearman']['ssl_key'], - 'certfile': cfg.CONF['gearman']['ssl_cert'], - 'ca_certs': cfg.CONF['gearman']['ssl_ca'], - 'keepalive': cfg.CONF['gearman']['keepalive'], - 'keepcnt': cfg.CONF['gearman']['keepcnt'], - 'keepidle': cfg.CONF['gearman']['keepidle'], - 'keepintvl': cfg.CONF['gearman']['keepintvl'] - }) - self.gm_client = JSONGearmanClient(server_list) - - def send_pings(self, node_list): - # TODO: lots of duplicated code that needs cleanup - list_of_jobs = [] - failed_list = [] - node_status = dict() - retry_list = [] - # The message name is STATS for historical reasons. Real - # data statistics are gathered with METRICS messages. - job_data = {"hpcs_action": "STATS"} - for node in node_list: - list_of_jobs.append(dict(task=str(node), data=job_data)) - submitted_pings = self.gm_client.submit_multiple_jobs( - list_of_jobs, background=False, wait_until_complete=True, - poll_timeout=self.poll_timeout - ) - for ping in submitted_pings: - if ping.state == JOB_UNKNOWN: - # TODO: Gearman server failed, ignoring for now - LOG.error('Gearman Job server fail') - continue - if ping.timed_out: - # Ping timeout - retry_list.append(ping.job.task) - continue - if ping.result['hpcs_response'] == 'FAIL': - if ( - 'status' in ping.result and - ping.result['status'] == 'DELETED' - ): - continue - # Error returned by Gearman - failed_list.append(ping.job.task) - continue - else: - if 'nodes' in ping.result: - node_status[ping.job.task] = ping.result['nodes'] - - list_of_jobs = [] - if len(retry_list) > 0: - LOG.info( - "{0} pings timed out, retrying".format(len(retry_list)) - ) - for node in retry_list: - list_of_jobs.append(dict(task=str(node), data=job_data)) - submitted_pings = self.gm_client.submit_multiple_jobs( - list_of_jobs, background=False, wait_until_complete=True, - poll_timeout=self.poll_retry - ) - for ping in submitted_pings: - if ping.state == JOB_UNKNOWN: - # TODO: Gearman server failed, ignoring for now - LOG.error('Gearman Job server fail') - continue - if ping.timed_out: - # Ping timeout - failed_list.append(ping.job.task) - continue - if ping.result['hpcs_response'] == 'FAIL': - if ( - 'status' in ping.result and - ping.result['status'] == 'DELETED' - ): - continue - # Error returned by Gearman - failed_list.append(ping.job.task) - continue - else: - if 'nodes' in ping.result: - node_status[ping.job.task] = ping.result['nodes'] - - return failed_list, node_status - - def offline_check(self, node_list): - list_of_jobs = [] - failed_list = [] - job_data = {"hpcs_action": "DIAGNOSTICS"} - for node in node_list: - list_of_jobs.append(dict(task=str(node), data=job_data)) - submitted_pings = self.gm_client.submit_multiple_jobs( - list_of_jobs, background=False, wait_until_complete=True, - poll_timeout=self.poll_timeout - ) - for ping in submitted_pings: - if ping.state == JOB_UNKNOWN: - LOG.error( - "Gearman Job server failed during OFFLINE check of {0}". 
- format(ping.job.task) - ) - elif ping.timed_out: - failed_list.append(ping.job.task) - elif ping.result['network'] == 'FAIL': - failed_list.append(ping.job.task) - else: - gearman_count = 0 - gearman_fail = 0 - for gearman_test in ping.result['gearman']: - gearman_count += 1 - if gearman_test['status'] == 'FAIL': - gearman_fail += 1 - # Need 2/3rds gearman up - max_fail_count = gearman_count / 3 - if gearman_fail > max_fail_count: - failed_list.append(ping.job.task) - return failed_list - - def get_discover(self, name): - # Used in the v2 devices controller - job_data = {"hpcs_action": "DISCOVER"} - job = self.gm_client.submit_job( - str(name), job_data, background=False, wait_until_complete=True, - poll_timeout=10 - ) - if job.state == JOB_UNKNOWN: - # Gearman server failed - return None - elif job.timed_out: - # Time out is a fail - return None - elif job.result['hpcs_response'] == 'FAIL': - # Fail response is a fail - return None - return job.result - - def get_stats(self, node_list): - # TODO: lots of duplicated code that needs cleanup - list_of_jobs = [] - failed_list = [] - retry_list = [] - results = {} - job_data = {"hpcs_action": "METRICS"} - for node in node_list: - list_of_jobs.append(dict(task=str(node), data=job_data)) - submitted_stats = self.gm_client.submit_multiple_jobs( - list_of_jobs, background=False, wait_until_complete=True, - poll_timeout=self.poll_timeout - ) - for stats in submitted_stats: - if stats.state == JOB_UNKNOWN: - # TODO: Gearman server failed, ignoring for now - retry_list.append(stats.job.task) - elif stats.timed_out: - # Timeout - retry_list.append(stats.job.task) - elif stats.result['hpcs_response'] == 'FAIL': - # Error returned by Gearman - failed_list.append(stats.job.task) - else: - # Success - results[stats.job.task] = stats.result - - list_of_jobs = [] - if len(retry_list) > 0: - LOG.info( - "{0} Statistics gathering timed out, retrying". - format(len(retry_list)) - ) - for node in retry_list: - list_of_jobs.append(dict(task=str(node), data=job_data)) - submitted_stats = self.gm_client.submit_multiple_jobs( - list_of_jobs, background=False, wait_until_complete=True, - poll_timeout=self.poll_retry - ) - for stats in submitted_stats: - if stats.state == JOB_UNKNOWN: - # TODO: Gearman server failed, ignoring for now - LOG.error( - "Gearman Job server failed gathering statistics " - "on {0}".format(stats.job.task) - ) - failed_list.append(stats.job.task) - elif stats.timed_out: - # Timeout - failed_list.append(stats.job.task) - elif stats.result['hpcs_response'] == 'FAIL': - # Error returned by Gearman - failed_list.append(stats.job.task) - else: - # Success - results[stats.job.task] = stats.result - - return failed_list, results diff --git a/libra/admin_api/stats/stats_sched.py b/libra/admin_api/stats/stats_sched.py deleted file mode 100644 index 6d8594fe..00000000 --- a/libra/admin_api/stats/stats_sched.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
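The stats scheduler that follows (like the billing scheduler earlier in this patch) guards each cycle with a freshness check on a Billing.last_update row, so only one server in the cluster does the work per window. A rough sketch of the gate, with an invented helper name:

    import datetime

    def should_run(last_update, freq_minutes, utcnow):
        # Skip the cycle if any server already ran it inside the window.
        if last_update is None:
            return True
        return last_update <= utcnow - datetime.timedelta(
            minutes=freq_minutes)

    # e.g. freq_minutes=5, last_update=12:00: a server waking at 12:03
    # skips; one waking at 12:06 runs and bumps last_update.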
- -import threading -import datetime - -from oslo.config import cfg -from libra.common.api.lbaas import LoadBalancer, Device, db_session -from libra.common.api.lbaas import Billing, Stats -from libra.admin_api.stats.stats_gearman import GearJobs -from libra.openstack.common import timeutils -from libra.openstack.common import log as logging -from sqlalchemy.sql import func - - -LOG = logging.getLogger(__name__) - - -class UsageStats(object): - - STATS_SECONDS = cfg.CONF['admin_api'].stats_timer_seconds - - def __init__(self, drivers): - self.drivers = drivers - self.stats_timer = None - self.server_id = cfg.CONF['admin_api']['server_id'] - self.number_of_servers = cfg.CONF['admin_api']['number_of_servers'] - self.stats_freq = cfg.CONF['admin_api'].stats_freq - - self.start_stats_sched() - - def shutdown(self): - if self.stats_timer: - self.stats_timer.cancel() - - def gather_stats(self): - # Work out if it is our turn to run - minute = datetime.datetime.now().minute - if self.server_id != minute % self.number_of_servers: - self.start_stats_sched() - return - total = 0 - fail = 0 - try: - fail, total = self._exec_stats() - except Exception: - LOG.exception('Uncaught exception during stats collection') - - # Need to restart timer after every stats cycle - LOG.info('{total} lb device stats queried, {fail} failed' - .format(total=total, fail=fail)) - self.start_stats_sched() - - def _exec_stats(self): - failed = 0 - node_list = [] - with db_session() as session: - delta = datetime.timedelta(minutes=self.stats_freq) - exp = timeutils.utcnow() - delta - exp_time = exp.strftime('%Y-%m-%d %H:%M:%S') - - updated = session.query( - Billing.last_update - ).filter(Billing.name == "stats").\ - filter(Billing.last_update > exp_time).\ - first() - - if updated is not None: - # Not time yet - LOG.info('Not time to gather stats yet {0}'.format(exp_time)) - session.rollback() - return 0, 0 - - # Update the stats timestamp - session.query(Billing).\ - filter(Billing.name == "stats").\ - update({"last_update": func.now()}, - synchronize_session='fetch') - - # Get all the online devices to query for stats - devices = session.query( - Device.id, Device.name - ).filter(Device.status == 'ONLINE').all() - - if devices is None or len(devices) == 0: - LOG.error('No ONLINE devices to gather usage stats from') - session.rollback() - return 0, 0 - total = len(devices) - - for device in devices: - node_list.append(device.name) - gearman = GearJobs() - failed_list, results = gearman.get_stats(node_list) - failed = len(failed_list) - - if failed > 0: - self._send_fails(failed_list) - - if total > failed: - # We have some success - self._update_stats(results, failed_list) - session.commit() - else: - # Everything failed. 
Retry these on the next timer firing - session.rollback() - - return failed, total - - def _update_stats(self, results, failed_list): - with db_session() as session: - lbs = session.query( - LoadBalancer.id, - LoadBalancer.protocol, - LoadBalancer.status, - Device.name - ).join(LoadBalancer.devices).\ - filter(Device.status == 'ONLINE').all() - - if lbs is None: - session.rollback() - LOG.error('No Loadbalancers found when updating stats') - return - - total = len(lbs) - added = 0 - for lb in lbs: - if lb.name not in results: - if lb.name not in failed_list: - LOG.error( - 'No stats results found for Device {0}, LBID {1}' - .format(lb.name, lb.id)) - continue - - result = results[lb.name] - protocol = lb.protocol.lower() - if protocol != "http": - # GALERA or TCP = TCP at the worker - protocol = "tcp" - - bytes_out = -1 - for data in result["loadBalancers"]: - if data["protocol"] == protocol: - bytes_out = data["bytes_out"] - - if bytes_out == -1: - LOG.error( - 'No stats found for Device {0}, ' - 'LBID {1}, protocol {2}' - .format(lb.name, lb.id, protocol)) - continue - - new_entry = Stats() - new_entry.lbid = lb.id - new_entry.period_start = result["utc_start"] - new_entry.period_end = result["utc_end"] - new_entry.bytes_out = bytes_out - new_entry.status = lb.status - session.add(new_entry) - session.flush() - added += 1 - session.commit() - LOG.info( - '{total} loadbalancers stats queried, {fail} failed' - .format(total=total, fail=total - added)) - - def _send_fails(self, failed_list): - with db_session() as session: - for device_name in failed_list: - data = self._get_lb(device_name, session) - if not data: - LOG.error( - 'Device {0} has no Loadbalancer attached during ' - 'statistics gathering'.format(device_name) - ) - continue - - LOG.error( - 'Load balancer failed statistics gathering request ' - 'ID: {0}\n' - 'IP: {1}\n' - 'tenant: {2}\n'.format( - data.id, data.floatingIpAddr, - data.tenantid)) - - def _get_lb(self, lb, session): - lb = session.query( - LoadBalancer.tenantid, Device.floatingIpAddr, Device.id - ).join(LoadBalancer.devices).\ - filter(Device.name == lb).first() - - return lb - - def start_stats_sched(self): - # Always try to hit the expected second mark for stats - seconds = datetime.datetime.now().second - if seconds < self.STATS_SECONDS: - sleeptime = self.STATS_SECONDS - seconds - else: - sleeptime = 60 - (seconds - self.STATS_SECONDS) - - LOG.info('LB stats timer sleeping for {secs} seconds' - .format(secs=sleeptime)) - self.stats_timer = threading.Timer(sleeptime, self.gather_stats, ()) - self.stats_timer.start() diff --git a/libra/api/__init__.py b/libra/api/__init__.py deleted file mode 100644 index 354525a2..00000000 --- a/libra/api/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
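Among the [api] options registered below is disable_keystone, which lets a test environment skip the keystone middleware (AuthDirector in acl.py consults it). Assuming the options have been registered and parsed, a test might flip it with oslo.config's standard override hook; this is a sketch, not project-provided tooling:

    from oslo.config import cfg

    # Test-only: serve the API unauthenticated, as warned in the
    # option's help text below.
    cfg.CONF.set_override('disable_keystone', True, group='api')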
- -from oslo.config import cfg - - -api_group = cfg.OptGroup('api', 'Libra API options') - -cfg.CONF.register_group(api_group) - -cfg.CONF.register_opts( - [ - cfg.ListOpt('db_sections', - required=True, - help='MySQL config sections in the config file'), - cfg.BoolOpt('disable_keystone', - default=False, - help='Unauthenticated server, for testing only'), - cfg.StrOpt('host', - default='0.0.0.0', - help='IP address to bind to, 0.0.0.0 for all IPs'), - cfg.ListOpt('ip_filters', - help='IP filters for backend nodes in the form ' - 'xxx.xxx.xxx.xxx/yy'), - cfg.StrOpt('keystone_module', - default='keystoneclient.middleware.auth_token:AuthProtocol', - help='A colon separated module and class for keystone ' - ' middleware'), - cfg.StrOpt('pid', - default='/var/run/libra/libra_api.pid', - help='PID file'), - cfg.IntOpt('port', - default=443, - help='Port number for API server'), - cfg.StrOpt('ssl_certfile', - help='Path to an SSL certificate file'), - cfg.StrOpt('ssl_keyfile', - help='Path to an SSL key file'), - cfg.StrOpt('swift_basepath', - required=True, - help='Default Swift container to place log files'), - cfg.StrOpt('swift_endpoint', - required=True, - help='Default endpoint URL (tenant ID will be appended' - ' to this)'), - ], - group=api_group -) diff --git a/libra/api/acl.py b/libra/api/acl.py deleted file mode 100644 index e82597a8..00000000 --- a/libra/api/acl.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ConfigParser -import importlib - -from oslo.config import cfg -from pecan import request - -from libra.api.library.exp import NotAuthorized -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -def get_limited_to_project(headers): - """Return the tenant the request should be limited to.""" - tenant_id = headers.get('X-Tenant-Id') - LOG.info( - 'Loadbalancers {0} request {1} ({2}) from {3} tenant {4}'.format( - request.environ.get('REQUEST_METHOD'), - request.environ.get('PATH_INFO'), - request.environ.get('QUERY_STRING'), - request.environ.get('REMOTE_ADDR'), - tenant_id - ) - ) - if not tenant_id: - raise NotAuthorized('No tenant ID provided by authentication system') - - return tenant_id - - -class AuthDirector(object): - """ There are some paths we want to work unauthenticated. This class - will direct intentionally unauthenticated requests to the relevant - controllers. 
""" - - def __init__(self, app): - self.unauthed_app = app - if not cfg.CONF['api']['disable_keystone']: - self.app = self._install() - else: - self.app = app - - def __call__(self, env, start_response): - uri = env['PATH_INFO'] - if uri == '/' or uri == '/v1.1' or uri == '/v1.1/': - return self.unauthed_app(env, start_response) - else: - return self.app(env, start_response) - - def _install(self): - """Install ACL check on application.""" - config = ConfigParser.SafeConfigParser() - config.read(cfg.CONF['config_file']) - module_details = cfg.CONF['api']['keystone_module'].split(':') - keystone = importlib.import_module(module_details[0]) - auth_class = getattr(keystone, module_details[1]) - return auth_class(self.unauthed_app, config._sections['keystone']) diff --git a/libra/api/app.py b/libra/api/app.py deleted file mode 100644 index 64212444..00000000 --- a/libra/api/app.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import eventlet -eventlet.monkey_patch() - -import daemon -import daemon.pidfile -import daemon.runner -import grp -import logging as std_logging -import pwd -import pecan -import sys -import wsme_overrides - -from eventlet import wsgi - -from libra import __version__ -from libra.api import config as api_config -from libra.api import model -from libra.api import acl -from libra.common.api import server -from libra.common.log import get_descriptors -from libra.common.options import CONF -from libra.common.options import add_common_opts -from libra.common.options import check_gearman_ssl_files -from libra.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -# Gets rid of pep8 error -assert wsme_overrides - - -def get_pecan_config(): - # Set up the pecan configuration - filename = api_config.__file__.replace('.pyc', '.py') - return pecan.configuration.conf_from_file(filename) - - -def setup_app(pecan_config): - - model.init_model() - - if not pecan_config: - pecan_config = get_pecan_config() - config = dict(pecan_config) - config['database'] = CONF['api']['db_sections'] - config['swift'] = { - 'swift_basepath': CONF['api']['swift_basepath'], - 'swift_endpoint': CONF['api']['swift_endpoint'] - } - config['gearman'] = { - 'server': CONF['gearman']['servers'], - 'ssl_key': CONF['gearman']['ssl_key'], - 'ssl_cert': CONF['gearman']['ssl_cert'], - 'ssl_ca': CONF['gearman']['ssl_ca'], - 'keepalive': CONF['gearman']['keepalive'], - 'keepcnt': CONF['gearman']['keepcnt'], - 'keepidle': CONF['gearman']['keepidle'], - 'keepintvl': CONF['gearman']['keepintvl'] - } - config['ip_filters'] = CONF['api']['ip_filters'] - if CONF['debug']: - config['wsme'] = {'debug': True} - config['app']['debug'] = True - - pecan.configuration.set_config(config, overwrite=True) - - app = pecan.make_app( - pecan_config.app.root, - static_root=pecan_config.app.static_root, - template_path=pecan_config.app.template_path, - debug=getattr(pecan_config.app, 'debug', False), - 
force_canonical=getattr(pecan_config.app, 'force_canonical', True), - guess_content_type_from_ext=getattr( - pecan_config.app, - 'guess_content_type_from_ext', - True) - ) - - final_app = acl.AuthDirector(app) - return final_app - - -class LogStdout(object): - def write(self, data): - if data.strip() != '': - LOG.info(data) - - # Gearman calls this - def flush(self): - pass - - -def main(): - add_common_opts() - CONF(project='libra', version=__version__) - - logging.setup('libra') - - LOG.debug('Configuration:') - CONF.log_opt_values(LOG, std_logging.DEBUG) - - pc = get_pecan_config() - - # NOTE: Let's not force anyone to actually have to use SSL, it shouldn't be - # up to us to decide. - sock = server.make_socket(CONF['api']['host'], - CONF['api']['port'], - CONF['api']['ssl_keyfile'], - CONF['api']['ssl_certfile']) - - if CONF['daemon']: - pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['api']['pid'], 10) - if daemon.runner.is_pidfile_stale(pidfile): - pidfile.break_lock() - - descriptors = get_descriptors() - descriptors.append(sock.fileno()) - - context = daemon.DaemonContext( - working_directory='/', - umask=0o022, - pidfile=pidfile, - files_preserve=descriptors - ) - if CONF['user']: - context.uid = pwd.getpwnam(CONF['user']).pw_uid - if CONF['group']: - context.gid = grp.getgrnam(CONF['group']).gr_gid - context.open() - - try: - check_gearman_ssl_files() - except Exception as e: - LOG.critical(str(e)) - return - - LOG.info('Starting on %s:%d', CONF.api.host, CONF.api.port) - api = setup_app(pc) - sys.stderr = LogStdout() - - wsgi.server(sock, api, keepalive=False, debug=CONF['debug']) - - return 0 diff --git a/libra/api/config.py b/libra/api/config.py deleted file mode 100644 index 42779a48..00000000 --- a/libra/api/config.py +++ /dev/null @@ -1,26 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Pecan Application Configurations -app = { - 'root': 'libra.api.controllers.root.RootController', - 'modules': ['libra.api'], - 'static_root': '%(confdir)s/public', - 'template_path': '%(confdir)s/api/templates', - 'errors': { - 404: '/notfound', - '__force_dict__': True - } -} diff --git a/libra/api/controllers/__init__.py b/libra/api/controllers/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/api/controllers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
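The API controllers below are pecan RestControllers, so HTTP verbs map onto method names rather than being routed explicitly. A minimal standalone sketch of that convention (EchoController is invented for illustration):

    from pecan import expose
    from pecan.rest import RestController

    class EchoController(RestController):
        # Pecan dispatches on the request method: GET /echo -> get(),
        # POST /echo -> post(), DELETE /echo -> delete().
        @expose('json')
        def get(self):
            return {'hello': 'world'}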
diff --git a/libra/api/controllers/connection_throttle.py b/libra/api/controllers/connection_throttle.py deleted file mode 100644 index 5682302e..00000000 --- a/libra/api/controllers/connection_throttle.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import response -from pecan.rest import RestController - - -class ConnectionThrottleController(RestController): - """functions for /loadbalancers/{loadBalancerId}/connectionthrottle/* - routing""" - - def get(self, load_balancer_id): - """List connection throttling configuration. - - :param load_balancer_id: id of lb - - Url: - GET /loadbalancers/{load_balancer_id}/connectionthrottle - - Returns: dict - """ - response.status = 201 - return None - - def post(self, load_balancer_id, *args): - """Update throttling configuration. - - :param load_balancer_id: id of lb - :param *args: holds the posted json or xml - - Url: - POST /loadbalancers/{load_balancer_id}/connectionthrottle - - Returns: dict - """ - response.status = 201 - return None - - def delete(self, load_balancer_id): - """Remove connection throttling configurations. - - :param load_balancer_id: id of lb - - Url: - DELETE /loadbalancers/{load_balancer_id}/connectionthrottle - - Returns: void - """ - response.status = 201 diff --git a/libra/api/controllers/health_monitor.py b/libra/api/controllers/health_monitor.py deleted file mode 100644 index 0ba693f6..00000000 --- a/libra/api/controllers/health_monitor.py +++ /dev/null @@ -1,306 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
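The health monitor controller that follows validates PUT bodies fairly strictly: delay and timeout must each be 1-3600 seconds with timeout no greater than delay, attemptsBeforeDeactivation must be 1-10, and path is rejected for CONNECT monitors but required (with a leading /) for all other types. An illustrative request body that passes those checks (values invented):

    monitor_put_body = {
        "type": "HTTP",
        "delay": 30,
        "timeout": 15,
        "attemptsBeforeDeactivation": 2,
        "path": "/healthcheck",   # must start with '/'
    }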
- -from pecan import request, response -from pecan.rest import RestController -import wsmeext.pecan as wsme_pecan -from wsme.exc import ClientSideError -from wsme import Unset -from urllib import quote -from libra.common.api.lbaas import LoadBalancer, db_session -from libra.common.api.lbaas import Device, HealthMonitor, Counters -from libra.api.acl import get_limited_to_project -from libra.api.model.validators import LBMonitorPut, LBMonitorResp -from libra.common.api.gearman_client import submit_job -from libra.api.library.exp import NotFound, ImmutableEntity, ImmutableStates - - -class HealthMonitorController(RestController): - - TIMEOUT_LIMIT = 3600 - DELAY_LIMIT = 3600 - PATH_LIMIT = 2000 - - """functions for /loadbalancers/{loadBalancerId}/healthmonitor routing""" - def __init__(self, load_balancer_id=None): - self.lbid = load_balancer_id - - @wsme_pecan.wsexpose(None) - def get(self): - """Retrieve the health monitor configuration, if one exists. - Url: - GET /loadbalancers/{load_balancer_id}/healthmonitor - - Returns: dict - """ - if not self.lbid: - raise ClientSideError('Load Balancer ID has not been supplied') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - # grab the lb - monitor = session.query( - HealthMonitor.type, HealthMonitor.delay, - HealthMonitor.timeout, HealthMonitor.attempts, - HealthMonitor.path - ).join(LoadBalancer.monitors).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').\ - first() - - response.status = 200 - if monitor is None: - session.rollback() - return {} - - monitor_data = { - 'type': monitor.type, - 'delay': monitor.delay, - 'timeout': monitor.timeout, - 'attemptsBeforeDeactivation': monitor.attempts - } - - if monitor.path: - monitor_data['path'] = monitor.path - - counter = session.query(Counters).\ - filter(Counters.name == 'api_healthmonitor_get').first() - counter.value += 1 - - session.commit() - return monitor_data - - @wsme_pecan.wsexpose(LBMonitorResp, body=LBMonitorPut, status_code=202) - def put(self, body=None): - """Update the settings for a health monitor. 
- - :param load_balancer_id: id of lb - :param *args: holds the posted json or xml data - - Url: - PUT /loadbalancers/{load_balancer_id}/healthmonitor - - Returns: dict - """ - if not self.lbid: - raise ClientSideError('Load Balancer ID has not been supplied') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - # grab the lb - query = session.query(LoadBalancer, HealthMonitor).\ - outerjoin(LoadBalancer.monitors).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').first() - - if query is None: - session.rollback() - raise NotFound("Load Balancer not found") - - lb, monitor = query - - if lb is None: - session.rollback() - raise NotFound('Load Balancer not found') - - # Check inputs - if ( - body.type == Unset or - body.type is None or - body.delay == Unset or - body.delay is None or - body.timeout == Unset or - body.timeout is None or - body.attemptsBeforeDeactivation == Unset or - body.attemptsBeforeDeactivation is None - ): - session.rollback() - raise ClientSideError( - "Missing field(s): {0}, {1}, {2}, and {3} are required" - .format("type", "delay", "timeout", - "attemptsBeforeDeactivation") - ) - - data = { - "lbid": self.lbid, - "type": body.type, - "delay": int(body.delay), - "timeout": int(body.timeout), - "attempts": int(body.attemptsBeforeDeactivation) - } - - # Path only required when type is not CONNECT - if body.path != Unset and body.path is not None: - if body.type == "CONNECT": - session.rollback() - raise ClientSideError( - "Path argument is invalid with CONNECT type" - ) - # Encode everything apart from allowed characters - # This ignore list in the second parameter is everything in - # RFC3986 section 2 that isn't already ignored by - # urllib.quote() - data["path"] = quote(body.path, "/~+*,;:!$'[]()?&=#%") - # If path is empty, set to / - if len(data["path"]) == 0 or data["path"][0] != "/": - session.rollback() - raise ClientSideError( - "Path must begin with leading /" - ) - - if len(data["path"]) > self.PATH_LIMIT: - raise ClientSideError( - "Path must be less than {0} characters" - .format(self.PATH_LIMIT) - ) - else: - if body.type != "CONNECT": - session.rollback() - raise ClientSideError( - "Path argument is required" - ) - data["path"] = None - - # Check timeout limits. Must be > 0 and limited to 1 hour - if data["timeout"] < 1 or data["timeout"] > self.TIMEOUT_LIMIT: - session.rollback() - raise ClientSideError( - "timeout must be between 1 and {0} seconds" - .format(self.TIMEOUT_LIMIT) - ) - - # Check delay limits. Must be > 0 and limited to 1 hour - if data["delay"] < 1 or data["delay"] > self.DELAY_LIMIT: - session.rollback() - raise ClientSideError( - "delay must be between 1 and {0} seconds" - .format(self.DELAY_LIMIT) - ) - - if data["timeout"] > data["delay"]: - session.rollback() - raise ClientSideError( - "timeout cannot be greater than delay" - ) - - if (data["attempts"] < 1 or data["attempts"] > 10): - session.rollback() - raise ClientSideError( - "attemptsBeforeDeactivation must be between 1 and 10" - ) - - if monitor is None: - # This is ok for LBs that already existed without - # monitoring. Create a new entry. - monitor = HealthMonitor( - lbid=self.lbid, type=data["type"], delay=data["delay"], - timeout=data["timeout"], attempts=data["attempts"], - path=data["path"] - ) - session.add(monitor) - else: - # Modify the existing entry. 
- monitor.type = data["type"] - monitor.delay = data["delay"] - monitor.timeout = data["timeout"] - monitor.attempts = data["attempts"] - monitor.path = data["path"] - - if lb.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Cannot modify a Load Balancer in a non-ACTIVE state' - ', current state: {0}' - .format(lb.status) - ) - - lb.status = 'PENDING_UPDATE' - device = session.query( - Device.id, Device.name, Device.status - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - - return_data = LBMonitorResp() - return_data.type = data["type"] - return_data.delay = str(data["delay"]) - return_data.timeout = str(data["timeout"]) - return_data.attemptsBeforeDeactivation =\ - str(data["attempts"]) - if ((data["path"] is not None) and (len(data["path"]) > 0)): - return_data.path = data["path"] - - counter = session.query(Counters).\ - filter(Counters.name == 'api_healthmonitor_modify').first() - counter.value += 1 - session.commit() - submit_job( - 'UPDATE', device.name, device.id, lb.id - ) - return return_data - - @wsme_pecan.wsexpose(None, status_code=202) - def delete(self): - """Remove the health monitor. - - :param load_balancer_id: id of lb - - Url: - DELETE /loadbalancers/{load_balancer_id}/healthmonitor - - Returns: void - """ - if not self.lbid: - raise ClientSideError('Load Balancer ID has not been supplied') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - query = session.query( - LoadBalancer, HealthMonitor - ).outerjoin(LoadBalancer.monitors).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - first() - - if query is None: - session.rollback() - raise NotFound("Load Balancer not found") - - lb, monitor = query - - if lb is None: - session.rollback() - raise NotFound("Load Balancer not found") - - if monitor is not None: - session.delete(monitor) - session.flush() - - device = session.query( - Device.id, Device.name - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_healthmonitor.delete').first() - counter.value += 1 - session.commit() - submit_job( - 'UPDATE', device.name, device.id, self.lbid - ) - return None diff --git a/libra/api/controllers/limits.py b/libra/api/controllers/limits.py deleted file mode 100644 index f4fa6943..00000000 --- a/libra/api/controllers/limits.py +++ /dev/null @@ -1,47 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
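The health-monitor PUT handler deleted above enforces a compact set of invariants: timeout and delay in [1, 3600] with timeout no greater than delay, attempts in [1, 10], and a path that must start with '/' and is only valid for non-CONNECT monitors. A framework-free sketch of the same checks (Python 3 shown; the original used Python 2's urllib.quote):

from urllib.parse import quote

TIMEOUT_LIMIT = 3600
DELAY_LIMIT = 3600
PATH_LIMIT = 2000


def validate_monitor(mtype, delay, timeout, attempts, path=None):
    if path is not None:
        if mtype == 'CONNECT':
            raise ValueError('Path argument is invalid with CONNECT type')
        # Escape everything except the RFC 3986 section 2 characters
        # that urllib would otherwise quote.
        path = quote(path, "/~+*,;:!$'[]()?&=#%")
        if not path.startswith('/'):
            raise ValueError('Path must begin with leading /')
        if len(path) > PATH_LIMIT:
            raise ValueError('Path must be less than %d characters'
                             % PATH_LIMIT)
    elif mtype != 'CONNECT':
        raise ValueError('Path argument is required')
    if not 1 <= timeout <= TIMEOUT_LIMIT:
        raise ValueError('timeout must be between 1 and %d' % TIMEOUT_LIMIT)
    if not 1 <= delay <= DELAY_LIMIT:
        raise ValueError('delay must be between 1 and %d' % DELAY_LIMIT)
    if timeout > delay:
        raise ValueError('timeout cannot be greater than delay')
    if not 1 <= attempts <= 10:
        raise ValueError('attemptsBeforeDeactivation must be between 1 and 10')
    return {'type': mtype, 'delay': delay, 'timeout': timeout,
            'attempts': attempts, 'path': path}


validate_monitor('HTTP', delay=30, timeout=10, attempts=3, path='/health')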
- -from pecan import expose, request -from pecan.rest import RestController -from libra.api.acl import get_limited_to_project -from libra.common.api.lbaas import Limits, Counters, TenantLimits, db_session - - -class LimitsController(RestController): - @expose('json') - def get(self): - resp = {} - tenant_id = get_limited_to_project(request.headers) - - with db_session() as session: - limits = session.query(Limits).all() - - # Get per-tenant values - tenant_lblimit = session.query(TenantLimits.loadbalancers).\ - filter(TenantLimits.tenantid == tenant_id).scalar() - - for limit in limits: - resp[limit.name] = limit.value - - # Set per-tenant values - if tenant_lblimit: - resp['maxLoadBalancers'] = tenant_lblimit - - resp = {"limits": {"absolute": {"values": resp}}} - counter = session.query(Counters).\ - filter(Counters.name == 'api_limits_get').first() - counter.value += 1 - session.commit() - return resp diff --git a/libra/api/controllers/load_balancers.py b/libra/api/controllers/load_balancers.py deleted file mode 100644 index b128f03a..00000000 --- a/libra/api/controllers/load_balancers.py +++ /dev/null @@ -1,808 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ipaddress -# pecan imports -from pecan import expose, abort, response, request -from pecan.rest import RestController -import wsmeext.pecan as wsme_pecan -from wsme.exc import ClientSideError -from wsme import Unset -# other controllers -from nodes import NodesController -from virtualips import VipsController -from health_monitor import HealthMonitorController -from logs import LogsController - -# models -from libra.common.api.lbaas import LoadBalancer, Device, Node, db_session -from libra.common.api.lbaas import TenantLimits -from libra.common.api.lbaas import loadbalancers_devices, Limits, Vip, Ports -from libra.common.api.lbaas import HealthMonitor, Counters -from libra.common.exc import ExhaustedError -from libra.api.model.validators import LBPut, LBPost, LBResp, LBVipResp -from libra.api.model.validators import LBRespNode, LBOptions -from libra.common.api.gearman_client import submit_job -from libra.api.acl import get_limited_to_project -from libra.api.library.exp import OverLimit, IPOutOfRange, NotFound -from libra.api.library.exp import ImmutableEntity, ImmutableStates -from libra.api.library.exp import ImmutableStatesNoError -from libra.api.library.ip_filter import ipfilter -from pecan import conf -from wsme import types as wtypes - - -class LoadBalancersController(RestController): - - LB_TIMEOUT_MS = 30000 - LB_TIMEOUT_MAX = 1000000 - LB_RETRIES = 3 - LB_RETRIES_MAX = 256 - - def __init__(self, lbid=None): - self.lbid = lbid - - @wsme_pecan.wsexpose(None, wtypes.text) - def get(self, status=None): - """Fetches a list of load balancers or the details of one balancer if - load_balancer_id is not empty. 
- - :param load_balancer_id: id of lb we want to get, if none it returns a - list of all - - Url: - GET /loadbalancers - List all load balancers configured for the account. - - Url: - GET /loadbalancers/{load_balancer_id} - List details of the specified load balancer. - - Returns: dict - """ - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - # if we don't have an id then we want a list of them own by this - # tenent - if not self.lbid: - if status and status == 'DELETED': - lbs = session.query( - LoadBalancer.name, LoadBalancer.id, - LoadBalancer.protocol, - LoadBalancer.port, LoadBalancer.algorithm, - LoadBalancer.status, LoadBalancer.created, - LoadBalancer.updated, LoadBalancer.timeout, - LoadBalancer.retries - ).filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status == 'DELETED').all() - else: - lbs = session.query( - LoadBalancer.name, LoadBalancer.id, - LoadBalancer.protocol, - LoadBalancer.port, LoadBalancer.algorithm, - LoadBalancer.status, LoadBalancer.created, - LoadBalancer.updated, LoadBalancer.timeout, - LoadBalancer.retries - ).filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').all() - load_balancers = {'loadBalancers': []} - - for lb in lbs: - lb = lb._asdict() - lb['nodeCount'] = session.query(Node).\ - filter(Node.lbid == lb['id']).count() - lb['id'] = str(lb['id']) - - # Unset options get set to default values - lb['options'] = {} - if lb['timeout']: - lb['options']['timeout'] = lb['timeout'] - else: - lb['options']['timeout'] = self.LB_TIMEOUT_MS - if lb['retries']: - lb['options']['retries'] = lb['retries'] - else: - lb['options']['retries'] = self.LB_RETRIES - del(lb['timeout']) - del(lb['retries']) - - load_balancers['loadBalancers'].append(lb) - else: - load_balancers = session.query( - LoadBalancer.name, LoadBalancer.id, LoadBalancer.protocol, - LoadBalancer.port, LoadBalancer.algorithm, - LoadBalancer.status, LoadBalancer.created, - LoadBalancer.updated, LoadBalancer.errmsg, - LoadBalancer.timeout, LoadBalancer.retries, - Vip.id.label('vipid'), Vip.ip - ).join(LoadBalancer.devices).\ - outerjoin(Device.vip).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - first() - - if not load_balancers: - session.rollback() - raise NotFound("Load Balancer ID not found") - - load_balancers = load_balancers._asdict() - load_balancers['nodeCount'] = session.query(Node).\ - filter(Node.lbid == load_balancers['id']).count() - - if load_balancers['vipid']: - load_balancers['virtualIps'] = [{ - "id": load_balancers['vipid'], - "type": "PUBLIC", - "ipVersion": "IPV4", - "address": str(ipaddress.IPv4Address( - load_balancers['ip'] - )), - }] - del(load_balancers['ip']) - del(load_balancers['vipid']) - else: - # We are still assigning a VIP - load_balancers['virtualIps'] = [{ - "id": None, - "type": "ASSIGNING", - "ipVersion": "IPV4", - "address": None - }] - del(load_balancers['vipid']) - nodes = session.query( - Node.id, Node.address, Node.port, Node.status, - Node.enabled, Node.weight - ).join(LoadBalancer.nodes).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - all() - - load_balancers['id'] = str(load_balancers['id']) - if not load_balancers['errmsg']: - load_balancers['statusDescription'] = '' - else: - load_balancers['statusDescription'] =\ - load_balancers['errmsg'] - del(load_balancers['errmsg']) - - load_balancers['nodes'] = [] - for item in nodes: - node = item._asdict() - if node['enabled'] == 1: - 
node['condition'] = 'ENABLED' - else: - node['condition'] = 'DISABLED' - del node['enabled'] - node['port'] = str(node['port']) - node['id'] = str(node['id']) - if node['weight'] == 1: - del node['weight'] - load_balancers['nodes'].append(node) - - # Unset options get set to default values - load_balancers['options'] = {} - if load_balancers['timeout']: - load_balancers['options']['timeout'] =\ - load_balancers['timeout'] - else: - load_balancers['options']['timeout'] = self.LB_TIMEOUT_MS - if load_balancers['retries']: - load_balancers['options']['retries'] =\ - load_balancers['retries'] - else: - load_balancers['options']['retries'] = self.LB_RETRIES - del(load_balancers['timeout']) - del(load_balancers['retries']) - - counter = session.query(Counters).\ - filter(Counters.name == 'api_loadbalancers_get').first() - counter.value += 1 - session.commit() - response.status = 200 - return load_balancers - - @wsme_pecan.wsexpose(LBResp, body=LBPost, status_code=202) - def post(self, body=None): - """Accepts edit if load_balancer_id isn't blank or create load balancer - posts. - - :param load_balancer_id: id of lb - :param *args: holds the posted json or xml data - - Urls: - POST /loadbalancers/{load_balancer_id} - PUT /loadbalancers - - Notes: - curl -i -H "Accept: application/json" -X POST \ - -d "data={"name": "my_lb"}" \ - http://dev.server:8080/loadbalancers/100 - - Returns: dict - """ - tenant_id = get_limited_to_project(request.headers) - if body.nodes == Unset or not len(body.nodes): - raise ClientSideError( - 'At least one backend node needs to be supplied' - ) - - # When the load balancer is used for Galera, we need to do some - # sanity checking of the nodes to make sure 1 and only 1 node is - # defined as the primary node. - if body.protocol and body.protocol.lower() == 'galera': - is_galera = True - else: - is_galera = False - num_galera_primary_nodes = 0 - - for node in body.nodes: - if node.address == Unset: - raise ClientSideError( - 'A supplied node has no address' - ) - if node.port == Unset: - raise ClientSideError( - 'Node {0} is missing a port'.format(node.address) - ) - if node.port < 1 or node.port > 65535: - raise ClientSideError( - 'Node {0} port number {1} is invalid' - .format(node.address, node.port) - ) - - try: - node.address = ipfilter(node.address, conf.ip_filters) - except IPOutOfRange: - raise ClientSideError( - 'IP Address {0} is not allowed as a backend node' - .format(node.address) - ) - except: - raise ClientSideError( - 'IP Address {0} not valid'.format(node.address) - ) - - if node.weight != Unset: - try: - weight = int(node.weight) - except ValueError: - raise ClientSideError( - 'Node weight must be an integer' - ) - if weight < 1 or weight > 256: - raise ClientSideError( - 'Node weight must be between 1 and 256' - ) - - is_backup = False - if node.backup != Unset and node.backup == 'TRUE': - is_backup = True - if is_galera and not is_backup: - num_galera_primary_nodes += 1 - - # Options defaults - timeout_ms = self.LB_TIMEOUT_MS - retries = self.LB_RETRIES - if body.options: - if body.options.timeout != Unset: - try: - timeout_ms = int(body.options.timeout) - if timeout_ms < 0 or timeout_ms > self.LB_TIMEOUT_MAX: - raise ClientSideError( - 'timeout must be between 0 and {0} ms' - .format(self.LB_TIMEOUT_MAX) - ) - except ValueError: - raise ClientSideError( - 'timeout must be an integer' - ) - if body.options.retries != Unset: - try: - retries = int(body.options.retries) - if retries < 0 or retries > self.LB_RETRIES_MAX: - raise ClientSideError( - 
'retries must be between 0 and {0}' - .format(self.LB_RETRIES_MAX) - ) - except ValueError: - raise ClientSideError( - 'retries must be an integer' - ) - - # Galera sanity checks - if is_galera and num_galera_primary_nodes != 1: - raise ClientSideError( - 'Galera load balancer must have exactly one primary node' - ) - - with db_session() as session: - lblimit = session.query(Limits.value).\ - filter(Limits.name == 'maxLoadBalancers').scalar() - nodelimit = session.query(Limits.value).\ - filter(Limits.name == 'maxNodesPerLoadBalancer').scalar() - namelimit = session.query(Limits.value).\ - filter(Limits.name == 'maxLoadBalancerNameLength').scalar() - count = session.query(LoadBalancer).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').count() - ports = session.query(Ports.protocol, Ports.portnum).\ - filter(Ports.enabled == 1).all() - - # Allow per-tenant LB limit, defaulting to the global limit if - # the per-tenant value is not set. - tenant_lblimit = session.query(TenantLimits.loadbalancers).\ - filter(TenantLimits.tenantid == tenant_id).scalar() - if tenant_lblimit: - lblimit = tenant_lblimit - - if len(body.name) > namelimit: - session.rollback() - raise ClientSideError( - 'Length of Load Balancer name too long' - ) - # TODO: this should probably be a 413, not sure how to do that yet - if count >= lblimit: - session.rollback() - raise OverLimit( - 'Account has hit limit of {0} Load Balancers'. - format(lblimit) - ) - if len(body.nodes) > nodelimit: - session.rollback() - raise OverLimit( - 'Too many backend nodes supplied (limit is {0})'. - format(nodelimit) - ) - - device = None - old_lb = None - # if we don't have an id then we want to create a new lb - lb = LoadBalancer() - lb.tenantid = tenant_id - lb.name = body.name - if body.protocol: - if body.protocol.lower() in ('tcp', 'http', 'galera'): - lb.protocol = body.protocol.upper() - else: - raise ClientSideError( - 'Invalid protocol %s' % body.protocol - ) - else: - lb.protocol = 'HTTP' - - if body.port: - if body.port < 1 or body.port > 65535: - raise ClientSideError( - 'Port number {0} is invalid'.format(body.port) - ) - # Make sure the port is valid and enabled - valid = False - for item in ports: - item = item._asdict() - if(lb.protocol == item["protocol"].upper() and - body.port == item["portnum"]): - valid = True - if valid: - lb.port = body.port - else: - raise ClientSideError( - 'Port number {0} is not allowed for {1} protocol' - .format(body.port, lb.protocol) - ) - else: - if lb.protocol == 'HTTP': - lb.port = 80 - elif lb.protocol == 'TCP': - lb.port = 443 - elif lb.protocol == 'GALERA': - lb.port = 3306 - - lb.status = 'BUILD' - lb.created = None - - if body.virtualIps == Unset: - # find free device - # lock with "for update" so multiple APIs don't grab the same - # LB - device = session.query(Device).\ - filter(~Device.id.in_( - session.query(loadbalancers_devices.c.device) - )).\ - filter(Device.status == "OFFLINE").\ - filter(Device.pingCount == 0).\ - with_lockmode('update').\ - first() - if device is None: - session.rollback() - raise ExhaustedError('No devices available') - - vip = None - else: - virtual_id = body.virtualIps[0].id - # Make sure virtual ID is actually an int - try: - virtual_id = int(virtual_id) - except: - session.rollback() - raise NotFound('Invalid virtual IP provided') - # This is an additional load balancer - device = session.query( - Device - ).join(Device.vip).\ - filter(Vip.id == virtual_id).\ - first() - - old_lb = session.query( - LoadBalancer - 
).join(LoadBalancer.devices).\ - join(Device.vip).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(Vip.id == virtual_id).\ - first() - - if old_lb.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Existing Load Balancer on VIP in a non-ACTIVE state' - ', current state: {0}' - .format(old_lb.status) - ) - - vip = session.query(Vip).\ - filter(Vip.device == device.id).\ - first() - if old_lb is None: - session.rollback() - raise NotFound('Invalid virtual IP provided') - - old_count = session.query( - LoadBalancer - ).join(LoadBalancer.devices).\ - join(Device.vip).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(Vip.id == virtual_id).\ - filter(LoadBalancer.port == lb.port).\ - count() - if old_count: - session.rollback() - # Error, can have only one LB per port on a device - raise ClientSideError( - 'Only one load balancer per port allowed per device' - ) - - if lb.protocol == 'HTTP': - protocol_count = session.query( - LoadBalancer - ).join(LoadBalancer.devices).\ - join(Device.vip).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(Vip.id == virtual_id).\ - filter(LoadBalancer.protocol == lb.protocol).\ - count() - else: - # TCP or GALERA. Both are TCP really - protocol_count = session.query( - LoadBalancer - ).join(LoadBalancer.devices).\ - join(Device.vip).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(Vip.id == virtual_id).\ - filter((LoadBalancer.protocol == 'TCP') | - (LoadBalancer.protocol == 'GALERA')).\ - count() - - if protocol_count: - session.rollback() - # Error, can have only one LB per protocol on a device - raise ClientSideError( - 'Only one load balancer per protocol' - ' allowed per device' - ) - - if body.algorithm: - lb.algorithm = body.algorithm.upper() - else: - lb.algorithm = 'ROUND_ROBIN' - - lb.timeout = timeout_ms - lb.retries = retries - - lb.devices = [device] - # write to database - session.add(lb) - session.flush() - # refresh the lb record so we get the id back - session.refresh(lb) - for node in body.nodes: - if node.condition == 'DISABLED': - enabled = 0 - node_status = 'OFFLINE' - else: - enabled = 1 - node_status = 'ONLINE' - - if node.backup == 'TRUE': - backup = 1 - else: - backup = 0 - - weight = 1 - if node.weight != Unset: - weight = node.weight - out_node = Node( - lbid=lb.id, port=node.port, address=node.address, - enabled=enabled, status=node_status, - weight=weight, backup=backup - ) - session.add(out_node) - - # now save the loadbalancer_id to the device and switch its status - # to build so the monitoring does not trigger early. 
- # The gearman message code will switch to ONLINE once we know - # everything is good - device.status = "BUILD" - session.flush() - - return_data = LBResp() - return_data.id = str(lb.id) - return_data.name = lb.name - return_data.protocol = lb.protocol - return_data.port = str(lb.port) - return_data.algorithm = lb.algorithm - return_data.status = lb.status - return_data.created = lb.created - return_data.updated = lb.updated - if vip: - vip_resp = LBVipResp( - address=str(ipaddress.IPv4Address(vip.ip)), - id=str(vip.id), type='PUBLIC', ipVersion='IPV4' - ) - else: - vip_resp = LBVipResp( - address=None, id=None, type='ASSIGNING', ipVersion='IPV4' - ) - return_data.virtualIps = [vip_resp] - - nodes = session.query( - Node.id, Node.address, Node.port, Node.status, - Node.enabled, Node.weight - ).join(LoadBalancer.nodes).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == lb.id).\ - all() - - return_data.nodes = [] - for node in nodes: - if node.enabled == 1: - condition = 'ENABLED' - else: - condition = 'DISABLED' - - if node.weight == 1: - return_data.nodes.append( - LBRespNode( - id=str(node.id), port=str(node.port), - address=node.address, condition=condition, - status=node.status - ) - ) - else: - return_data.nodes.append( - LBRespNode( - id=str(node.id), port=str(node.port), - address=node.address, condition=condition, - status=node.status, weight=str(node.weight) - ) - ) - - return_data.options = LBOptions(timeout=timeout_ms, - retries=retries) - - counter = session.query(Counters).\ - filter(Counters.name == 'api_loadbalancers_create').first() - counter.value += 1 - session.commit() - # trigger gearman client to create new lb - submit_job( - 'UPDATE', device.name, device.id, lb.id - ) - - return return_data - - @wsme_pecan.wsexpose(None, body=LBPut, status_code=202) - def put(self, body=None): - if not self.lbid: - raise ClientSideError('Load Balancer ID is required') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - # grab the lb - lb = session.query(LoadBalancer).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').first() - - if lb is None: - session.rollback() - raise NotFound('Load Balancer ID is not valid') - - if lb.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Cannot modify a Load Balancer in a non-ACTIVE state' - ', current state: {0}' - .format(lb.status) - ) - - if body.name != Unset: - namelimit = session.query(Limits.value).\ - filter(Limits.name == 'maxLoadBalancerNameLength').scalar() - if len(body.name) > namelimit: - session.rollback() - raise ClientSideError( - 'Length of Load Balancer name too long' - ) - lb.name = body.name - - if body.algorithm != Unset: - lb.algorithm = body.algorithm - - if body.options: - if body.options.timeout != Unset: - try: - timeout_ms = int(body.options.timeout) - if timeout_ms < 0 or timeout_ms > self.LB_TIMEOUT_MAX: - raise ClientSideError( - 'timeout must be between 0 and {0} ms' - .format(self.LB_TIMEOUT_MAX) - ) - lb.timeout = timeout_ms - except ValueError: - raise ClientSideError( - 'timeout must be an integer' - ) - if body.options.retries != Unset: - try: - retries = int(body.options.retries) - if retries < 0 or retries > self.LB_RETRIES_MAX: - raise ClientSideError( - 'retries must be between 0 and {0}' - .format(self.LB_RETRIES_MAX) - ) - lb.retries = retries - except ValueError: - raise ClientSideError( - 'retries must be an integer' - ) - - lb.status = 
'PENDING_UPDATE' - device = session.query( - Device.id, Device.name, Device.status - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_loadbalancers_modify').first() - counter.value += 1 - session.commit() - submit_job( - 'UPDATE', device.name, device.id, lb.id - ) - return '' - - @wsme_pecan.wsexpose(None, status_code=202) - def delete(self): - """Remove a load balancer from the account. - - :param load_balancer_id: id of lb - - Urls: - DELETE /loadbalancers/{load_balancer_id} - - Notes: - curl -i -H "Accept: application/json" -X DELETE - http://dev.server:8080/loadbalancers/1 - - Returns: None - """ - load_balancer_id = self.lbid - tenant_id = get_limited_to_project(request.headers) - # grab the lb - with db_session() as session: - lb = session.query(LoadBalancer).\ - filter(LoadBalancer.id == load_balancer_id).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').first() - - if lb is None: - session.rollback() - raise NotFound("Load Balancer ID is not valid") - # So we can delete ERROR, but not other Immutable states - if lb.status in ImmutableStatesNoError: - session.rollback() - raise ImmutableEntity( - 'Cannot delete a Load Balancer in a non-ACTIVE state' - ', current state: {0}' - .format(lb.status) - ) - lb.status = 'PENDING_DELETE' - device = session.query( - Device.id, Device.name - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == load_balancer_id).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_loadbalancers_delete').first() - counter.value += 1 - - if device is None: - # This can happen if a device was manually deleted from the DB - lb.status = 'DELETED' - session.execute(loadbalancers_devices.delete().where( - loadbalancers_devices.c.loadbalancer == lb.id - )) - session.query(Node).\ - filter(Node.lbid == lb.id).delete() - session.query(HealthMonitor).\ - filter(HealthMonitor.lbid == lb.id).delete() - session.commit() - else: - session.commit() - submit_job( - 'DELETE', device.name, device.id, lb.id - ) - - return None - - def usage(self, load_balancer_id): - """List current and historical usage. - - :param load_balancer_id: id of lb - - Url: - GET /loadbalancers/{load_balancer_id}/usage - - Returns: dict - """ - response.status = 201 - return None - - @expose('json') - def _lookup(self, lbid, *remainder): - """Routes more complex url mapping. - - Most things are /loadbalancer/{id}/function/... so this routes that - - Raises: 404 - """ - try: - lbid = int(lbid) - except ValueError: - raise abort(404) - - if len(remainder): - if remainder[0] == 'nodes': - return NodesController(lbid), remainder[1:] - if remainder[0] == 'virtualips': - return VipsController(lbid), remainder[1:] - if remainder[0] == 'logs': - return LogsController(lbid), remainder[1:] - if remainder[0] == 'healthmonitor': - return HealthMonitorController(lbid), remainder[1:] - - # Kludgy fix for PUT since WSME doesn't like IDs on the path - elif lbid: - return LoadBalancersController(lbid), remainder - abort(404) diff --git a/libra/api/controllers/logs.py b/libra/api/controllers/logs.py deleted file mode 100644 index b4349f26..00000000 --- a/libra/api/controllers/logs.py +++ /dev/null @@ -1,93 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import request -from pecan import conf -from pecan.rest import RestController -import wsmeext.pecan as wsme_pecan -from wsme.exc import ClientSideError -from wsme import Unset -from libra.common.api.lbaas import LoadBalancer, Device, db_session, Counters -from libra.api.acl import get_limited_to_project -from libra.api.model.validators import LBLogsPost -from libra.common.api.gearman_client import submit_job -from libra.api.library.exp import NotFound, ImmutableEntity, ImmutableStates - - -class LogsController(RestController): - def __init__(self, load_balancer_id=None): - self.lbid = load_balancer_id - - @wsme_pecan.wsexpose(None, body=LBLogsPost, status_code=202) - def post(self, body=None): - if self.lbid is None: - raise ClientSideError('Load Balancer ID has not been supplied') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - load_balancer = session.query(LoadBalancer).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - first() - if load_balancer is None: - session.rollback() - raise NotFound('Load Balancer not found') - - if load_balancer.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Cannot get logs from a Load Balancer in a non-ACTIVE ' - 'state, current state: {0}'.format(load_balancer.status) - ) - - load_balancer.status = 'PENDING_UPDATE' - device = session.query( - Device.id, Device.name, Device.status - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_log_archive').first() - counter.value += 1 - session.commit() - data = { - 'deviceid': device.id - } - if body.objectStoreType != Unset: - data['objectStoreType'] = body.objectStoreType.lower() - else: - data['objectStoreType'] = 'swift' - - if body.objectStoreBasePath != Unset: - data['objectStoreBasePath'] = body.objectStoreBasePath - else: - data['objectStoreBasePath'] = conf.swift.swift_basepath - - if body.objectStoreEndpoint != Unset: - data['objectStoreEndpoint'] = body.objectStoreEndpoint - else: - data['objectStoreEndpoint'] = '{0}/{1}'.\ - format(conf.swift.swift_endpoint.rstrip('/'), tenant_id) - - if body.authToken != Unset: - data['authToken'] = body.authToken - else: - data['authToken'] = request.headers.get('X-Auth-Token') - - submit_job( - 'ARCHIVE', device.name, data, self.lbid - ) - return diff --git a/libra/api/controllers/nodes.py b/libra/api/controllers/nodes.py deleted file mode 100644 index 81047390..00000000 --- a/libra/api/controllers/nodes.py +++ /dev/null @@ -1,440 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import expose, response, request, abort -from pecan.rest import RestController -import wsmeext.pecan as wsme_pecan -from wsme.exc import ClientSideError -from wsme import Unset -# default response objects -from libra.common.api.lbaas import LoadBalancer, Node, db_session, Limits -from libra.common.api.lbaas import Device, Counters -from libra.api.acl import get_limited_to_project -from libra.api.model.validators import LBNodeResp, LBNodePost, NodeResp -from libra.api.model.validators import LBNodePut -from libra.common.api.gearman_client import submit_job -from libra.api.library.exp import OverLimit, IPOutOfRange, NotFound -from libra.api.library.exp import ImmutableEntity, ImmutableStates -from libra.api.library.ip_filter import ipfilter -from pecan import conf - - -class NodesController(RestController): - """Functions for /loadbalancers/{load_balancer_id}/nodes/* routing""" - def __init__(self, lbid, nodeid=None): - self.lbid = lbid - self.nodeid = nodeid - - @wsme_pecan.wsexpose(None) - def get(self): - """List node(s) configured for the load balancer OR if - node_id == None .. Retrieve the configuration of node {node_id} of - loadbalancer {load_balancer_id}. - :param load_balancer_id: id of lb - :param node_id: id of node (optional) - - Urls: - GET /loadbalancers/{load_balancer_id}/nodes - GET /loadbalancers/{load_balancer_id}/nodes/{node_id} - - Returns: dict - """ - tenant_id = get_limited_to_project(request.headers) - - if not self.lbid: - raise ClientSideError('Load Balancer ID not supplied') - with db_session() as session: - if not self.nodeid: - nodes = session.query( - Node.id, Node.address, Node.port, Node.status, - Node.enabled, Node.weight - ).join(LoadBalancer.nodes).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - all() - - node_response = {'nodes': []} - for item in nodes: - node = item._asdict() - if node['enabled'] == 1: - node['condition'] = 'ENABLED' - else: - node['condition'] = 'DISABLED' - del node['enabled'] - if node['weight'] == 1: - del node['weight'] - node_response['nodes'].append(node) - - else: - node = session.query( - Node.id, Node.address, Node.port, Node.status, - Node.enabled, Node.weight - ).join(LoadBalancer.nodes).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - filter(Node.id == self.nodeid).\ - first() - - if node is None: - session.rollback() - raise NotFound('node not found') - - node_response = node._asdict() - if node_response['enabled'] == 1: - node_response['condition'] = 'ENABLED' - else: - node_response['condition'] = 'DISABLED' - del node_response['enabled'] - if node_response['weight'] == 1: - del node_response['weight'] - counter = session.query(Counters).\ - filter(Counters.name == 'api_node_get').first() - counter.value += 1 - session.commit() - response.status = 200 - return node_response - - @wsme_pecan.wsexpose(LBNodeResp, body=LBNodePost, status_code=202) - def post(self, body=None): - """Adds a new node to the load balancer OR Modify the configuration - of a node on the load 
balancer. - - :param load_balancer_id: id of lb - :param node_id: id of node (optional) when missing a new node is added. - :param *args: holds the posted json or xml data - - Urls: - POST /loadbalancers/{load_balancer_id}/nodes - PUT /loadbalancers/{load_balancer_id}/nodes/{node_id} - - Returns: dict of the full list of nodes or the details of the single - node - """ - tenant_id = get_limited_to_project(request.headers) - if self.lbid is None: - raise ClientSideError('Load Balancer ID has not been supplied') - - if body.nodes == Unset or not len(body.nodes): - raise ClientSideError('No nodes have been supplied') - - for node in body.nodes: - if node.address == Unset: - raise ClientSideError( - 'A supplied node has no address' - ) - if node.port == Unset: - raise ClientSideError( - 'Node {0} is missing a port'.format(node.address) - ) - if node.port < 1 or node.port > 65535: - raise ClientSideError( - 'Node {0} port number {1} is invalid' - .format(node.address, node.port) - ) - try: - node.address = ipfilter(node.address, conf.ip_filters) - except IPOutOfRange: - raise ClientSideError( - 'IP Address {0} is not allowed as a backend node' - .format(node.address) - ) - except: - raise ClientSideError( - 'IP Address {0} not valid'.format(node.address) - ) - - if node.weight != Unset: - try: - weight = int(node.weight) - except ValueError: - raise ClientSideError( - 'Node weight must be an integer' - ) - if weight < 1 or weight > 256: - raise ClientSideError( - 'Node weight must be between 1 and 256' - ) - with db_session() as session: - load_balancer = session.query(LoadBalancer).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - first() - if load_balancer is None: - session.rollback() - raise NotFound('Load Balancer not found') - - if load_balancer.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Cannot modify a Load Balancer in a non-ACTIVE state' - ', current state: {0}' - .format(load_balancer.status) - ) - - load_balancer.status = 'PENDING_UPDATE' - - # check if we are over limit - nodelimit = session.query(Limits.value).\ - filter(Limits.name == 'maxNodesPerLoadBalancer').scalar() - nodecount = session.query(Node).\ - filter(Node.lbid == self.lbid).count() - if (nodecount + len(body.nodes)) > nodelimit: - session.rollback() - raise OverLimit( - 'Command would exceed Load Balancer node limit' - ) - - return_data = LBNodeResp() - return_data.nodes = [] - - is_galera = False - if load_balancer.protocol.lower() == 'galera': - is_galera = True - - for node in body.nodes: - is_backup = False - if node.backup != Unset and node.backup == 'TRUE': - is_backup = True - - # Galera load balancer sanity checking. Only allowed to add - # backup nodes since a primary is presumably already defined. 
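# A consequence of the check below: POST against an existing Galera load
# balancer can only ever add backup nodes; the single primary node must
# come from the initial create.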
- if is_galera and not is_backup: - raise ClientSideError( - 'Galera load balancer may have only one primary node' - ) - if node.condition == 'DISABLED': - enabled = 0 - node_status = 'OFFLINE' - else: - enabled = 1 - node_status = 'ONLINE' - weight = 1 - if node.weight != Unset: - weight = node.weight - new_node = Node( - lbid=self.lbid, port=node.port, address=node.address, - enabled=enabled, status=node_status, - weight=weight, backup=int(is_backup) - ) - session.add(new_node) - session.flush() - if new_node.enabled: - condition = 'ENABLED' - else: - condition = 'DISABLED' - if weight == 1: - return_data.nodes.append( - NodeResp( - id=new_node.id, port=new_node.port, - address=new_node.address, condition=condition, - status=new_node.status - ) - ) - else: - return_data.nodes.append( - NodeResp( - id=new_node.id, port=new_node.port, - address=new_node.address, condition=condition, - status=new_node.status, weight=weight - ) - ) - - device = session.query( - Device.id, Device.name, Device.status - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_node_create').first() - counter.value += 1 - session.commit() - submit_job( - 'UPDATE', device.name, device.id, self.lbid - ) - return return_data - - @wsme_pecan.wsexpose(None, body=LBNodePut, status_code=202) - def put(self, body=None): - """ Update a node condition: ENABLED or DISABLED """ - if not self.lbid: - raise ClientSideError('Load Balancer ID has not been supplied') - if not self.nodeid: - raise ClientSideError('Node ID has not been supplied') - if body.condition == Unset and body.weight == Unset: - raise ClientSideError('Node condition or weight is required') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - # grab the lb - lb = session.query(LoadBalancer).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.status != 'DELETED').first() - - if lb is None: - session.rollback() - raise NotFound('Load Balancer ID is not valid') - - node = session.query(Node).\ - filter(Node.lbid == self.lbid).\ - filter(Node.id == self.nodeid).first() - - if node is None: - session.rollback() - raise NotFound('Node ID is not valid') - - if body.condition != Unset: - if body.condition == 'DISABLED': - nodecount = session.query(Node).\ - filter(Node.lbid == self.lbid).\ - filter(Node.enabled == 1).count() - if nodecount <= 1: - session.rollback() - raise ClientSideError( - "Cannot disable the last enabled node" - ) - node.enabled = 0 - node.status = 'OFFLINE' - else: - node.enabled = 1 - node.status = 'ONLINE' - - if body.weight != Unset: - try: - node.weight = int(body.weight) - except ValueError: - raise ClientSideError( - 'Node weight must be an integer' - ) - if node.weight < 1 or node.weight > 256: - raise ClientSideError( - 'Node weight must be between 1 and 256' - ) - - if lb.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Cannot modify a Load Balancer in a non-ACTIVE state' - ', current state: {0}' - .format(lb.status) - ) - - lb.status = 'PENDING_UPDATE' - - device = session.query( - Device.id, Device.name, Device.status - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_node_modify').first() - counter.value += 1 - session.commit() - submit_job( - 'UPDATE', device.name, device.id, lb.id - ) - return '' - - 
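# put() above and delete() below apply the same write guard: a load
# balancer whose status is in ImmutableStates (libra/api/library/exp.py)
# rejects modification with ImmutableEntity.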
@wsme_pecan.wsexpose(None, status_code=202) - def delete(self): - """Remove a node from the load balancer. - - :param load_balancer_id: id of lb - :param node_id: id of node - - Url: - DELETE /loadbalancers/{load_balancer_id}/nodes/{node_id} - - Returns: None - """ - node_id = self.nodeid - tenant_id = get_limited_to_project(request.headers) - if self.lbid is None: - raise ClientSideError('Load Balancer ID has not been supplied') - - tenant_id = get_limited_to_project(request.headers) - with db_session() as session: - load_balancer = session.query(LoadBalancer).\ - filter(LoadBalancer.tenantid == tenant_id).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - first() - if load_balancer is None: - session.rollback() - raise NotFound("Load Balancer not found") - if load_balancer.status in ImmutableStates: - session.rollback() - raise ImmutableEntity( - 'Cannot modify a Load Balancer in a non-ACTIVE state' - ', current state: {0}' - .format(load_balancer.status) - ) - - load_balancer.status = 'PENDING_UPDATE' - - nodecount = session.query(Node).\ - filter(Node.lbid == self.lbid).\ - filter(Node.enabled == 1).count() - # Can't delete the last LB - if nodecount <= 1: - session.rollback() - raise ClientSideError( - "Cannot delete the last enabled node in a load balancer" - ) - - node = session.query(Node).\ - filter(Node.lbid == self.lbid).\ - filter(Node.id == node_id).\ - first() - if not node: - session.rollback() - raise NotFound( - "Node not found in supplied Load Balancer" - ) - - # May not delete the primary node of a Galera LB - if load_balancer.protocol.lower() == 'galera' and node.backup == 0: - session.rollback() - raise ClientSideError( - "Cannot delete the primary node in a Galera load balancer" - ) - - session.delete(node) - device = session.query( - Device.id, Device.name - ).join(LoadBalancer.devices).\ - filter(LoadBalancer.id == self.lbid).\ - first() - counter = session.query(Counters).\ - filter(Counters.name == 'api_node_delete').first() - counter.value += 1 - session.commit() - submit_job( - 'UPDATE', device.name, device.id, self.lbid - ) - return None - - @expose('json') - def _lookup(self, nodeid, *remainder): - """Routes more complex url mapping. - - Raises: 404 - """ - # Kludgy fix for PUT since WSME doesn't like IDs on the path - if nodeid: - return NodesController(self.lbid, nodeid), remainder - abort(404) diff --git a/libra/api/controllers/protocols.py b/libra/api/controllers/protocols.py deleted file mode 100644 index 440f99e6..00000000 --- a/libra/api/controllers/protocols.py +++ /dev/null @@ -1,37 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
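The Ports table queried by the ProtocolsController that follows is the same source of truth that load-balancer creation checks a requested protocol/port pair against. A framework-free sketch of that check, with an illustrative stand-in for the enabled rows:

# Stand-in for enabled Ports rows; the real values come from the database.
ENABLED_PORTS = {('HTTP', 80), ('TCP', 443), ('GALERA', 3306)}


def check_port(protocol, port):
    if port < 1 or port > 65535:
        raise ValueError('Port number {0} is invalid'.format(port))
    if (protocol.upper(), port) not in ENABLED_PORTS:
        raise ValueError('Port number {0} is not allowed for {1} protocol'
                         .format(port, protocol))


check_port('http', 80)      # passes
# check_port('http', 8080)  # raises ValueError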
- -from pecan import expose -from pecan.rest import RestController -from libra.common.api.lbaas import Ports, db_session - - -class ProtocolsController(RestController): - @expose('json') - def get(self): - protocols = [] - with db_session() as session: - ports = session.query(Ports.protocol, Ports.portnum).\ - filter(Ports.enabled == 1).all() - for item in ports: - data = {} - item = item._asdict() - data["name"] = item["protocol"] - data["port"] = item["portnum"] - protocols.append(data) - - resp = {"protocols": protocols} - session.rollback() - return resp diff --git a/libra/api/controllers/root.py b/libra/api/controllers/root.py deleted file mode 100644 index a1bacbec..00000000 --- a/libra/api/controllers/root.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import expose, response -from v1 import V1Controller -from libra.api.model.responses import Responses - - -class RootController(object): - """root control object.""" - - @expose('json') - def _default(self): - """default route.. acts as catch all for any wrong urls. - For now it returns a 404 because no action is defined for /""" - response.status = 404 - return Responses._default - - @expose() - def _lookup(self, primary_key, *remainder): - if primary_key == 'v1.1': - return V1Controller(), remainder - else: - response.status = 404 - return Responses._default - - @expose('json') - def notfound(self): - return Responses._default - - @expose('json') - def index(self): - response.status = 200 - return Responses.versions diff --git a/libra/api/controllers/session_persistence.py b/libra/api/controllers/session_persistence.py deleted file mode 100644 index 808d639f..00000000 --- a/libra/api/controllers/session_persistence.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
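root.py above dispatches /v1.1 through pecan's _lookup protocol: return the controller to continue traversal with, plus the unconsumed path segments, or abort with 404. A hedged sketch generalizing that to a version table (V1Controller stands in for any per-version root; the dict contents are an assumption for illustration):

from pecan import abort, expose


class VersionedRoot(object):
    # Filled at startup, e.g. {'v1.1': V1Controller}.
    versions = {}

    @expose()
    def _lookup(self, segment, *remainder):
        controller = self.versions.get(segment)
        if controller is None:
            abort(404)
        return controller(), remainder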
- -from pecan import response -from pecan.rest import RestController - - -class SessionPersistenceController(RestController): - """SessionPersistenceController - functions for /loadbalancers/{loadBalancerId}/sessionpersistence/* routing - """ - - def get(self, load_balancer_id): - """List session persistence configuration.get - - :param load_balancer_id: id of lb - - Url: - GET /loadbalancers/{load_balancer_id}/sessionpersistence - - Returns: dict - """ - response.status = 201 - return None - - def post(self, load_balancer_id): - """Enable session persistence. - - :param load_balancer_id: id of lb - - Url: - PUT /loadbalancers/{load_balancer_id}/sessionpersistence - - Returns: dict - """ - response.status = 201 - return None - - def delete(self, load_balancer_id): - """Disable session persistence. - - :param load_balancer_id: id of lb - - Url: - DELETE /loadbalancers/{load_balancer_id}/sessionpersistence - - Returns: dict - """ - response.status = 201 diff --git a/libra/api/controllers/v1.py b/libra/api/controllers/v1.py deleted file mode 100644 index a17fc95b..00000000 --- a/libra/api/controllers/v1.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import expose, response -from load_balancers import LoadBalancersController -from limits import LimitsController -from protocols import ProtocolsController -from libra.api.model.responses import Responses - - -class V1Controller(object): - """v1 control object.""" - - @expose('json') - def index(self): - response.status = 200 - return Responses.v1_1 - - @expose('json') - def algorithms(self): - """List all supported load balancing algorithms. - - Url: - GET /algorithms - - Returns: dict - """ - response.status = 200 - return Responses.algorithms - - # pecan uses this controller class for urls that start with /loadbalancers - loadbalancers = LoadBalancersController() - limits = LimitsController() - protocols = ProtocolsController() diff --git a/libra/api/controllers/virtualips.py b/libra/api/controllers/virtualips.py deleted file mode 100644 index b98e07bc..00000000 --- a/libra/api/controllers/virtualips.py +++ /dev/null @@ -1,73 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
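In v1.py above, sub-controllers mounted as class attributes become URL segments: the loadbalancers, limits, and protocols attributes serve /v1.1/loadbalancers and friends. A minimal sketch of the same wiring (assumes pecan; the limit values shown are illustrative):

from pecan import expose
from pecan.rest import RestController


class LimitsController(RestController):
    @expose('json')
    def get(self):
        return {'limits': {'absolute': {'values': {'maxLoadBalancers': 5}}}}


class V1Controller(object):
    # Attribute name == URL segment: GET /v1.1/limits
    limits = LimitsController()

    @expose('json')
    def index(self):
        return {'version': {'id': 'v1.1', 'status': 'CURRENT'}}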
- -import ipaddress -from pecan import response, expose, request -from pecan.rest import RestController -from libra.common.api.lbaas import LoadBalancer, Vip, Device, db_session -from libra.common.api.lbaas import Counters -from libra.api.acl import get_limited_to_project - - -class VipsController(RestController): - def __init__(self, load_balancer_id=None): - self.lbid = load_balancer_id - - @expose('json') - def get(self): - """Returns a list of virtual ips attached to a specific Load Balancer. - - :param load_balancer_id: id of lb - - Url: - GET /loadbalancers/{load_balancer_id}/virtualips - - Returns: dict - """ - tenant_id = get_limited_to_project(request.headers) - if not self.lbid: - response.status = 400 - return dict( - message="Bad Request", - details="Load Balancer ID not provided" - ) - with db_session() as session: - vip = session.query( - Vip.id, Vip.ip - ).join(LoadBalancer.devices).\ - join(Device.vip).\ - filter(LoadBalancer.id == self.lbid).\ - filter(LoadBalancer.tenantid == tenant_id).first() - - if not vip: - session.rollback() - response.status = 404 - return dict( - message="Not Found", - details="Load Balancer ID not valid" - ) - resp = { - "virtualIps": [{ - "id": vip.id, - "address": str(ipaddress.IPv4Address(vip.ip)), - "type": "PUBLIC", - "ipVersion": "IPV4" - }] - } - counter = session.query(Counters).\ - filter(Counters.name == 'api_vips_get').first() - counter.value += 1 - session.rollback() - return resp diff --git a/libra/api/library/__init__.py b/libra/api/library/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libra/api/library/exp.py b/libra/api/library/exp.py deleted file mode 100644 index c0f1c235..00000000 --- a/libra/api/library/exp.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
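virtualips.py above stores each VIP as a packed integer (Vip.ip) and renders it with the ipaddress module; the conversion round-trips cleanly. Standalone (Python 3 stdlib; the Python 2 original relied on the backported ipaddress package and unicode input):

import ipaddress

packed = 3232235777                  # e.g. the integer held in Vip.ip
addr = ipaddress.IPv4Address(packed)
assert str(addr) == '192.168.1.1'
assert int(ipaddress.IPv4Address('192.168.1.1')) == packed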
- -import six -from wsme.exc import ClientSideError -from wsme.utils import _ - - -class IPOutOfRange(Exception): - pass - - -class NotFound(ClientSideError): - def __init__(self, msg=''): - self.msg = msg - super(NotFound, self).__init__() - - @property - def faultstring(self): - return _(six.u("NotFound: %s")) % (self.msg) - - -class OverLimit(ClientSideError): - def __init__(self, msg=''): - self.msg = msg - super(OverLimit, self).__init__() - - @property - def faultstring(self): - return _(six.u("OverLimit: %s")) % (self.msg) - - -class NotAuthorized(ClientSideError): - def __init__(self, msg=''): - self.msg = msg - super(NotAuthorized, self).__init__() - - @property - def faultstring(self): - return _(six.u("NotAuthorized: %s")) % (self.msg) - - -class ImmutableEntity(ClientSideError): - def __init__(self, msg=''): - self.msg = msg - super(ImmutableEntity, self).__init__() - - @property - def faultstring(self): - return _(six.u("ImmutableEntity: %s")) % (self.msg) - -ImmutableStates = [ - 'ERROR', 'PENDING_UPDATE', 'PENDING_DELETE', 'BUILD', 'ERROR(REBUILDING)' -] - -# So we can delete devices that are in a plain ERROR state -ImmutableStatesNoError = ImmutableStates[1:] diff --git a/libra/api/library/ip_filter.py b/libra/api/library/ip_filter.py deleted file mode 100644 index 08140cb9..00000000 --- a/libra/api/library/ip_filter.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ipaddress -from libra.api.library.exp import IPOutOfRange - - -def ipfilter(address, masks): - address = ipaddress.IPv4Address(address) - if masks and len(masks) > 0: - in_mask = False - for mask in masks: - if address in ipaddress.IPv4Network(unicode(mask), True): - in_mask = True - break - if not in_mask: - raise IPOutOfRange('IP Address not in mask') - return str(address) diff --git a/libra/api/model/__init__.py b/libra/api/model/__init__.py deleted file mode 100644 index 554a28fe..00000000 --- a/libra/api/model/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def init_model(): - """ - This is a stub method which is called at application startup time. - - If you need to bind to a parse database configuration, set up tables or - ORM classes, or perform any database initialization, this is the - recommended place to do it. 
- - For more information working with databases, and some common recipes, - see http://pecan.readthedocs.org/en/latest/databases.html - """ - pass diff --git a/libra/api/model/responses.py b/libra/api/model/responses.py deleted file mode 100644 index 48c5b19e..00000000 --- a/libra/api/model/responses.py +++ /dev/null @@ -1,72 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the 'License'); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Class Responses -responder objects for framework. -""" - - -class Responses(object): - """404 - not found""" - _default = {'status': '404', 'message': 'Object not Found'} - - """not found """ - not_found = {'message': 'Object not Found'} - - """service_unavailable""" - service_unavailable = {'message': 'Service Unavailable'} - - """algorithms response""" - algorithms = { - 'algorithms': [ - {'name': 'ROUND_ROBIN'}, - {'name': 'LEAST_CONNECTIONS'} - ] - } - - versions = { - "versions": [ - { - "id": "v1.1", - "updated": "2012-12-18T18:30:02.25Z", - "status": "CURRENT", - "links": [ - { - "rel": "self", - "href": "http://wiki.openstack.org/Atlas-LB" - } - ] - } - ] - } - - v1_1 = { - "version": { - "id": "v1.1", - "updated": "2012-12-18T18:30:02.25Z", - "status": "CURRENT", - "links": [ - { - "rel": "self", - "href": "http://wiki.openstack.org/Atlas-LB" - } - ], - "media-types": [ - { - "base": "application/json" - } - ] - } - } diff --git a/libra/api/model/validators.py b/libra/api/model/validators.py deleted file mode 100644 index 9052f8b7..00000000 --- a/libra/api/model/validators.py +++ /dev/null @@ -1,126 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
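The ipfilter() helper removed above is Python 2 code (note the unicode() call). A sketch of the same CIDR-mask check in Python 3, re-declaring the exception so the snippet stands alone:

import ipaddress

class IPOutOfRange(Exception):
    pass

def ipfilter(address, masks):
    # accept the address only if it falls inside one of the given networks
    addr = ipaddress.IPv4Address(address)
    if masks:
        if not any(addr in ipaddress.IPv4Network(mask, strict=True)
                   for mask in masks):
            raise IPOutOfRange('IP Address not in mask')
    return str(addr)

print(ipfilter('10.0.0.5', ['10.0.0.0/24']))  # -> 10.0.0.5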
- -from wsme import types as wtypes -from wsme import wsattr -from wsme.types import Base, Enum - - -class LBNode(Base): - port = wsattr(int, mandatory=True) - address = wsattr(wtypes.text, mandatory=True) - condition = Enum(wtypes.text, 'ENABLED', 'DISABLED') - backup = Enum(wtypes.text, 'TRUE', 'FALSE') - weight = int - - -class LBRespNode(Base): - id = wtypes.text - port = wtypes.text - address = wtypes.text - condition = wtypes.text - status = wtypes.text - weight = wtypes.text - - -class LBNodePut(Base): - condition = Enum(wtypes.text, 'ENABLED', 'DISABLED') - weight = int - - -class NodeResp(Base): - id = int - address = wtypes.text - port = int - condition = wtypes.text - status = wtypes.text - weight = int - - -class LBNodePost(Base): - nodes = wsattr(['LBNode'], mandatory=True) - - -class LBNodeResp(Base): - nodes = wsattr(['NodeResp']) - - -class LBVip(Base): - id = wsattr(int, mandatory=True) - - -class LBOptions(Base): - timeout = int - retries = int - - -class LBPost(Base): - name = wsattr(wtypes.text, mandatory=True) - nodes = wsattr(['LBNode'], mandatory=True) - options = wsattr('LBOptions') - protocol = wtypes.text - algorithm = Enum(wtypes.text, 'ROUND_ROBIN', 'LEAST_CONNECTIONS') - port = int - virtualIps = wsattr(['LBVip']) - - -class LBPut(Base): - name = wtypes.text - algorithm = Enum(wtypes.text, 'ROUND_ROBIN', 'LEAST_CONNECTIONS') - options = wsattr('LBOptions') - - -class LBVipResp(Base): - id = wtypes.text - address = wtypes.text - type = wtypes.text - ipVersion = wtypes.text - - -class LBLogsPost(Base): - objectStoreType = Enum(wtypes.text, 'Swift') - objectStoreEndpoint = wtypes.text - objectStoreBasePath = wtypes.text - authToken = wtypes.text - - -class LBResp(Base): - id = wtypes.text - name = wtypes.text - protocol = wtypes.text - port = wtypes.text - algorithm = wtypes.text - status = wtypes.text - created = wtypes.text - updated = wtypes.text - virtualIps = wsattr(['LBVipResp']) - nodes = wsattr(['LBRespNode']) - options = wsattr('LBOptions') - - -class LBMonitorPut(Base): - type = Enum(wtypes.text, 'CONNECT', 'HTTP') - delay = int - timeout = int - attemptsBeforeDeactivation = int - path = wtypes.text - - -class LBMonitorResp(Base): - type = wtypes.text - delay = wtypes.text - timeout = wtypes.text - attemptsBeforeDeactivation = wtypes.text - path = wtypes.text diff --git a/libra/api/templates/error.html b/libra/api/templates/error.html deleted file mode 100644 index f2d97961..00000000 --- a/libra/api/templates/error.html +++ /dev/null @@ -1,12 +0,0 @@ -<%inherit file="layout.html" /> - -## provide definitions for blocks we want to redefine -<%def name="title()"> - Server Error ${status} - - -## now define the body of the template -
-<header>
-    <h1>Server Error ${status}</h1>
-</header>
-<p>${message}</p>
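The error page above is a Mako template: ${status} and ${message} are substituted when the API renders an error document. A standalone sketch of that substitution, assuming Mako is installed (the inline template string is illustrative):

from mako.template import Template

page = Template("<h1>Server Error ${status}</h1>\n<p>${message}</p>")
print(page.render(status=500, message="Load Balancer Fault"))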
diff --git a/libra/api/wsme_overrides.py b/libra/api/wsme_overrides.py deleted file mode 100644 index 6b9150b1..00000000 --- a/libra/api/wsme_overrides.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import traceback -import functools -import inspect -import sys -import json - -import wsme -import wsme.rest.args -import wsme.rest.json -import wsme.rest.xml -import wsmeext.pecan -import pecan -from libra.api.library.exp import OverLimit, NotFound, NotAuthorized -from libra.api.library.exp import ImmutableEntity -from libra.openstack.common import log -from libra.common.exc import DetailError -from wsme.rest.json import tojson - - -LOG = log.getLogger(__name__) - - -def format_exception(excinfo, debug=False): - """Extract informations that can be sent to the client.""" - error = excinfo[1] - if isinstance(error, wsme.exc.ClientSideError): - r = dict(message="Bad Request", - details=error.faultstring) - LOG.warning("Client-side error: %s" % r['details']) - return r - else: - faultstring = str(error) - debuginfo = "\n".join(traceback.format_exception(*excinfo)) - - LOG.error('Server-side error: "%s". Detail: \n%s' % ( - faultstring, debuginfo)) - - if isinstance(error, DetailError): - r = dict(message="Server error", details=faultstring) - if isinstance(error, ValueError): - r = dict(message="Bad Request", details=faultstring) - else: - r = dict(message="Load Balancer Fault", details=None) - if debug: - r['debuginfo'] = debuginfo - return r - -wsme.api.format_exception = format_exception - - -def encode_result(value, datatype, **options): - jsondata = tojson(datatype, value) - if options.get('nest_result', False): - jsondata = {options.get('nested_result_attrname', 'result'): jsondata} - if jsondata: - return json.dumps(jsondata) - else: - return '' - -wsme.rest.json.encode_result = encode_result - - -def wsexpose(*args, **kwargs): - pecan_json_decorate = pecan.expose( - template='wsmejson:', - content_type='application/json', - generic=False) - pecan_xml_decorate = pecan.expose( - template='wsmexml:', - content_type='application/xml', - generic=False - ) - sig = wsme.signature(*args, **kwargs) - - def decorate(f): - sig(f) - funcdef = wsme.api.FunctionDefinition.get(f) - funcdef.resolve_types(wsme.types.registry) - - @functools.wraps(f) - def callfunction(self, *args, **kwargs): - try: - args, kwargs = wsme.rest.args.get_args( - funcdef, args, kwargs, pecan.request.params, None, - pecan.request.body, pecan.request.content_type - ) - if funcdef.pass_request: - kwargs[funcdef.pass_request] = pecan.request - result = f(self, *args, **kwargs) - - # NOTE: Support setting of status_code with default 201 - pecan.response.status = funcdef.status_code - if isinstance(result, wsme.api.Response): - pecan.response.status = result.status_code - result = result.obj - - except: - data = wsme.api.format_exception( - sys.exc_info(), - pecan.conf.get('wsme', {}).get('debug', False) - ) - e = sys.exc_info()[1] - if 
isinstance(e, OverLimit): - pecan.response.status = 413 - elif isinstance(e, ImmutableEntity): - pecan.response.status = 422 - elif isinstance(e, NotFound): - pecan.response.status = 404 - elif isinstance(e, NotAuthorized): - pecan.response.status = 401 - elif data['message'] == 'Bad Request': - pecan.response.status = 400 - else: - pecan.response.status = 500 - return data - - return dict( - datatype=funcdef.return_type, - result=result - ) - - pecan_xml_decorate(callfunction) - pecan_json_decorate(callfunction) - pecan.util._cfg(callfunction)['argspec'] = inspect.getargspec(f) - callfunction._wsme_definition = funcdef - return callfunction - - return decorate - -wsmeext.pecan.wsexpose = wsexpose - - -class JSonRenderer(object): - def __init__(self, path, extra_vars): - pass - - def render(self, template_path, namespace): - if 'message' in namespace: - return wsme.rest.json.encode_error(None, namespace) - return wsme.rest.json.encode_result( - namespace['result'], - namespace['datatype'] - ) - -pecan.templating._builtin_renderers['wsmejson'] = JSonRenderer diff --git a/libra/common/__init__.py b/libra/common/__init__.py deleted file mode 100644 index 582348cb..00000000 --- a/libra/common/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/common/api/__init__.py b/libra/common/api/__init__.py deleted file mode 100644 index 582348cb..00000000 --- a/libra/common/api/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/common/api/gearman_client.py b/libra/common/api/gearman_client.py deleted file mode 100644 index 0ea8edc5..00000000 --- a/libra/common/api/gearman_client.py +++ /dev/null @@ -1,554 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
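The wsme_overrides module deleted above works by monkey-patching: it swaps its own format_exception(), encode_result() and wsexpose() into the wsme and wsmeext modules, and registers a 'wsmejson' renderer with Pecan. A condensed sketch of the renderer half, mirroring the registration line from the deleted code (the render body is a simplified stub):

import json
import pecan

class JSonRenderer(object):
    def __init__(self, path, extra_vars):
        # Pecan instantiates renderers with the template path and extra vars
        pass

    def render(self, template_path, namespace):
        # error payloads carry 'message'; normal results carry 'result'
        return json.dumps(namespace)

# makes template='wsmejson:' resolvable from pecan.expose()
pecan.templating._builtin_renderers['wsmejson'] = JSonRenderer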
- -import eventlet -eventlet.monkey_patch() -import ipaddress -from libra.common.json_gearman import JSONGearmanClient -from libra.common.api.lbaas import LoadBalancer, db_session, Device, Node, Vip -from libra.common.api.lbaas import HealthMonitor, Counters -from libra.common.api.lbaas import loadbalancers_devices -from libra.common.api.mnb import update_mnb -from libra.openstack.common import log -from pecan import conf - - -LOG = log.getLogger(__name__) - - -gearman_workers = [ - 'UPDATE', # Create/Update a Load Balancer. - 'SUSPEND', # Suspend a Load Balancer. - 'ENABLE', # Enable a suspended Load Balancer. - 'DELETE', # Delete a Load Balancer. - 'DISCOVER', # Return service discovery information. - 'ARCHIVE', # Archive LB log files. - 'METRICS', # Get load balancer statistics. - 'STATS' # Ping load balancers -] - - -def submit_job(job_type, host, data, lbid): - eventlet.spawn_n(client_job, job_type, str(host), data, lbid) - - -def submit_vip_job(job_type, device, vip): - eventlet.spawn_n( - client_job, job_type, "libra_pool_mgm", device, vip - ) - - -def client_job(job_type, host, data, lbid): - try: - client = GearmanClientThread(host, lbid) - LOG.info( - "Sending Gearman job {0} to {1} for loadbalancer {2}".format( - job_type, host, lbid - ) - ) - if job_type == 'UPDATE': - client.send_update(data) - if job_type == 'DELETE': - client.send_delete(data) - if job_type == 'ARCHIVE': - client.send_archive(data) - if job_type == 'ASSIGN': - # Try the assign 5 times - for x in xrange(0, 5): - status = client.send_assign(data) - if status: - break - with db_session() as session: - device = session.query(Device).\ - filter(Device.name == data).first() - if device is None: - LOG.error( - "Device {0} not found in ASSIGN, this shouldn't happen" - .format(data) - ) - return - mnb_data = {} - if not status: - LOG.error( - "Giving up vip assign for device {0}".format(data) - ) - errmsg = 'Floating IP assign failed' - client._set_error(device.id, errmsg, session) - else: - lbs = session.query( - LoadBalancer - ).join(LoadBalancer.nodes).\ - join(LoadBalancer.devices).\ - filter(Device.id == device.id).\ - filter(LoadBalancer.status != 'DELETED').\ - all() - for lb in lbs: - if lb.status == 'BUILD': - # Only send a create message to MnB if we - # are going from BUILD to ACTIVE. After the - # DB is updated. 
- mnb_data["lbid"] = lb.id - mnb_data["tenantid"] = lb.tenantid - lb.status = 'ACTIVE' - device.status = 'ONLINE' - session.commit() - - # Send the MnB create if needed - if "lbid" in mnb_data: - update_mnb('lbaas.instance.create', - mnb_data["lbid"], - mnb_data["tenantid"]) - - if job_type == 'REMOVE': - client.send_remove(data) - return - except: - LOG.exception("Gearman thread unhandled exception") - - -class GearmanClientThread(object): - def __init__(self, host, lbid): - self.host = host - self.lbid = lbid - - server_list = [] - for server in conf.gearman.server: - ghost, gport = server.split(':') - server_list.append({'host': ghost, - 'port': int(gport), - 'keyfile': conf.gearman.ssl_key, - 'certfile': conf.gearman.ssl_cert, - 'ca_certs': conf.gearman.ssl_ca, - 'keepalive': conf.gearman.keepalive, - 'keepcnt': conf.gearman.keepcnt, - 'keepidle': conf.gearman.keepidle, - 'keepintvl': conf.gearman.keepintvl}) - self.gearman_client = JSONGearmanClient(server_list) - - def send_assign(self, data): - NULL = None # For pep8 - with db_session() as session: - device = session.query(Device).\ - filter(Device.name == data).first() - if device is None: - LOG.error( - "VIP assign have been given non existent device {0}" - .format(data) - ) - session.rollback() - return False - if not self.lbid: - vip = session.query(Vip).\ - filter(Vip.device == NULL).\ - with_lockmode('update').\ - first() - if vip is None: - errmsg = 'Floating IP assign failed (none available)' - LOG.error( - "Failed to assign IP to device {0} (none available)" - .format(data) - ) - self._set_error(device.id, errmsg, session) - session.commit() - return False - else: - vip = session.query(Vip).\ - filter(Vip.id == self.lbid).first() - if vip is None: - errmsg = 'Cannot find existing floating IP' - LOG.error( - "Failed to assign IP to device {0}" - .format(data) - ) - self._set_error(device.id, errmsg, session) - session.commit() - return False - vip.device = device.id - vip_id = vip.id - vip_ip = vip.ip - session.commit() - ip_str = str(ipaddress.IPv4Address(vip_ip)) - - job_data = { - 'action': 'ASSIGN_IP', - 'name': data, - 'ip': ip_str - } - status, response = self._send_message(job_data, 'response') - if status: - return True - elif self.lbid: - LOG.error( - "Failed to assign IP {0} to device {1}" - .format(ip_str, data) - ) - else: - LOG.error( - "Failed to assign IP {0} to device {1}" - .format(ip_str, data) - ) - # set to device 0 to make sure it won't be used again - with db_session() as session: - vip = session.query(Vip).filter(Vip.id == vip_id).first() - vip.device = 0 - session.commit() - submit_vip_job('REMOVE', None, ip_str) - return False - - def send_remove(self, data=None): - job_data = { - 'action': 'DELETE_IP', - 'ip': self.lbid - } - ip_int = int(ipaddress.IPv4Address(unicode(self.lbid))) - for x in xrange(0, 5): - LOG.info( - 'Attempt to delete IP {0} #{1}' - .format(self.lbid, x) - ) - status, response = self._send_message(job_data, 'response') - if status: - break - with db_session() as session: - if not status: - LOG.error( - "Failed to delete IP {0}" - .format(self.lbid) - ) - # Set to 0 to mark as something that needs cleaning up - # but cannot be used again - vip = session.query(Vip).\ - filter(Vip.ip == ip_int).first() - vip.device = 0 - else: - session.query(Vip).\ - filter(Vip.ip == ip_int).delete() - counter = session.query(Counters).\ - filter(Counters.name == 'vips_deleted').first() - counter.value += 1 - session.commit() - - def send_delete(self, data): - with db_session() as session: - count 
= session.query( - LoadBalancer - ).join(LoadBalancer.devices).\ - filter(Device.id == data).\ - filter(LoadBalancer.id != self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - filter(LoadBalancer.status != 'PENDING_DELETE').\ - count() - if count >= 1: - # This is an update message because we want to retain the - # remaining LB - keep_lb = session.query(LoadBalancer).\ - join(LoadBalancer.nodes).\ - join(LoadBalancer.devices).\ - filter(Device.id == data).\ - filter(LoadBalancer.id != self.lbid).\ - filter(LoadBalancer.status != 'DELETED').\ - filter(LoadBalancer.status != 'PENDING_DELETE').\ - first() - job_data = { - 'hpcs_action': 'UPDATE', - 'loadBalancers': [{ - 'name': keep_lb.name, - 'protocol': keep_lb.protocol, - 'algorithm': keep_lb.algorithm, - 'port': keep_lb.port, - 'nodes': [] - }] - } - for node in keep_lb.nodes: - if not node.enabled: - continue - condition = 'ENABLED' - node_data = { - 'id': node.id, 'port': node.port, - 'address': node.address, 'weight': node.weight, - 'condition': condition - } - job_data['loadBalancers'][0]['nodes'].append(node_data) - else: - # This is a delete - dev = session.query(Device.name).\ - filter(Device.id == data).first() - vip = session.query(Vip).\ - filter(Vip.device == data).first() - if vip: - submit_vip_job( - 'REMOVE', dev.name, str(ipaddress.IPv4Address(vip.ip)) - ) - job_data = {"hpcs_action": "DELETE"} - - status, response = self._send_message(job_data, 'hpcs_response') - lb = session.query(LoadBalancer).\ - filter(LoadBalancer.id == self.lbid).\ - first() - if not status: - LOG.error( - "Failed Gearman delete for LB {0}".format(lb.id) - ) - self._set_error(data, response, session) - lb.status = 'DELETED' - tenant_id = lb.tenantid - - if count == 0: - # Device should never be used again - device = session.query(Device).\ - filter(Device.id == data).first() - device.status = 'DELETED' - # Remove LB-device join - session.execute(loadbalancers_devices.delete().where( - loadbalancers_devices.c.loadbalancer == lb.id - )) - session.query(Node).\ - filter(Node.lbid == lb.id).delete() - session.query(HealthMonitor).\ - filter(HealthMonitor.lbid == lb.id).delete() - counter = session.query(Counters).\ - filter(Counters.name == 'loadbalancers_deleted').first() - counter.value += 1 - session.commit() - - # Notify billing of the LB deletion - update_mnb('lbaas.instance.delete', self.lbid, tenant_id) - - def _set_error(self, device_id, errmsg, session): - lbs = session.query( - LoadBalancer - ).join(LoadBalancer.nodes).\ - join(LoadBalancer.devices).\ - filter(Device.id == device_id).\ - filter(LoadBalancer.status != 'DELETED').\ - all() - device = session.query(Device).\ - filter(Device.id == device_id).\ - first() - if device is None: - # Device already deleted, probably a race between the OFFLINE check - # and auto-failover - return - device.status = 'ERROR' - counter = session.query(Counters).\ - filter(Counters.name == 'loadbalancers_error').first() - for lb in lbs: - counter.value += 1 - lb.status = 'ERROR' - lb.errmsg = errmsg - - def send_archive(self, data): - with db_session() as session: - lb = session.query(LoadBalancer).\ - filter(LoadBalancer.id == self.lbid).\ - first() - job_data = { - 'hpcs_action': 'ARCHIVE', - 'hpcs_object_store_basepath': data['objectStoreBasePath'], - 'hpcs_object_store_endpoint': data['objectStoreEndpoint'], - 'hpcs_object_store_token': data['authToken'], - 'hpcs_object_store_type': data['objectStoreType'], - 'loadBalancers': [{ - 'id': str(lb.id), - 'name': lb.name, - 'protocol': lb.protocol - }] - 
} - status, response = self._send_message(job_data, 'hpcs_response') - device = session.query(Device).\ - filter(Device.id == data['deviceid']).\ - first() - if status: - device.errmsg = 'Log archive successful' - else: - device.errmsg = 'Log archive failed: {0}'.format(response) - lb.status = 'ACTIVE' - counter = session.query(Counters).\ - filter(Counters.name == 'log_archives').first() - counter.value += 1 - session.commit() - - def send_update(self, data): - with db_session() as session: - lbs = session.query( - LoadBalancer - ).join(LoadBalancer.nodes).\ - join(LoadBalancer.devices).\ - filter(Device.id == data).\ - filter(LoadBalancer.status != 'DELETED').\ - all() - job_data = { - 'hpcs_action': 'UPDATE', - 'loadBalancers': [] - } - - degraded = [] - if lbs is None: - LOG.error( - 'Attempting to send empty LB data for device {0} ({1}), ' - 'something went wrong'.format(data, self.host) - ) - self._set_error(data, "LB config error", session) - session.commit() - return - - for lb in lbs: - lb_data = { - 'name': lb.name, - 'protocol': lb.protocol, - 'algorithm': lb.algorithm, - 'port': lb.port, - 'nodes': [], - 'monitor': {} - } - for node in lb.nodes: - if not node.enabled: - continue - condition = 'ENABLED' - backup = 'FALSE' - if node.backup != 0: - backup = 'TRUE' - node_data = { - 'id': node.id, 'port': node.port, - 'address': node.address, 'weight': node.weight, - 'condition': condition, 'backup': backup - } - - lb_data['nodes'].append(node_data) - # Track if we have a DEGRADED LB - if node.status == 'ERROR': - degraded.append(lb.id) - - # Add a default health monitor if one does not exist - monitor = session.query(HealthMonitor).\ - filter(HealthMonitor.lbid == lb.id).first() - - if monitor is None: - # Set it to a default configuration - monitor = HealthMonitor( - lbid=lb.id, type="CONNECT", delay=30, - timeout=30, attempts=2, path=None - ) - session.add(monitor) - session.flush() - - monitor_data = { - 'type': monitor.type, - 'delay': monitor.delay, - 'timeout': monitor.timeout, - 'attempts': monitor.attempts - } - if monitor.path is not None: - monitor_data['path'] = monitor.path - - # All new LBs created since these options were supported - # will have default values in the DB. Pre-existing LBs will - # not have any values, so we need to check for that. - if any([lb.timeout, lb.retries]): - lb_data['options'] = { - 'client_timeout': lb.timeout, - 'server_timeout': lb.timeout, - 'connect_timeout': lb.timeout, - 'connect_retries': lb.retries - } - - lb_data['monitor'] = monitor_data - job_data['loadBalancers'].append(lb_data) - - # Update the worker - mnb_data = {} - status, response = self._send_message(job_data, 'hpcs_response') - if not status: - self._set_error(data, response, session) - else: - for lb in lbs: - if lb.id in degraded: - lb.status = 'DEGRADED' - lb.errmsg = "A node on the load balancer has failed" - elif lb.status == 'ERROR': - # Do nothing because something else failed in the mean - # time - pass - elif lb.status == 'BUILD': - # Do nothing if a new device, stay in BUILD state until - # floating IP assign finishes - if len(lbs) > 1: - lb.status = 'ACTIVE' - if lb.id == self.lbid: - # This is the new LB being added to a device. 
- # We don't have to assign a vip so we can - # notify billing of the LB creation (once the - # DB is updated) - mnb_data["lbid"] = lb.id - mnb_data["tenantid"] = lb.tenantid - else: - lb.status = 'ACTIVE' - lb.errmsg = None - device = session.query(Device).\ - filter(Device.id == data).\ - first() - if device is None: - # Shouldn't hit here, but just to be safe - session.commit() - return - if device.status == 'BUILD' and len(lbs) > 1: - device.status = 'ONLINE' - device_name = device.name - device_status = device.status - counter = session.query(Counters).\ - filter(Counters.name == 'loadbalancers_updated').first() - counter.value += 1 - session.commit() - if device_status == 'BUILD': - submit_vip_job( - 'ASSIGN', device_name, None - ) - - # Send the MnB create if needed - if "lbid" in mnb_data: - update_mnb('lbaas.instance.create', - mnb_data["lbid"], - mnb_data["tenantid"]) - - def _send_message(self, message, response_name): - job_status = self.gearman_client.submit_job( - self.host, message, background=False, wait_until_complete=True, - max_retries=10, poll_timeout=120.0 - ) - if job_status.state == 'UNKNOWN': - # Gearman server connection failed - LOG.error('Could not talk to gearman server') - return False, "System error communicating with load balancer" - if job_status.timed_out: - # Job timed out - LOG.warning( - 'Gearman timeout talking to {0}'.format(self.host) - ) - return False, "Timeout error communicating with load balancer" - LOG.debug(job_status.result) - if 'badRequest' in job_status.result: - error = job_status.result['badRequest']['validationErrors'] - return False, error['message'] - if job_status.result[response_name] == 'FAIL': - # Worker says 'no' - if 'hpcs_error' in job_status.result: - error = job_status.result['hpcs_error'] - else: - error = 'Load Balancer error' - LOG.error( - 'Gearman error response from {0}: {1}'.format(self.host, error) - ) - return False, error - LOG.info('Gearman success from {0}'.format(self.host)) - return True, job_status.result diff --git a/libra/common/api/lbaas.py b/libra/common/api/lbaas.py deleted file mode 100644 index 6517d659..00000000 --- a/libra/common/api/lbaas.py +++ /dev/null @@ -1,302 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the 'License'); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
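Throughout gearman_client.py above, work is pushed to devices with the python-gearman client and the reply is inspected for a FAIL marker. A trimmed sketch of that request/response round trip, assuming python-gearman is installed (server address, function name and payload are placeholders):

from gearman import GearmanClient

client = GearmanClient(['localhost:4730'])
job = client.submit_job('device-function', '{"hpcs_action": "UPDATE"}',
                        background=False, wait_until_complete=True,
                        poll_timeout=120.0)
if job.state == 'UNKNOWN' or job.timed_out:
    print('could not reach the worker, or the job timed out')
elif job.result == 'FAIL':
    print('worker rejected the job')
else:
    print('worker replied: %s' % job.result)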
- -import ConfigParser -import sqlalchemy.types as types -import time - -from oslo.config import cfg -from pecan import conf -from sqlalchemy import Table, Column, Integer, ForeignKey, create_engine -from sqlalchemy import INTEGER, VARCHAR, BIGINT, DATETIME -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import relationship, backref, sessionmaker, Session - -from libra.openstack.common import log - -LOG = log.getLogger(__name__) - - -DeclarativeBase = declarative_base() -metadata = DeclarativeBase.metadata - -loadbalancers_devices = Table( - 'loadbalancers_devices', - metadata, - Column('loadbalancer', Integer, ForeignKey('loadbalancers.id')), - Column('device', Integer, ForeignKey('devices.id')) -) - - -class FormatedDateTime(types.TypeDecorator): - '''formats date to match iso 8601 standards - ''' - - impl = types.DateTime - - def process_result_value(self, value, dialect): - return value.strftime('%Y-%m-%dT%H:%M:%S') - - -class TenantLimits(DeclarativeBase): - __tablename__ = 'tenant_limits' - id = Column(u'id', Integer, primary_key=True, nullable=False) - tenantid = Column(u'tenantid', VARCHAR(length=128), nullable=False) - loadbalancers = Column(u'loadbalancers', INTEGER(), nullable=True) - - -class Limits(DeclarativeBase): - __tablename__ = 'global_limits' - id = Column(u'id', Integer, primary_key=True, nullable=False) - name = Column(u'name', VARCHAR(length=128), nullable=False) - value = Column(u'value', BIGINT(), nullable=False) - - -class AdminAuth(DeclarativeBase): - __tablename__ = 'admin_auth' - id = Column(u'id', Integer, primary_key=True, nullable=False) - tenant_id = Column(u'tenant_id', VARCHAR(length=128), nullable=False) - level = Column(u'level', VARCHAR(length=10), nullable=False) - - -class PoolBuilding(DeclarativeBase): - __tablename__ = 'pool_building' - id = Column(u'id', Integer, primary_key=True, nullable=False) - server_id = Column(u'server_id', Integer, nullable=False) - qty = Column(u'qty', Integer, nullable=False) - - -class Vip(DeclarativeBase): - __tablename__ = 'vips' - id = Column(u'id', Integer, primary_key=True, nullable=False) - ip = Column(u'ip', Integer, nullable=True) - device = Column(u'device', Integer, ForeignKey('devices.id')) - - -class Device(DeclarativeBase): - """device model""" - __tablename__ = 'devices' - # column definitions - az = Column(u'az', INTEGER(), nullable=False) - created = Column(u'created', FormatedDateTime(), nullable=False) - floatingIpAddr = Column( - u'floatingIpAddr', VARCHAR(length=128), nullable=False - ) - id = Column(u'id', BIGINT(), primary_key=True, nullable=False) - name = Column(u'name', VARCHAR(length=128), nullable=False) - publicIpAddr = Column(u'publicIpAddr', VARCHAR(length=128), nullable=False) - status = Column(u'status', VARCHAR(length=128), nullable=False) - type = Column(u'type', VARCHAR(length=128), nullable=False) - pingCount = Column(u'pingCount', INTEGER(), nullable=False) - updated = Column(u'updated', FormatedDateTime(), nullable=False) - vip = relationship("Vip", uselist=False, backref="devices") - - -class LoadBalancer(DeclarativeBase): - """load balancer model""" - __tablename__ = 'loadbalancers' - # column definitions - algorithm = Column(u'algorithm', VARCHAR(length=80), nullable=False) - errmsg = Column(u'errmsg', VARCHAR(length=128), nullable=True) - id = Column(u'id', BIGINT(), primary_key=True, nullable=False) - name = Column(u'name', VARCHAR(length=128), nullable=False) - port = Column(u'port', INTEGER(), nullable=False) - protocol = Column(u'protocol', 
VARCHAR(length=128), nullable=False) - status = Column(u'status', VARCHAR(length=50), nullable=False) - tenantid = Column(u'tenantid', VARCHAR(length=128), nullable=False) - updated = Column(u'updated', FormatedDateTime(), nullable=False) - created = Column(u'created', FormatedDateTime(), nullable=False) - timeout = Column(u'timeout', INTEGER(), nullable=True) - retries = Column(u'retries', INTEGER(), nullable=True) - nodes = relationship( - 'Node', backref=backref('loadbalancers', order_by='Node.id') - ) - monitors = relationship( - 'HealthMonitor', backref=backref( - 'loadbalancers', - order_by='HealthMonitor.lbid') - ) - devices = relationship( - 'Device', secondary=loadbalancers_devices, backref='loadbalancers', - lazy='joined' - ) - - -class Node(DeclarativeBase): - """node model""" - __tablename__ = 'nodes' - # column definitions - address = Column(u'address', VARCHAR(length=128), nullable=False) - enabled = Column(u'enabled', INTEGER(), nullable=False) - id = Column(u'id', BIGINT(), primary_key=True, nullable=False) - lbid = Column( - u'lbid', BIGINT(), ForeignKey('loadbalancers.id'), nullable=False - ) - port = Column(u'port', INTEGER(), nullable=False) - status = Column(u'status', VARCHAR(length=128), nullable=False) - weight = Column(u'weight', INTEGER(), nullable=False) - backup = Column(u'backup', INTEGER(), nullable=False, default=0) - - -class HealthMonitor(DeclarativeBase): - """monitors model""" - __tablename__ = 'monitors' - # column definitions - lbid = Column( - u'lbid', BIGINT(), ForeignKey('loadbalancers.id'), primary_key=True, - nullable=False - ) - type = Column(u'type', VARCHAR(length=128), nullable=False) - delay = Column(u'delay', INTEGER(), nullable=False) - timeout = Column(u'timeout', INTEGER(), nullable=False) - attempts = Column( - u'attemptsBeforeDeactivation', INTEGER(), nullable=False - ) - path = Column(u'path', VARCHAR(length=2000)) - - -class Billing(DeclarativeBase): - __tablename__ = 'billing' - id = Column(u'id', Integer, primary_key=True, nullable=False) - name = Column(u'name', VARCHAR(length=128), nullable=False) - last_update = Column(u'last_update', DATETIME(), nullable=False) - - -class Stats(DeclarativeBase): - """stats model""" - __tablename__ = 'stats' - # column definitions - id = Column(u'id', BIGINT(), primary_key=True, nullable=False) - lbid = Column( - u'lbid', BIGINT(), ForeignKey('loadbalancers.id'), primary_key=True, - nullable=False - ) - period_start = Column(u'period_start', DATETIME(), nullable=False) - period_end = Column(u'period_end', DATETIME(), nullable=False) - bytes_out = Column(u'bytes_out', BIGINT(), nullable=False) - status = Column(u'status', VARCHAR(length=50), nullable=False) - - -class Ports(DeclarativeBase): - """ports model""" - __tablename__ = 'ports' - # column definitions - id = Column(u'id', BIGINT(), primary_key=True, nullable=False) - protocol = Column(u'protocol', VARCHAR(length=50), nullable=False) - portnum = Column(u'portnum', BIGINT(), nullable=False) - enabled = Column(u'enabled', INTEGER(), nullable=False, default=0) - - -class Counters(DeclarativeBase): - __tablename__ = 'counters' - id = Column(u'id', Integer, primary_key=True, nullable=False) - name = Column(u'name', VARCHAR(length=50), nullable=False) - value = Column(u'value', BIGINT(), primary_key=True, nullable=False) - - -class RoutingSession(Session): - """ Try to use the first engine provided. If this fails use the next in - sequence and so on. 
Reset to the first after 60 seconds - we do this because we can end up with deadlocks in Galera, see - http://tinyurl.com/9h6qlly """ - - engines = {} - engines_count = 0 - use_engine = 0 - last_engine_time = 0 - - def get_bind(self, mapper=None, clause=None): - if not RoutingSession.engines: - self._build_engines() - - if ( - RoutingSession.use_engine > 0 - and time.time() < RoutingSession.last_engine_time + 60 - ): - RoutingSession.last_engine_time = time.time() - RoutingSession.use_engine = 0 - engine = RoutingSession.engines[RoutingSession.use_engine] - return engine - - def _build_engines(self): - # We have to use ConfigParser here because with oslo.config, we need - # to know the section names before parsing. - config = ConfigParser.SafeConfigParser() - config.read(cfg.CONF['config_file']) - - if 'debug' in conf.app and conf.app.debug: - echo = True - else: - echo = False - - for section in conf.database: - db_conf = config._sections[section] - - conn_string = '''mysql+mysqlconnector://%s:%s@%s:%s/%s''' % ( - db_conf['username'], - db_conf['password'], - db_conf['host'], - db_conf['port'], - db_conf['schema'] - ) - - if 'ssl_key' in db_conf: - ssl_args = {'ssl': { - 'cert': db_conf['ssl_cert'], - 'key': db_conf['ssl_key'], - 'ca': db_conf['ssl_ca'] - }} - - engine = create_engine( - conn_string, isolation_level="READ COMMITTED", - pool_size=20, connect_args=ssl_args, pool_recycle=3600, - echo=echo - ) - else: - engine = create_engine( - conn_string, isolation_level="READ COMMITTED", - pool_size=20, pool_recycle=3600, - echo=echo - ) - RoutingSession.engines[RoutingSession.engines_count] = engine - RoutingSession.engines_count += 1 - - -class db_session(object): - def __init__(self): - self.session = None - - def __enter__(self): - for x in xrange(10): - try: - self.session = sessionmaker(class_=RoutingSession)() - self.session.execute("SELECT 1") - return self.session - except: - LOG.error( - 'Could not connect to DB server: {0}'.format( - RoutingSession.engines[RoutingSession.use_engine].url - ) - ) - RoutingSession.last_engine_time = time.time() - RoutingSession.use_engine += 1 - if RoutingSession.use_engine == RoutingSession.engines_count: - RoutingSession.use_engine = 0 - LOG.error('Could not connect to any DB server') - return None - - def __exit__(self, type, value, traceback): - self.session.close() - return False diff --git a/libra/common/api/lbaas.sql b/libra/common/api/lbaas.sql deleted file mode 100644 index 7e3b2f72..00000000 --- a/libra/common/api/lbaas.sql +++ /dev/null @@ -1,162 +0,0 @@ -# LBaaS Database schema -# pemellquist@gmail.com - -DROP DATABASE IF EXISTS lbaas; -CREATE DATABASE lbaas; -USE lbaas; - -# versions, used to define overall version for schema -# major version differences are not backward compatibile -create TABLE versions ( - major INT NOT NULL, - minor INT NOT NULL, - PRIMARY KEY (major) -); -INSERT INTO versions values (2,0); - -# loadbalancers -CREATE TABLE loadbalancers ( - id BIGINT NOT NULL AUTO_INCREMENT, # unique id for this loadbalancer, generated by DB when record is created - name VARCHAR(128) NOT NULL, # tenant assigned load balancer name - tenantid VARCHAR(128) NOT NULL, # tenant id who owns this loadbalancer - protocol VARCHAR(128) NOT NULL, # loadbalancer protocol used, can be 'HTTP', 'TCP' or 'HTTPS' - port INT NOT NULL, # TCP port number associated with protocol and used by loadbalancer northbound interface - status VARCHAR(50) NOT NULL, # current status, see ATLAS API 1.1 for all possible values - algorithm VARCHAR(80) NOT NULL, # LB 
Algorithm in use e.g. ROUND_ROBIN, see ATLAS API 1.1 for all possible values - created TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', # timestamp of when LB was created - updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, # timestamp of when LB was last updated - errmsg VARCHAR(128) DEFAULT NULL, # optional error message which can describe details regarding LBs state, can be blank if no error state exists - timeout INT, - retries INT, - PRIMARY KEY (id) # ids are unique accross all LBs - ) DEFAULT CHARSET utf8 DEFAULT COLLATE utf8_general_ci; - - #nodes - CREATE TABLE nodes ( - id BIGINT NOT NULL AUTO_INCREMENT, # unique id for this node, generated by DB when record is created - lbid BIGINT NOT NULL, # Loadbalancer who owns this node - address VARCHAR(128) NOT NULL, # IPV4 or IPV6 address for this node - port INT NOT NULL, # TCP port number associated with this node and used from LB to node - weight INT NOT NULL, # Node weight if applicable to algorithm used - enabled BOOLEAN NOT NULL, # is node enabled or not - status VARCHAR(128) NOT NULL, # status of node 'OFFLINE', 'ONLINE', 'ERROR', this value is reported by the device - backup BOOLEAN NOT NULL DEFAULT FALSE, # true if a backup node - PRIMARY KEY (id) # ids are unique accross all Nodes - ) DEFAULT CHARSET utf8 DEFAULT COLLATE utf8_general_ci; - - - # devices -CREATE TABLE devices ( - id BIGINT NOT NULL AUTO_INCREMENT, # unique id for this device, generated by DB when record is created - name VARCHAR(128) NOT NULL, # admin assigned device name, this is the unique gearman worker function name - floatingIpAddr VARCHAR(128) NOT NULL, # IPV4 or IPV6 address of device for floating IP - publicIpAddr VARCHAR(128) NOT NULL, # IPV4 or IPV6 address of device for floating IP - az INT NOT NULL, # availability zone in which this device exists - type VARCHAR(128) NOT NULL, # text description of type of device, e.g. 'HAProxy' - created TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', # timestamp of when device was created (default sets to current timestamp on row create) - updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, # timestamp of when device was last updated - pingCount INT NOT NULL, # Number of ping failures against an OFFLINE device - status VARCHAR(128) NOT NULL, # status of device 'OFFLINE', 'ONLINE', 'ERROR', this value is reported by the device - PRIMARY KEY (id) -) DEFAULT CHARSET utf8 DEFAULT COLLATE utf8_general_ci; - -CREATE TABLE `loadbalancers_devices` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `loadbalancer` int(11) DEFAULT NULL, - `device` int(11) DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=latin1; - -CREATE TABLE monitors ( - lbid BIGINT NOT NULL, # Loadbalancer who owns this node - type VARCHAR(128) NOT NULL, # Type of ping. CONNECT, HTTP, HTTPS - delay INT NOT NULL, # This is the minimum time in seconds between regular calls to a monitor - timeout INT NOT NULL, # Maximum number of seconds to wait for a connection to the node before it times out. - attemptsBeforeDeactivation INT NOT NULL, # Number of permissible failures before removing a node from rotation. 1 to 10. - path VARCHAR(2000) NULL, # The HTTP path used in the request by the monitor. 
Begins with / - PRIMARY KEY (lbid) # ids are unique across all Nodes - ) DEFAULT CHARSET utf8 DEFAULT COLLATE utf8_general_ci; - -CREATE TABLE `pool_building` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `server_id` int(11) NOT NULL, - `qty` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `server_id` (`server_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE TABLE `vips` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `ip` int(11) DEFAULT NULL, - `device` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `device` (`device`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE TABLE `global_limits` ( - `id` int(11) NOT NULL, - `name` varchar(128) NOT NULL, - `value` bigint(20) NOT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -INSERT INTO `global_limits` VALUES (1,'maxLoadBalancerNameLength',128),(2,'maxVIPsPerLoadBalancer',1),(3,'maxNodesPerLoadBalancer',50),(4,'maxLoadBalancers',20); - -CREATE TABLE `tenant_limits` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `tenantid` VARCHAR(128) NOT NULL, - `loadbalancers` INT, # Max number of load balancers - PRIMARY KEY(id), - UNIQUE KEY `tenantid` (`tenantid`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -# Billing -CREATE TABLE billing ( - id int(11) NOT NULL, - name varchar(128) NOT NULL, - last_update DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', # timestamp of when the feature was last updated - PRIMARY KEY (id) -) ENGINE=InnoDB DEFAULT CHARSET latin1; - -INSERT INTO billing VALUES (1, 'stats', '0000-00-00 00:00:00'),(2, 'usage', '0000-00-00 00:00:00'),(3, 'exists', '0000-00-00 00:00:00'); - -# Admin API auth -CREATE TABLE admin_auth ( - id int(11) NOT NULL AUTO_INCREMENT, - tenant_id varchar(128) NOT NULL, - level varchar(10) NOT NULL, - PRIMARY KEY(id) -) ENGINE=InnoDB DEFAULT CHARSET latin1; - -# Counters -CREATE TABLE counters ( - id int(11) NOT NULL AUTO_INCREMENT, - name varchar(50) NOT NULL, - value BIGINT NOT NULL, - PRIMARY KEY(id) -) ENGINE=InnoDB DEFAULT CHARSET latin1; - -INSERT INTO counters VALUES (1, 'loadbalancers_rebuild', 0),(2, 'loadbalancers_error', 0),(3, 'devices_offline_failed', 0),(4, 'loadbalancers_expunged', 0),(5, 'devices_deleted', 0), (6, 'vips_built', 0), (7, 'devices_built', 0), (8, 'devices_bad_built', 0), (9, 'vips_deleted', 0), (10, 'loadbalancers_deleted', 0), (11, 'log_archives',0), (12, 'loadbalancers_updated', 0), (13, 'api_loadbalancers_create', 0), (14, 'api_loadbalancers_get', 0), (15, 'api_loadbalancers_modify', 0), (16, 'api_loadbalancers_delete', 0), (17, 'api_healthmonitor_get', 0), (18, 'api_healthmonitor_modify', 0), (19, 'api_healthmonitor_delete', 0), (20, 'api_limits_get', 0), (21, 'api_log_archive', 0), (22, 'api_node_get', 0), (23, 'api_node_create', 0), (24, 'api_node_modify', 0), (25, 'api_node_delete', 0), (26, 'api_vips_get', 0); - -# Stats -CREATE TABLE stats ( - id BIGINT NOT NULL AUTO_INCREMENT, # unique id for this billing record - lbid BIGINT NOT NULL REFERENCES loadblancers(id), # fk for lbid - period_start DATETIME NOT NULL, # timestamp of when this period started - period_end DATETIME NOT NULL, # timestamp of when this period ended - bytes_out BIGINT NOT NULL, # bytes transferred in this period - status VARCHAR(50) NOT NULL, # Current LB status - PRIMARY KEY (id) # ids are unique across all LBs - ) ENGINE=InnoDB DEFAULT CHARSET latin1; - -# Ports -CREATE TABLE ports ( - id BIGINT NOT NULL AUTO_INCREMENT, # unique id - protocol VARCHAR(50) NOT NULL, # Ptotocol type (HTTP, TCP, etc) - portnum INT NOT NULL, # port number - enabled BOOLEAN NOT NULL DEFAULT FALSE, # 
enabled/disabled - PRIMARY KEY (id) # ids are unique across all LBs - ) ENGINE=InnoDB DEFAULT CHARSET latin1; - -INSERT INTO ports VALUES (1, 'HTTP', 80, true),(2, 'HTTP', 8080, false),(3, 'HTTP', 8088, false),(4,'TCP', 443, true),(5, 'TCP', 8443, false),(6, 'TCP', 3306, true),(7, 'GALERA', 3306, true); - diff --git a/libra/common/api/mnb.py b/libra/common/api/mnb.py deleted file mode 100644 index 6c878a04..00000000 --- a/libra/common/api/mnb.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import eventlet -eventlet.monkey_patch() - -from libra.common.options import CONF -from libra.common.api.lbaas import LoadBalancer, db_session -from libra.common.api.lbaas import Stats -from libra.openstack.common.notifier import api as notifier_api -from libra.openstack.common import timeutils -from libra.openstack.common import log as logging -from libra.openstack.common import rpc -from libra.openstack.common.rpc import common as rpc_common -from sqlalchemy.sql import func - - -LOG = logging.getLogger(__name__) - - -def update_mnb(event_type, lbid, tenant_id): - if CONF['billing_enable']: - eventlet.spawn_n(client_job, event_type, lbid, tenant_id) - - -def client_job(event_type, lbid, tenant_id): - - try: - if(event_type == 'lbaas.instance.create' or - event_type == 'lbaas.instance.delete'): - _send_create_or_delete(event_type, lbid, tenant_id) - elif event_type == 'lbaas.instance.exists': - _send_exists(event_type) - elif event_type == 'lbaas.bandwidth.usage': - _send_usage(event_type, lbid, tenant_id) - elif event_type == 'lbaas.instance.test': - _send_test(event_type, lbid, tenant_id) - return - - except: - LOG.exception("MnB notify: unhandled exception") - - LOG.error("MnB notification unsuccessful. Type {0}, loadbalancer {1} " - "tenant_id {2}".format(event_type, lbid, tenant_id)) - - -def _notify(service, event_type, payload): - priority = CONF['default_notification_level'] - publisher_id = notifier_api.publisher_id(service) - notifier_api.notify(None, publisher_id, event_type, priority, payload) - - -def test_mnb_connection(): - # Because the oslo notifier code does not have a return status - # and exceptions are caught inside oslo (I know...), the best we - # can do here is use the oslo rpc code to try a test connection - # to the MnB servers before the notification(s) are sent. 
- connected = False - try: - cx = rpc.create_connection() - cx.close() - LOG.info("Verified RPC connection is ready") - connected = True - except rpc_common.RPCException as e: - LOG.error("RPC connect exception: %s", e) - except Exception as e: - LOG.error("Non-RPC connect exception: %s", e) - return connected - - -def _send_create_or_delete(event_type, lbid, tenant_id): - - LOG.info( - "Sending MnB {0} notification to MnB for " - "loadbalancer {1} tenant_id {2}".format( - event_type, lbid, tenant_id) - ) - - if not test_mnb_connection(): - # Abort the notification - if event_type == 'lbaas.instance.create': - LOG.info("Aborting Create Notifications. Could not connect") - else: - LOG.info("Aborting Delete Notifications. Could not connect") - return - - with db_session() as session: - lb = session.query( - LoadBalancer.name, - LoadBalancer.status, - LoadBalancer.created, - LoadBalancer.updated - ).filter(LoadBalancer.id == lbid).\ - filter(LoadBalancer.tenantid == tenant_id).first() - - if lb is None: - session.rollback() - LOG.error("Load Balancer {0} not found for tenant {1}".format( - lbid, tenant_id)) - return - - if event_type == 'lbaas.instance.create': - date = lb.created - else: - date = lb.updated - - # Build the payload - payload = _build_payload(date, date, lb.name, lbid, - tenant_id, lb.status) - - _notify('lbaas', event_type, payload) - session.commit() - - -def _send_exists(event_type): - - LOG.info("Sending MnB {0} notifications to MnB".format(event_type)) - count = 0 - with db_session() as session: - lbs = session.query( - LoadBalancer.id, - LoadBalancer.tenantid, - LoadBalancer.name, - LoadBalancer.status, - LoadBalancer.created, - LoadBalancer.updated - ).filter(LoadBalancer.status != 'DELETED').all() - - if not lbs: - session.rollback() - LOG.error("No existing Load Balancers found") - return - - # Figure out our audit period beging/ending - seconds = (CONF['admin_api']['exists_freq'] * 60) - interval = datetime.timedelta(seconds=seconds) - audit_period_ending = timeutils.utcnow() - audit_period_beginning = audit_period_ending - interval - audit_period_beginning = str(audit_period_beginning) - audit_period_ending = str(audit_period_ending) - - for lb in lbs: - LOG.info( - "Sending MnB {0} notification to MnB for " - "loadbalancer {1} tenant_id {2}".format( - event_type, lb.id, lb.tenantid) - ) - - # Build the payload - payload = _build_payload(audit_period_beginning, - audit_period_ending, - lb.name, lb.id, lb.tenantid, lb.status) - - _notify('lbaas', event_type, payload) - count += 1 - - session.commit() - LOG.info("Sent {0} MnB {1} notifications to MnB".format(count, event_type)) - - -def _send_usage(event_type, start, stop): - - LOG.info("Sending MnB {0} notifications to MnB".format(event_type)) - N = CONF['admin_api']['usage_freq'] - - with db_session() as session: - - # Start by making sure we have stats in the Stats table and - # track the oldest value in case we need it below. - oldest, = session.query(Stats.period_end).\ - order_by(Stats.id.asc()).first() - - if oldest is None: - # No Stats at all - LOG.info("No usage statistics to send.") - session.rollback() - return - - if start is None: - # The value in the DB must be '0000-00-00 00:00:00 so - # as a starting point, we can find the oldest stat in - # the Stats table and start from there. No sense iterating - # from 0000-00-00 to now looking for stats to send. 
Also - # round it back to the previous update period - start = _rounded_down_min(oldest, N) - LOG.info("Starting usage notifications from first saved {0}". - format(start)) - - # Now that we know where to start, make sure we have stats to - # send for the time period. Use stats that end in this period. - # It's ok if the stats started in a previous period. Some skew - # is allowed. - total = session.query(Stats).\ - filter(Stats.period_end >= start).\ - filter(Stats.period_end < stop).\ - count() - if total == 0: - LOG.info("No usage statistics to send between {0} and {1}" - .format(start, stop)) - session.rollback() - return - - LOG.info("Found {0} total usage statistics to send between {1} and {2}" - .format(total, start, stop)) - - # Get info on all of our loadbalancers for the payloads. - loadbalancers = _get_lbs() - - # Get ready to loop through however N minute periods we - # have to send. We do it this way rather than one lump sum - # because finer grain data is probably needed on the MnB side. - end = start + datetime.timedelta(minutes=N) - count = 0 - while end <= stop: - # Loop through all N periods up to the current period - # sending usage notifications to MnB - stats = session.query( - Stats.lbid, - func.sum(Stats.bytes_out) - ).group_by(Stats.lbid).\ - filter(Stats.period_end >= start).\ - filter(Stats.period_end < end).\ - all() - - # Prep for the next loop here in case of continue - prev_start = start - prev_end = end - start = end - end = start + datetime.timedelta(minutes=N) - - if not stats: - LOG.info("No usage statistics to send for period {0} to {1}". - format(prev_start, prev_end)) - continue - else: - LOG.info("Sending usage statistics for {0} to {1}". - format(prev_start, prev_end)) - - audit_period_beginning = str(prev_start) - audit_period_ending = str(prev_end) - for lb in stats: - lbid, byte_count = lb - - byte_count = int(byte_count) - if lbid not in loadbalancers: - LOG.error("Loadbalancer {0} not found in DB " - "not sending usage statistics".format(lbid)) - continue - - # Build the payload - payload = _build_payload(audit_period_beginning, - audit_period_ending, - loadbalancers[lbid]["name"], - lbid, - loadbalancers[lbid]["tenant_id"], - loadbalancers[lbid]["status"]) - - payload["metrics"] = _build_metrics(byte_count) - - LOG.info( - "Sending MnB {0} notification to MnB for " - "loadbalancer {1} tenant_id {2} from " - "{3} to {4}: PAYLOAD = {5}". - format(event_type, - lbid, - loadbalancers[lbid]["tenant_id"], - prev_start, - prev_end, - payload) - ) - _notify('lbaas', event_type, payload) - count += 1 - - # Purge old stats - if CONF['admin_api']['stats_purge_enable']: - hours = CONF['admin_api']['stats_purge_days'] * 24 - delta = datetime.timedelta(hours=hours) - exp = timeutils.utcnow() - delta - exp_time = exp.strftime('%Y-%m-%d %H:%M:%S') - purged = session.query(Stats).\ - filter(Stats.period_end < exp_time).\ - delete() - LOG.info("Purged {0} usage statistics from before {1}". - format(purged, exp_time)) - - session.commit() - LOG.info("Sent {0} MnB {1} notifications to MnB".format(count, event_type)) - - -def _send_test(event_type, lbid, tenant_id): - - # Build the payload - now = str(timeutils.utcnow()) - LOG.error("Sending {0} test notifications".format(lbid)) - - if not test_mnb_connection(): - # Abort the test notifications - LOG.info("Aborting test Notifications. 
Could not connect") - return - - # Note lbid is the number of notifications to send - lbid += 1 - for x in xrange(1, lbid): - payload = _build_payload(now, now, "Test LB", str(x), - str(tenant_id), 'active') - _notify('lbaas', 'lbaas.instance.test', payload) - - -def _build_payload(begin, end, name, id, tenant, status): - return { - "audit_period_beginning": begin, - "audit_period_ending": end, - "display_name": name, - "id": id, - "type": "lbaas.std", - "type_id": 1, - "tenant_id": tenant, - "state": status.lower(), - "state_description": status.lower() - } - - -def _build_metrics(bytes): - return [{ - "metric_name": "lbaas.network.outgoing.bytes", - "metric_type": "gauge", - "metric_units": "BYTES", - "metric_value": bytes - }] - - -def _rounded_down_min(ts, N): - ts = ts - datetime.timedelta(minutes=ts.minute % N, - seconds=ts.second, - microseconds=ts.microsecond) - return ts - - -def _get_lbs(): - all_lbs = {} - with db_session() as session: - lbs = session.query( - LoadBalancer.id, - LoadBalancer.tenantid, - LoadBalancer.name, - LoadBalancer.status, - ).all() - - for lb in lbs: - all_lbs[lb.id] = { - "tenant_id": lb.tenantid, - "name": lb.name, - "status": lb.status - } - session.commit() - return all_lbs diff --git a/libra/common/api/server.py b/libra/common/api/server.py deleted file mode 100644 index 33079405..00000000 --- a/libra/common/api/server.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import eventlet - - -def make_socket(host, port, ssl_keyfile=None, ssl_certfile=None): - sock = eventlet.listen((host, port)) - # TODO: set ca_certs and cert_reqs=CERT_REQUIRED - if ssl_keyfile and ssl_certfile: - sock = eventlet.wrap_ssl(sock, certfile=ssl_certfile, - keyfile=ssl_keyfile, - server_side=True) - return sock diff --git a/libra/common/exc.py b/libra/common/exc.py deleted file mode 100644 index ffbbec17..00000000 --- a/libra/common/exc.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class DeletedStateError(Exception): - """ - Exception representing an invalid operation on a load balancer that - is in the deleted state. 
- """ - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) - - -class DetailError(Exception): - def __init__(self, message): - self.message = message - - def __str__(self): - return repr(self.message) - - -class ExhaustedError(DetailError): - """ - Exception representing that something is exhausted for free resources. - """ diff --git a/libra/common/faults.py b/libra/common/faults.py deleted file mode 100644 index 61144218..00000000 --- a/libra/common/faults.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - - -class ServiceFault(object): - def __init__(self, code, message, details): - self.code = code - self.message = message - self.details = details - - def to_json(self): - data = { - "serviceFault": { - "code": self.code, - "message": self.message, - "details": self.details - } - } - return data - - def __str__(self): - return json.dumps(self.to_json(), indent=4) - - -class BadRequest(ServiceFault): - def __init__(self, - validation_errors, - code="400", - message="Validation fault", - details="The object is not valid"): - ServiceFault.__init__(self, code, message, details) - self.validation_errors = validation_errors - - def to_json(self): - data = { - "badRequest": { - "code": self.code, - "message": self.message, - "details": self.details, - "validationErrors": { - "message": self.validation_errors - } - } - } - return data - - def __str__(self): - return json.dumps(self.to_json(), indent=4) diff --git a/libra/common/json_gearman.py b/libra/common/json_gearman.py deleted file mode 100644 index 503a8835..00000000 --- a/libra/common/json_gearman.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -from gearman import GearmanClient, GearmanWorker, DataEncoder - - -class JSONDataEncoder(DataEncoder): - """ Class to transform data that the worker either receives or sends. """ - - @classmethod - def encode(cls, encodable_object): - """ Encode JSON object as string """ - return json.dumps(encodable_object) - - @classmethod - def decode(cls, decodable_string): - """ Decode string to JSON object """ - return json.loads(decodable_string) - - -class JSONGearmanWorker(GearmanWorker): - """ Overload the Gearman worker class so we can set the data encoder. 
""" - data_encoder = JSONDataEncoder - - -class JSONGearmanClient(GearmanClient): - """ Overload the Gearman client class so we can set the data encoder. """ - data_encoder = JSONDataEncoder diff --git a/libra/common/log.py b/libra/common/log.py deleted file mode 100644 index e604a857..00000000 --- a/libra/common/log.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations - -import logging -import logging.handlers -import gzip -import os -import sys -import time -import glob -import codecs -from libra.openstack.common import log - - -def get_descriptors(): - """ - Utility method to get all Oslo logging filedescrptiors. - - Needs to be called after log.setup(...) - """ - descriptors = [] - - def _add(fh): - if fh not in descriptors: - descriptors.append(fh) - - for logger in log._loggers.values(): - for handler in logger.handlers: - _add(handler.stream) - for i in logging.root.handlers: - _add(i.stream) - return descriptors - - -class NewlineFormatter(logging.Formatter): - def format(self, record): - record.message = record.getMessage() - if self.usesTime(): - record.asctime = self.formatTime(record, self.datefmt) - s = self._fmt % record.__dict__ - if record.exc_info: - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - if record.exc_text: - try: - s = s + record.exc_text - except UnicodeError: - s = s + record.exc_text.decode(sys.getfilesystemencoding(), - 'replace') - s = s.replace('\n', ' ') - return s - - -class CompressedTimedRotatingFileHandler( - logging.handlers.TimedRotatingFileHandler -): - def doRollover(self): - self.stream.close() - t = self.rolloverAt - self.interval - timeTuple = time.localtime(t) - tfn = '{0}.{1}'.format( - self.baseFilename, time.strftime(self.suffix, timeTuple) - ) - if os.path.exists(tfn): - os.remove(tfn) - os.rename(self.baseFilename, tfn) - # Delete oldest log - # TODO: clear multiple old logs - if self.backupCount > 0: - s = glob.glob('{0}.20*'.format(self.baseFilename)) - if len(s) > self.backupCount: - s.sort() - os.remove(s[0]) - if self.encoding: - self.stream = codecs.open(self.baseFilename, 'w', self.encoding) - else: - self.stream = open(self.baseFilename, 'w') - currentTime = int(time.time()) - while self.rolloverAt <= currentTime: - self.rolloverAt = self.rolloverAt + self.interval - zfile = '{0}.gz'.format(tfn) - if os.path.exists(zfile): - os.remove(zfile) - f_in = open(tfn, "rb") - f_out = gzip.open(zfile, "wb") - f_out.writelines(f_in) - f_out.close() - f_in.close() - os.remove(tfn) diff --git a/libra/common/options.py b/libra/common/options.py deleted file mode 100644 index 80e97e13..00000000 --- a/libra/common/options.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
diff --git a/libra/common/options.py b/libra/common/options.py
deleted file mode 100644
index 80e97e13..00000000
--- a/libra/common/options.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-
-import os
-import os.path
-
-from oslo.config import cfg
-
-CONF = cfg.CONF
-
-common_cli_opts = [
-    cfg.BoolOpt('daemon',
-                default=True,
-                help='Run as a daemon'),
-    cfg.StrOpt('group',
-               help='Group to use for daemon mode'),
-    cfg.StrOpt('user',
-               help='User to use for daemon mode'),
-    cfg.BoolOpt('billing_enable',
-                default=False,
-                help='Enable or disable MnB notifications')
-]
-
-gearman_opts = [
-    cfg.BoolOpt('keepalive',
-                default=False,
-                help='Enable TCP KEEPALIVE pings'),
-    cfg.IntOpt('keepcnt',
-               metavar='COUNT',
-               help='Max KEEPALIVE probes to send before killing connection'),
-    cfg.IntOpt('keepidle',
-               metavar='SECONDS',
-               help='Seconds of idle time before sending KEEPALIVE probes'),
-    cfg.IntOpt('keepintvl',
-               metavar='SECONDS',
-               help='Seconds between TCP KEEPALIVE probes'),
-    cfg.IntOpt('poll',
-               default=1,
-               metavar='SECONDS',
-               help='Gearman worker polling timeout'),
-    cfg.IntOpt('reconnect_sleep',
-               default=60,
-               metavar='SECONDS',
-               help='Seconds to sleep between job server reconnects'),
-    cfg.ListOpt('servers',
-                default=['localhost:4730'],
-                metavar='HOST:PORT,...',
-                help='List of Gearman job servers'),
-    cfg.StrOpt('ssl_ca',
-               metavar='FILE',
-               help='Gearman SSL certificate authority'),
-    cfg.StrOpt('ssl_cert',
-               metavar='FILE',
-               help='Gearman SSL certificate'),
-    cfg.StrOpt('ssl_key',
-               metavar='FILE',
-               help='Gearman SSL key'),
-]
-
-
-def add_common_opts():
-    CONF.register_opts(gearman_opts, group='gearman')
-    CONF.register_cli_opts(common_cli_opts)
-
-
-def check_gearman_ssl_files():
-    """
-    If using Gearman with SSL, validate that the SSL files exist and
-    are readable by the user. It's a common problem that connections to
-    Gearman will silently fail because these files cannot be read due to
-    the private key being readable only by the file owner.
-    """
-    if 'gearman' not in CONF:
-        return
-    for key in ['ssl_ca', 'ssl_cert', 'ssl_key']:
-        if key in CONF['gearman']:
-            fname = CONF['gearman'][key]
-            if fname is None:
-                continue
-            if not os.path.exists(fname):
-                raise Exception("Gearman SSL file %s does not exist" % fname)
-            if not os.access(fname, os.R_OK):
-                raise Exception("Unable to read Gearman SSL file %s" % fname)
diff --git a/libra/gear/__init__.py b/libra/gear/__init__.py
deleted file mode 100644
index 1bc749cd..00000000
--- a/libra/gear/__init__.py
+++ /dev/null
@@ -1,2918 +0,0 @@
-# Copyright 2013-2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
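Taken together, a service entry point would register these options, parse the configuration, and fail fast on unreadable SSL material; a minimal sketch of that startup sequence (assuming defaults for everything else):

    from oslo.config import cfg

    from libra.common import options

    options.add_common_opts()
    cfg.CONF(project='libra')
    # Raises immediately if a configured Gearman SSL file is missing or
    # unreadable, instead of failing silently at connect time.
    options.check_gearman_ssl_files()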
-
-import errno
-import logging
-import os
-import select
-import socket
-import ssl
-import struct
-import threading
-import time
-import uuid as uuid_module
-
-from libra.gear import constants
-from libra.gear.acl import ACLError, ACLEntry, ACL  # noqa
-
-try:
-    import Queue as queue
-except ImportError:
-    import queue as queue
-
-try:
-    import statsd
-except ImportError:
-    statsd = None
-
-PRECEDENCE_NORMAL = 0
-PRECEDENCE_LOW = 1
-PRECEDENCE_HIGH = 2
-
-POLLIN = 0x001
-POLLOUT = 0x004
-POLLERR = 0x008
-POLLHUP = 0x010
-POLLNVAL = 0x020
-
-# eventlet/gevent doesn't support select.poll. If select.poll is used,
-# the python interpreter is blocked as a whole instead of switching from
-# the current thread that is about to block to another runnable thread.
-# So emulate select.poll with select.select; since we are using python
-# anyway, performance isn't the main concern.
-try:
-    import eventlet.patcher
-
-    def _using_eventlet_green_select():
-        return eventlet.patcher.is_monkey_patched(select)
-except ImportError:
-    def _using_eventlet_green_select():
-        return False
-
-
-class _SelectSelect(object):
-    """select.poll emulation by using select.select.
-
-    Only register and poll are needed at the moment.
-    """
-
-    def __init__(self):
-        self.rlist = []
-        self.wlist = []
-        self.xlist = []
-
-    def register(self, fd, events):
-        if isinstance(fd, socket.socket):
-            fd = fd.fileno()
-        assert isinstance(fd, int)
-        if events & POLLIN:
-            self.rlist.append(fd)
-            events &= ~POLLIN
-        if events & POLLOUT:
-            self.wlist.append(fd)
-            events &= ~POLLOUT
-        if events:
-            self.xlist.append(fd)
-
-    def poll(self, timeout=-1):
-        if timeout == -1:
-            # epoll uses -1 for infinite timeout, select uses None.
-            timeout = None
-        else:
-            timeout = float(timeout) / 1000
-        # XXX workaround a bug in eventlet
-        # see https://github.com/eventlet/eventlet/pull/25
-        if timeout == 0 and _using_eventlet_green_select():
-            timeout = 0.1
-
-        rlist, wlist, xlist = select.select(self.rlist, self.wlist,
-                                            self.xlist, timeout)
-        # collections.defaultdict is introduced by python 2.5 and
-        # XenServer uses python 2.4. We don't use it for XenServer.
-        # events_dict = collections.defaultdict(int)
-        # events_dict[fd] |= event
-        events_dict = {}
-        for fd in rlist:
-            events_dict[fd] = events_dict.get(fd, 0) | POLLIN
-        for fd in wlist:
-            events_dict[fd] = events_dict.get(fd, 0) | POLLOUT
-        for fd in xlist:
-            events_dict[fd] = events_dict.get(fd, 0) | (POLLERR |
-                                                        POLLHUP |
-                                                        POLLNVAL)
-        return events_dict.items()
-
-
-if _using_eventlet_green_select():
-    logging.debug("eventlet green select detected; emulating select.poll")
-    SelectPoll = _SelectSelect
-else:
-    SelectPoll = select.poll  # use the default
-
-
-class ConnectionError(Exception):
-    pass
-
-
-class InvalidDataError(Exception):
-    pass
-
-
-class ConfigurationError(Exception):
-    pass
-
-
-class NoConnectedServersError(Exception):
-    pass
-
-
-class UnknownJobError(Exception):
-    pass
-
-
-class InterruptedError(Exception):
-    pass
-
-
-class TimeoutError(Exception):
-    pass
-
-
-class GearmanError(Exception):
-    pass
-
-
-def convert_to_bytes(data):
-    try:
-        data = data.encode('utf8')
-    except AttributeError:
-        pass
-    return data
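The Task class that follows is the synchronization primitive the client side is built on; stripped to its essence, the pattern is a threading.Event shared between the caller and the poll thread (the names in this sketch are illustrative):

    import threading

    event = threading.Event()

    def on_response_packet():
        # Called from the poll thread when the matching reply arrives;
        # this mirrors Task.setComplete().
        event.set()

    # The caller blocks with a timeout, mirroring Task.wait(timeout);
    # wait() returns True if set() was called, False if it timed out.
    if not event.wait(30):
        print("timed out waiting for the Gearman response")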
- """ - - self._wait_event.wait(timeout) - return self._wait_event.is_set() - - -class SubmitJobTask(Task): - def __init__(self, job): - super(SubmitJobTask, self).__init__() - self.job = job - - -class OptionReqTask(Task): - pass - - -class Connection(object): - """A Connection to a Gearman Server. - - :arg str client_id: The client ID associated with this connection. - It will be appending to the name of the logger (e.g., - gear.Connection.client_id). Defaults to 'unknown'. - """ - - def __init__(self, host, port, ssl_key=None, ssl_cert=None, ssl_ca=None, - client_id='unknown'): - self.log = logging.getLogger("gear.Connection.%s" % (client_id,)) - self.host = host - self.port = port - self.ssl_key = ssl_key - self.ssl_cert = ssl_cert - self.ssl_ca = ssl_ca - - self.use_ssl = False - if all([self.ssl_key, self.ssl_cert, self.ssl_ca]): - self.use_ssl = True - - self.echo_lock = threading.Lock() - self._init() - - def _init(self): - self.conn = None - self.connected = False - self.connect_time = None - self.related_jobs = {} - self.pending_tasks = [] - self.admin_requests = [] - self.echo_conditions = {} - self.options = set() - self.changeState("INIT") - - def changeState(self, state): - # The state variables are provided as a convenience (and used by - # the Worker implementation). They aren't used or modified within - # the connection object itself except to reset to "INIT" immediately - # after reconnection. - self.log.debug("Setting state to: %s" % state) - self.state = state - self.state_time = time.time() - - def __repr__(self): - return '' % ( - id(self), self.host, self.port) - - def connect(self): - """Open a connection to the server. - - :raises ConnectionError: If unable to open the socket. - """ - - self.log.debug("Connecting to %s port %s" % (self.host, self.port)) - s = None - for res in socket.getaddrinfo(self.host, self.port, - socket.AF_UNSPEC, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - try: - s = socket.socket(af, socktype, proto) - except socket.error: - s = None - continue - - if self.use_ssl: - self.log.debug("Using SSL") - s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1, - cert_reqs=ssl.CERT_REQUIRED, - keyfile=self.ssl_key, - certfile=self.ssl_cert, - ca_certs=self.ssl_ca) - - try: - s.connect(sa) - except socket.error: - s.close() - s = None - continue - break - if s is None: - self.log.debug("Error connecting to %s port %s" % ( - self.host, self.port)) - raise ConnectionError("Unable to open socket") - self.log.info("Connected to %s port %s" % (self.host, self.port)) - self.conn = s - self.connected = True - self.connect_time = time.time() - - def disconnect(self): - """Disconnect from the server and remove all associated state - data. - """ - - if self.conn: - try: - self.conn.close() - except Exception: - pass - - self.log.info("Disconnected from %s port %s" % (self.host, self.port)) - self._init() - - def reconnect(self): - """Disconnect from and reconnect to the server, removing all - associated state data. - """ - self.disconnect() - self.connect() - - def sendRaw(self, data): - """Send raw data over the socket. - - :arg bytes data The raw data to send - """ - while True: - try: - self.conn.send(data) - except ssl.SSLError as e: - if e.errno == ssl.SSL_ERROR_WANT_READ: - continue - elif e.errno == ssl.SSL_ERROR_WANT_WRITE: - continue - else: - raise - break - - def sendPacket(self, packet): - """Send a packet to the server. - - :arg Packet packet: The :py:class:`Packet` to send. 
- """ - self.log.info("Sending packet to %s: %s" % (self, packet)) - self.sendRaw(packet.toBinary()) - - def _getAdminRequest(self): - return self.admin_requests.pop(0) - - def _readRawBytes(self, bytes_to_read): - while True: - try: - buff = self.conn.recv(bytes_to_read) - except ssl.SSLError as e: - if e.errno == ssl.SSL_ERROR_WANT_READ: - continue - elif e.errno == ssl.SSL_ERROR_WANT_WRITE: - continue - else: - raise - break - - bytes_read = len(buff) - if self.use_ssl and (bytes_read < bytes_to_read): - remaining = self.conn.pending() - while remaining and (bytes_read < bytes_to_read): - buff += self.conn.recv(bytes_to_read - bytes_read) - remaining = self.conn.pending() - bytes_read = len(buff) - - return buff - - def readPacket(self): - """Read one packet or administrative response from the server. - - Blocks until the complete packet or response is read. - - :returns: The :py:class:`Packet` or :py:class:`AdminRequest` read. - :rtype: :py:class:`Packet` or :py:class:`AdminRequest` - """ - packet = b'' - datalen = 0 - code = None - ptype = None - admin = None - admin_request = None - while True: - c = self._readRawBytes(1) - if not c: - return None - if admin is None: - if c == b'\x00': - admin = False - else: - admin = True - admin_request = self._getAdminRequest() - packet += c - if admin: - if admin_request.isComplete(packet): - return admin_request - else: - if len(packet) == 12: - code, ptype, datalen = struct.unpack('!4sii', packet) - if len(packet) == datalen + 12: - return Packet(code, ptype, packet[12:], connection=self) - - def sendAdminRequest(self, request, timeout=90): - """Send an administrative request to the server. - - :arg AdminRequest request: The :py:class:`AdminRequest` to send. - :arg numeric timeout: Number of seconds to wait until the response - is received. If None, wait forever (default: 90 seconds). - :raises TimeoutError: If the timeout is reached before the response - is received. - """ - self.admin_requests.append(request) - self.sendRaw(request.getCommand()) - complete = request.waitForResponse(timeout) - if not complete: - raise TimeoutError() - - def echo(self, data=None, timeout=30): - """Perform an echo test on the server. - - This method waits until the echo response has been received or the - timeout has been reached. - - :arg bytes data: The data to request be echoed. If None, a random - unique byte string will be generated. - :arg numeric timeout: Number of seconds to wait until the response - is received. If None, wait forever (default: 30 seconds). - :raises TimeoutError: If the timeout is reached before the response - is received. 
- """ - if data is None: - data = uuid_module.uuid4().hex.encode('utf8') - self.echo_lock.acquire() - try: - if data in self.echo_conditions: - raise InvalidDataError("This client is already waiting on an " - "echo response of: %s" % data) - condition = threading.Condition() - self.echo_conditions[data] = condition - finally: - self.echo_lock.release() - - self.sendEchoReq(data) - - condition.acquire() - condition.wait(timeout) - condition.release() - - if data in self.echo_conditions: - return data - raise TimeoutError() - - def sendEchoReq(self, data): - p = Packet(constants.REQ, constants.ECHO_REQ, data) - self.sendPacket(p) - - def handleEchoRes(self, data): - condition = None - self.echo_lock.acquire() - try: - condition = self.echo_conditions.get(data) - if condition: - del self.echo_conditions[data] - finally: - self.echo_lock.release() - - if not condition: - return False - condition.notifyAll() - return True - - def handleOptionRes(self, option): - self.options.add(option) - - -class AdminRequest(object): - """Encapsulates a request (and response) sent over the - administrative protocol. This is a base class that may not be - instantiated dircectly; a subclass implementing a specific command - must be used instead. - - :arg list arguments: A list of byte string arguments for the command. - - The following instance attributes are available: - - **response** (bytes) - The response from the server. - **arguments** (bytes) - The argument supplied with the constructor. - **command** (bytes) - The administrative command. - """ - - command = None - arguments = [] - response = None - - def __init__(self, *arguments): - self.wait_event = threading.Event() - self.arguments = arguments - if type(self) == AdminRequest: - raise NotImplementedError("AdminRequest must be subclassed") - - def __repr__(self): - return '' % ( - id(self), self.command) - - def getCommand(self): - cmd = self.command - if self.arguments: - cmd += b' ' + b' '.join(self.arguments) - cmd += b'\n' - return cmd - - def isComplete(self, data): - if (data[-3:] == b'\n.\n' or - data[-5:] == b'\r\n.\r\n' or - data == b'.\n' or - data == b'.\r\n'): - self.response = data - return True - return False - - def setComplete(self): - self.wait_event.set() - - def waitForResponse(self, timeout=None): - self.wait_event.wait(timeout) - return self.wait_event.is_set() - - -class StatusAdminRequest(AdminRequest): - """A "status" administrative request. - - The response from gearman may be found in the **response** attribute. - """ - command = b'status' - - def __init__(self): - super(StatusAdminRequest, self).__init__() - - -class ShowJobsAdminRequest(AdminRequest): - """A "show jobs" administrative request. - - The response from gearman may be found in the **response** attribute. - """ - command = b'show jobs' - - def __init__(self): - super(ShowJobsAdminRequest, self).__init__() - - -class ShowUniqueJobsAdminRequest(AdminRequest): - """A "show unique jobs" administrative request. - - The response from gearman may be found in the **response** attribute. - """ - - command = b'show unique jobs' - - def __init__(self): - super(ShowUniqueJobsAdminRequest, self).__init__() - - -class CancelJobAdminRequest(AdminRequest): - """A "cancel job" administrative request. - - :arg str handle: The job handle to be canceled. - - The response from gearman may be found in the **response** attribute. 
- """ - - command = b'cancel job' - - def __init__(self, handle): - handle = convert_to_bytes(handle) - super(CancelJobAdminRequest, self).__init__(handle) - - def isComplete(self, data): - if data[-1:] == b'\n': - self.response = data - return True - return False - - -class VersionAdminRequest(AdminRequest): - """A "version" administrative request. - - The response from gearman may be found in the **response** attribute. - """ - - command = b'version' - - def __init__(self): - super(VersionAdminRequest, self).__init__() - - def isComplete(self, data): - if data[-1:] == b'\n': - self.response = data - return True - return False - - -class WorkersAdminRequest(AdminRequest): - """A "workers" administrative request. - - The response from gearman may be found in the **response** attribute. - """ - command = b'workers' - - def __init__(self): - super(WorkersAdminRequest, self).__init__() - - -class Packet(object): - """A data packet received from or to be sent over a - :py:class:`Connection`. - - :arg bytes code: The Gearman magic code (:py:data:`constants.REQ` or - :py:data:`constants.RES`) - :arg bytes ptype: The packet type (one of the packet types in - constants). - :arg bytes data: The data portion of the packet. - :arg Connection connection: The connection on which the packet - was received (optional). - :raises InvalidDataError: If the magic code is unknown. - """ - - def __init__(self, code, ptype, data, connection=None): - if not isinstance(code, bytes) and not isinstance(code, bytearray): - raise TypeError("code must be of type bytes or bytearray") - if code[0:1] != b'\x00': - raise InvalidDataError("First byte of packet must be 0") - self.code = code - self.ptype = ptype - if not isinstance(data, bytes) and not isinstance(data, bytearray): - raise TypeError("data must be of type bytes or bytearray") - self.data = data - self.connection = connection - - def __repr__(self): - ptype = constants.types.get(self.ptype, 'UNKNOWN') - try: - extra = self._formatExtraData() - except Exception: - extra = '' - return '' % (id(self), ptype, extra) - - def _formatExtraData(self): - if self.ptype in [constants.JOB_CREATED, - constants.JOB_ASSIGN, - constants.GET_STATUS, - constants.STATUS_RES, - constants.WORK_STATUS, - constants.WORK_COMPLETE, - constants.WORK_FAIL, - constants.WORK_EXCEPTION, - constants.WORK_DATA, - constants.WORK_WARNING]: - return ' handle: %s' % self.getArgument(0) - - if self.ptype == constants.JOB_ASSIGN_UNIQ: - print self.data - return (' handle: %s function: %s unique: %s' % - (self.getArgument(0), - self.getArgument(1), - self.getArgument(2))) - - if self.ptype in [constants.SUBMIT_JOB, - constants.SUBMIT_JOB_BG, - constants.SUBMIT_JOB_HIGH, - constants.SUBMIT_JOB_HIGH_BG, - constants.SUBMIT_JOB_LOW, - constants.SUBMIT_JOB_LOW_BG, - constants.SUBMIT_JOB_SCHED, - constants.SUBMIT_JOB_EPOCH]: - return ' function: %s unique: %s' % (self.getArgument(0), - self.getArgument(1)) - - if self.ptype in [constants.CAN_DO, - constants.CANT_DO, - constants.CAN_DO_TIMEOUT]: - return ' function: %s' % (self.getArgument(0),) - - if self.ptype == constants.SET_CLIENT_ID: - return ' id: %s' % (self.getArgument(0),) - - if self.ptype in [constants.OPTION_REQ, - constants.OPTION_RES]: - return ' option: %s' % (self.getArgument(0),) - - if self.ptype == constants.ERROR: - return ' code: %s message: %s' % (self.getArgument(0), - self.getArgument(1)) - return '' - - def toBinary(self): - """Return a Gearman wire protocol binary representation of the packet. 
- - :returns: The packet in binary form. - :rtype: bytes - """ - b = struct.pack('!4sii', self.code, self.ptype, len(self.data)) - b = bytearray(b) - b += self.data - return b - - def getArgument(self, index, last=False): - """Get the nth argument from the packet data. - - :arg int index: The argument index to look up. - :arg bool last: Whether this is the last argument (and thus - nulls should be ignored) - :returns: The argument value. - :rtype: bytes - """ - - parts = self.data.split(b'\x00') - if not last: - return parts[index] - return b'\x00'.join(parts[index:]) - - def getJob(self): - """Get the :py:class:`Job` associated with the job handle in - this packet. - - :returns: The :py:class:`Job` for this packet. - :rtype: Job - :raises UnknownJobError: If the job is not known. - """ - handle = self.getArgument(0) - job = self.connection.related_jobs.get(handle) - if not job: - raise UnknownJobError() - return job - - -class BaseClientServer(object): - def __init__(self, client_id=None): - if client_id: - self.client_id = convert_to_bytes(client_id) - self.log = logging.getLogger("gear.BaseClientServer.%s" % - (self.client_id,)) - else: - self.client_id = None - self.log = logging.getLogger("gear.BaseClientServer") - self.running = True - self.active_connections = [] - self.inactive_connections = [] - - self.connection_index = -1 - # A lock and notification mechanism to handle not having any - # current connections - self.connections_condition = threading.Condition() - - # A pipe to wake up the poll loop in case it needs to restart - self.wake_read, self.wake_write = os.pipe() - - self.poll_thread = threading.Thread(name="Gearman client poll", - target=self._doPollLoop) - self.poll_thread.daemon = True - self.poll_thread.start() - self.connect_thread = threading.Thread(name="Gearman client connect", - target=self._doConnectLoop) - self.connect_thread.daemon = True - self.connect_thread.start() - - def _doConnectLoop(self): - # Outer run method of the reconnection thread - while self.running: - self.connections_condition.acquire() - while self.running and not self.inactive_connections: - self.log.debug("Waiting for change in available servers " - "to reconnect") - self.connections_condition.wait() - self.connections_condition.release() - self.log.debug("Checking if servers need to be reconnected") - try: - if self.running and not self._connectLoop(): - # Nothing happened - time.sleep(2) - except Exception: - self.log.exception("Exception in connect loop:") - - def _connectLoop(self): - # Inner method of the reconnection loop, triggered by - # a connection change - success = False - for conn in self.inactive_connections[:]: - self.log.debug("Trying to reconnect %s" % conn) - try: - conn.reconnect() - except ConnectionError: - self.log.debug("Unable to connect to %s" % conn) - continue - except Exception: - self.log.exception("Exception while connecting to %s" % conn) - continue - - try: - self._onConnect(conn) - except Exception: - self.log.exception("Exception while performing on-connect " - "tasks for %s" % conn) - continue - self.connections_condition.acquire() - self.inactive_connections.remove(conn) - self.active_connections.append(conn) - self.connections_condition.notifyAll() - os.write(self.wake_write, b'1\n') - self.connections_condition.release() - - try: - self._onActiveConnection(conn) - except Exception: - self.log.exception("Exception while performing active conn " - "tasks for %s" % conn) - - success = True - return success - - def _onConnect(self, conn): - # Called 
immediately after a successful (re-)connection - pass - - def _onActiveConnection(self, conn): - # Called immediately after a connection is activated - pass - - def _lostConnection(self, conn): - # Called as soon as a connection is detected as faulty. Remove - # it and return ASAP and let the connection thread deal with it. - self.log.debug("Marking %s as disconnected" % conn) - self.connections_condition.acquire() - try: - jobs = conn.related_jobs.values() - if conn in self.active_connections: - self.active_connections.remove(conn) - if conn not in self.inactive_connections: - self.inactive_connections.append(conn) - finally: - self.connections_condition.notifyAll() - self.connections_condition.release() - for job in jobs: - self.handleDisconnect(job) - - def _doPollLoop(self): - # Outer run method of poll thread. - while self.running: - self.connections_condition.acquire() - while self.running and not self.active_connections: - self.log.debug("Waiting for change in available connections " - "to poll") - self.connections_condition.wait() - self.connections_condition.release() - try: - self._pollLoop() - except socket.error, e: - if e.errno == errno.ECONNRESET: - self.log.debug("Connection reset by peer") - # This will get logged later at info level as - # "Marking ... as disconnected" - except Exception: - self.log.exception("Exception in poll loop:") - - def _pollLoop(self): - # Inner method of poll loop - self.log.debug("Preparing to poll") - poll = SelectPoll() - bitmask = (select.POLLIN | select.POLLERR | - select.POLLHUP | select.POLLNVAL) - # Reverse mapping of fd -> connection - conn_dict = {} - for conn in self.active_connections: - poll.register(conn.conn.fileno(), bitmask) - conn_dict[conn.conn.fileno()] = conn - # Register the wake pipe so that we can break if we need to - # reconfigure connections - poll.register(self.wake_read, bitmask) - while self.running: - self.log.debug("Polling %s connections" % - len(self.active_connections)) - ret = poll.poll() - for fd, event in ret: - if fd == self.wake_read: - self.log.debug("Woken by pipe") - while True: - if os.read(self.wake_read, 1) == b'\n': - break - return - conn = conn_dict[fd] - if event & select.POLLIN: - self.log.debug("Processing input on %s" % conn) - p = conn.readPacket() - if p: - if isinstance(p, Packet): - self.handlePacket(p) - else: - self.handleAdminRequest(p) - else: - self.log.debug("Received no data on %s" % conn) - self._lostConnection(conn) - return - else: - self.log.debug("Received error event on %s" % conn) - self._lostConnection(conn) - return - - def handlePacket(self, packet): - """Handle a received packet. - - This method is called whenever a packet is received from any - connection. It normally calls the handle method appropriate - for the specific packet. - - :arg Packet packet: The :py:class:`Packet` that was received. 
- """ - - self.log.info("Received packet from %s: %s" % (packet.connection, - packet)) - start = time.time() - if packet.ptype == constants.JOB_CREATED: - self.handleJobCreated(packet) - elif packet.ptype == constants.WORK_COMPLETE: - self.handleWorkComplete(packet) - elif packet.ptype == constants.WORK_FAIL: - self.handleWorkFail(packet) - elif packet.ptype == constants.WORK_EXCEPTION: - self.handleWorkException(packet) - elif packet.ptype == constants.WORK_DATA: - self.handleWorkData(packet) - elif packet.ptype == constants.WORK_WARNING: - self.handleWorkWarning(packet) - elif packet.ptype == constants.WORK_STATUS: - self.handleWorkStatus(packet) - elif packet.ptype == constants.STATUS_RES: - self.handleStatusRes(packet) - elif packet.ptype == constants.GET_STATUS: - self.handleGetStatus(packet) - elif packet.ptype == constants.JOB_ASSIGN_UNIQ: - self.handleJobAssignUnique(packet) - elif packet.ptype == constants.JOB_ASSIGN: - self.handleJobAssign(packet) - elif packet.ptype == constants.NO_JOB: - self.handleNoJob(packet) - elif packet.ptype == constants.NOOP: - self.handleNoop(packet) - elif packet.ptype == constants.SUBMIT_JOB: - self.handleSubmitJob(packet) - elif packet.ptype == constants.SUBMIT_JOB_BG: - self.handleSubmitJobBg(packet) - elif packet.ptype == constants.SUBMIT_JOB_HIGH: - self.handleSubmitJobHigh(packet) - elif packet.ptype == constants.SUBMIT_JOB_HIGH_BG: - self.handleSubmitJobHighBg(packet) - elif packet.ptype == constants.SUBMIT_JOB_LOW: - self.handleSubmitJobLow(packet) - elif packet.ptype == constants.SUBMIT_JOB_LOW_BG: - self.handleSubmitJobLowBg(packet) - elif packet.ptype == constants.SUBMIT_JOB_SCHED: - self.handleSubmitJobSched(packet) - elif packet.ptype == constants.SUBMIT_JOB_EPOCH: - self.handleSubmitJobEpoch(packet) - elif packet.ptype == constants.GRAB_JOB_UNIQ: - self.handleGrabJobUniq(packet) - elif packet.ptype == constants.GRAB_JOB: - self.handleGrabJob(packet) - elif packet.ptype == constants.PRE_SLEEP: - self.handlePreSleep(packet) - elif packet.ptype == constants.SET_CLIENT_ID: - self.handleSetClientID(packet) - elif packet.ptype == constants.CAN_DO: - self.handleCanDo(packet) - elif packet.ptype == constants.CAN_DO_TIMEOUT: - self.handleCanDoTimeout(packet) - elif packet.ptype == constants.CANT_DO: - self.handleCantDo(packet) - elif packet.ptype == constants.RESET_ABILITIES: - self.handleResetAbilities(packet) - elif packet.ptype == constants.ECHO_REQ: - self.handleEchoReq(packet) - elif packet.ptype == constants.ECHO_RES: - self.handleEchoRes(packet) - elif packet.ptype == constants.ERROR: - self.handleError(packet) - elif packet.ptype == constants.ALL_YOURS: - self.handleAllYours(packet) - elif packet.ptype == constants.OPTION_REQ: - self.handleOptionReq(packet) - elif packet.ptype == constants.OPTION_RES: - self.handleOptionRes(packet) - else: - self.log.error("Received unknown packet: %s" % packet) - end = time.time() - self.reportTimingStats(packet.ptype, end - start) - - def reportTimingStats(self, ptype, duration): - """Report processing times by packet type - - This method is called by handlePacket to report how long - processing took for each packet. The default implementation - does nothing. - - :arg bytes ptype: The packet type (one of the packet types in - constants). - :arg float duration: The time (in seconds) it took to process - the packet. 
- """ - pass - - def _defaultPacketHandler(self, packet): - self.log.error("Received unhandled packet: %s" % packet) - - def handleJobCreated(self, packet): - return self._defaultPacketHandler(packet) - - def handleWorkComplete(self, packet): - return self._defaultPacketHandler(packet) - - def handleWorkFail(self, packet): - return self._defaultPacketHandler(packet) - - def handleWorkException(self, packet): - return self._defaultPacketHandler(packet) - - def handleWorkData(self, packet): - return self._defaultPacketHandler(packet) - - def handleWorkWarning(self, packet): - return self._defaultPacketHandler(packet) - - def handleWorkStatus(self, packet): - return self._defaultPacketHandler(packet) - - def handleStatusRes(self, packet): - return self._defaultPacketHandler(packet) - - def handleGetStatus(self, packet): - return self._defaultPacketHandler(packet) - - def handleJobAssignUnique(self, packet): - return self._defaultPacketHandler(packet) - - def handleJobAssign(self, packet): - return self._defaultPacketHandler(packet) - - def handleNoJob(self, packet): - return self._defaultPacketHandler(packet) - - def handleNoop(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJob(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobBg(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobHigh(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobHighBg(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobLow(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobLowBg(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobSched(self, packet): - return self._defaultPacketHandler(packet) - - def handleSubmitJobEpoch(self, packet): - return self._defaultPacketHandler(packet) - - def handleGrabJobUniq(self, packet): - return self._defaultPacketHandler(packet) - - def handleGrabJob(self, packet): - return self._defaultPacketHandler(packet) - - def handlePreSleep(self, packet): - return self._defaultPacketHandler(packet) - - def handleSetClientID(self, packet): - return self._defaultPacketHandler(packet) - - def handleCanDo(self, packet): - return self._defaultPacketHandler(packet) - - def handleCanDoTimeout(self, packet): - return self._defaultPacketHandler(packet) - - def handleCantDo(self, packet): - return self._defaultPacketHandler(packet) - - def handleResetAbilities(self, packet): - return self._defaultPacketHandler(packet) - - def handleEchoReq(self, packet): - return self._defaultPacketHandler(packet) - - def handleEchoRes(self, packet): - return self._defaultPacketHandler(packet) - - def handleError(self, packet): - return self._defaultPacketHandler(packet) - - def handleAllYours(self, packet): - return self._defaultPacketHandler(packet) - - def handleOptionReq(self, packet): - return self._defaultPacketHandler(packet) - - def handleOptionRes(self, packet): - return self._defaultPacketHandler(packet) - - def handleAdminRequest(self, request): - """Handle an administrative command response from Gearman. - - This method is called whenever a response to a previously - issued administrative command is received from one of this - client's connections. It normally releases the wait lock on - the initiating AdminRequest object. - - :arg AdminRequest request: The :py:class:`AdminRequest` that - initiated the received response. 
- """ - - self.log.info("Received admin data %s" % request) - request.setComplete() - - def shutdown(self): - """Close all connections and stop all running threads. - - The object may no longer be used after shutdown is called. - """ - self.log.debug("Beginning shutdown") - self._shutdown() - self.log.debug("Beginning cleanup") - self._cleanup() - self.log.debug("Finished shutdown") - - def _shutdown(self): - # The first part of the shutdown process where all threads - # are told to exit. - self.running = False - self.connections_condition.acquire() - try: - self.connections_condition.notifyAll() - os.write(self.wake_write, b'1\n') - finally: - self.connections_condition.release() - - def _cleanup(self): - # The second part of the shutdown process where we wait for all - # threads to exit and then clean up. - self.poll_thread.join() - self.connect_thread.join() - for connection in self.active_connections: - connection.disconnect() - self.active_connections = [] - self.inactive_connections = [] - os.close(self.wake_read) - os.close(self.wake_write) - - -class BaseClient(BaseClientServer): - def __init__(self, client_id='unknown'): - super(BaseClient, self).__init__(client_id) - self.log = logging.getLogger("gear.BaseClient.%s" % (self.client_id,)) - # A lock to use when sending packets that set the state across - # all known connections. Note that it doesn't necessarily need - # to be used for all broadcasts, only those that affect multi- - # connection state, such as setting options or functions. - self.broadcast_lock = threading.RLock() - - def addServer(self, host, port=4730, - ssl_key=None, ssl_cert=None, ssl_ca=None): - """Add a server to the client's connection pool. - - Any number of Gearman servers may be added to a client. The - client will connect to all of them and send jobs to them in a - round-robin fashion. When servers are disconnected, the - client will automatically remove them from the pool, - continuously try to reconnect to them, and return them to the - pool when reconnected. New servers may be added at any time. - - This is a non-blocking call that will return regardless of - whether the initial connection succeeded. If you need to - ensure that a connection is ready before proceeding, see - :py:meth:`waitForServer`. - - When using SSL connections, all SSL files must be specified. - - :arg str host: The hostname or IP address of the server. - :arg int port: The port on which the gearman server is listening. - :arg str ssl_key: Path to the SSL private key. - :arg str ssl_cert: Path to the SSL certificate. - :arg str ssl_ca: Path to the CA certificate. - :raises ConfigurationError: If the host/port combination has - already been added to the client. - """ - - self.log.debug("Adding server %s port %s" % (host, port)) - - self.connections_condition.acquire() - try: - for conn in self.active_connections + self.inactive_connections: - if conn.host == host and conn.port == port: - raise ConfigurationError("Host/port already specified") - conn = Connection(host, port, ssl_key, ssl_cert, ssl_ca, - self.client_id) - self.inactive_connections.append(conn) - self.connections_condition.notifyAll() - finally: - self.connections_condition.release() - - def waitForServer(self): - """Wait for at least one server to be connected. - - Block until at least one gearman server is connected. 
- """ - connected = False - while self.running: - self.connections_condition.acquire() - while self.running and not self.active_connections: - self.log.debug("Waiting for at least one active connection") - self.connections_condition.wait() - if self.active_connections: - self.log.debug("Active connection found") - connected = True - self.connections_condition.release() - if connected: - return - - def getConnection(self): - """Return a connected server. - - Finds the next scheduled connected server in the round-robin - rotation and returns it. It is not usually necessary to use - this method external to the library, as more consumer-oriented - methods such as submitJob already use it internally, but is - available nonetheless if necessary. - - :returns: The next scheduled :py:class:`Connection` object. - :rtype: :py:class:`Connection` - :raises NoConnectedServersError: If there are not currently - connected servers. - """ - - conn = None - try: - self.connections_condition.acquire() - if not self.active_connections: - raise NoConnectedServersError("No connected Gearman servers") - - self.connection_index += 1 - if self.connection_index >= len(self.active_connections): - self.connection_index = 0 - conn = self.active_connections[self.connection_index] - finally: - self.connections_condition.release() - return conn - - def broadcast(self, packet): - """Send a packet to all currently connected servers. - - :arg Packet packet: The :py:class:`Packet` to send. - """ - connections = self.active_connections[:] - for connection in connections: - try: - self.sendPacket(packet, connection) - except Exception: - # Error handling is all done by sendPacket - pass - - def sendPacket(self, packet, connection): - """Send a packet to a single connection, removing it from the - list of active connections if that fails. - - :arg Packet packet: The :py:class:`Packet` to send. - :arg Connection connection: The :py:class:`Connection` on - which to send the packet. - """ - try: - connection.sendPacket(packet) - return - except Exception: - self.log.exception("Exception while sending packet %s to %s" % - (packet, connection)) - # If we can't send the packet, discard the connection - self._lostConnection(connection) - raise - - def handleEchoRes(self, packet): - """Handle an ECHO_RES packet. - - Causes the blocking :py:meth:`Connection.echo` invocation to - return. - - :arg Packet packet: The :py:class:`Packet` that was received. - :returns: None - """ - packet.connection.handleEchoRes(packet.getArgument(0, True)) - - def handleError(self, packet): - """Handle an ERROR packet. - - Logs the error. - - :arg Packet packet: The :py:class:`Packet` that was received. - :returns: None - """ - self.log.error("Received ERROR packet: %s: %s" % - (packet.getArgument(0), - packet.getArgument(1))) - try: - task = packet.connection.pending_tasks.pop(0) - task.setComplete() - except Exception: - self.log.exception("Exception while handling error packet:") - self._lostConnection(packet.connection) - - -class Client(BaseClient): - """A Gearman client. - - You may wish to subclass this class in order to override the - default event handlers to react to Gearman events. Be sure to - call the superclass event handlers so that they may perform - job-related housekeeping. - - :arg str client_id: The client ID to provide to Gearman. It will - appear in administrative output and be appended to the name of - the logger (e.g., gear.Client.client_id). Defaults to - 'unknown'. 
- """ - - def __init__(self, client_id='unknown'): - super(Client, self).__init__(client_id) - self.log = logging.getLogger("gear.Client.%s" % (self.client_id,)) - self.options = set() - - def __repr__(self): - return '' % id(self) - - def _onConnect(self, conn): - # Called immediately after a successful (re-)connection - self.broadcast_lock.acquire() - try: - super(Client, self)._onConnect(conn) - for name in self.options: - self._setOptionConnection(name, conn) - finally: - self.broadcast_lock.release() - - def _setOptionConnection(self, name, conn): - # Set an option on a connection - packet = Packet(constants.REQ, constants.OPTION_REQ, name) - task = OptionReqTask() - try: - conn.pending_tasks.append(task) - self.sendPacket(packet, conn) - except Exception: - # Error handling is all done by sendPacket - task = None - return task - - def setOption(self, name, timeout=30): - """Set an option for all connections. - - :arg str name: The option name to set. - :arg int timeout: How long to wait (in seconds) for a response - from the server before giving up (default: 30 seconds). - :returns: True if the option was set on all connections, - otherwise False - :rtype: bool - """ - tasks = {} - name = convert_to_bytes(name) - self.broadcast_lock.acquire() - - try: - self.options.add(name) - connections = self.active_connections[:] - for connection in connections: - task = self._setOptionConnection(name, connection) - if task: - tasks[task] = connection - finally: - self.broadcast_lock.release() - - success = True - for task in tasks.keys(): - complete = task.wait(timeout) - conn = tasks[task] - if not complete: - self.log.error("Connection %s timed out waiting for a " - "response to an option request: %s" % - (conn, name)) - self._lostConnection(conn) - continue - if name not in conn.options: - success = False - return success - - def submitJob(self, job, background=False, precedence=PRECEDENCE_NORMAL, - timeout=30): - """Submit a job to a Gearman server. - - Submits the provided job to the next server in this client's - round-robin connection pool. - - If the job is a foreground job, updates will be made to the - supplied :py:class:`Job` object as they are received. - - :arg Job job: The :py:class:`Job` to submit. - :arg bool background: Whether the job should be backgrounded. - :arg int precedence: Whether the job should have normal, low, or - high precedence. One of :py:data:`PRECEDENCE_NORMAL`, - :py:data:`PRECEDENCE_LOW`, or :py:data:`PRECEDENCE_HIGH` - :arg int timeout: How long to wait (in seconds) for a response - from the server before giving up (default: 30 seconds). - :raises ConfigurationError: If an invalid precendence value - is supplied. 
- """ - if job.unique is None: - unique = b'' - else: - unique = job.unique - data = b'\x00'.join((job.name, unique, job.arguments)) - if background: - if precedence == PRECEDENCE_NORMAL: - cmd = constants.SUBMIT_JOB_BG - elif precedence == PRECEDENCE_LOW: - cmd = constants.SUBMIT_JOB_LOW_BG - elif precedence == PRECEDENCE_HIGH: - cmd = constants.SUBMIT_JOB_HIGH_BG - else: - raise ConfigurationError("Invalid precedence value") - else: - if precedence == PRECEDENCE_NORMAL: - cmd = constants.SUBMIT_JOB - elif precedence == PRECEDENCE_LOW: - cmd = constants.SUBMIT_JOB_LOW - elif precedence == PRECEDENCE_HIGH: - cmd = constants.SUBMIT_JOB_HIGH - else: - raise ConfigurationError("Invalid precedence value") - packet = Packet(constants.REQ, cmd, data) - attempted_connections = set() - while True: - if attempted_connections == set(self.active_connections): - break - conn = self.getConnection() - task = SubmitJobTask(job) - conn.pending_tasks.append(task) - attempted_connections.add(conn) - try: - self.sendPacket(packet, conn) - except Exception: - # Error handling is all done by sendPacket - continue - complete = task.wait(timeout) - if not complete: - self.log.error("Connection %s timed out waiting for a " - "response to a submit job request: %s" % - (conn, job)) - self._lostConnection(conn) - continue - if not job.handle: - self.log.error("Connection %s sent an error in " - "response to a submit job request: %s" % - (conn, job)) - continue - job.connection = conn - return - raise GearmanError("Unable to submit job to any connected servers") - - def handleJobCreated(self, packet): - """Handle a JOB_CREATED packet. - - Updates the appropriate :py:class:`Job` with the newly - returned job handle. - - :arg Packet packet: The :py:class:`Packet` that was received. - :returns: The :py:class:`Job` object associated with the job request. - :rtype: :py:class:`Job` - """ - task = packet.connection.pending_tasks.pop(0) - if not isinstance(task, SubmitJobTask): - msg = ("Unexpected response received to submit job " - "request: %s" % packet) - self.log.error(msg) - self._lostConnection(packet.connection) - raise GearmanError(msg) - - job = task.job - job.handle = packet.data - packet.connection.related_jobs[job.handle] = job - task.setComplete() - self.log.debug("Job created; handle: %s" % job.handle) - return job - - def handleWorkComplete(self, packet): - """Handle a WORK_COMPLETE packet. - - Updates the referenced :py:class:`Job` with the returned data - and removes it from the list of jobs associated with the - connection. - - :arg Packet packet: The :py:class:`Packet` that was received. - :returns: The :py:class:`Job` object associated with the job request. - :rtype: :py:class:`Job` - """ - - job = packet.getJob() - data = packet.getArgument(1, True) - if data: - job.data.append(data) - job.complete = True - job.failure = False - del packet.connection.related_jobs[job.handle] - self.log.debug("Job complete; handle: %s data: %s" % - (job.handle, job.data)) - return job - - def handleWorkFail(self, packet): - """Handle a WORK_FAIL packet. - - Updates the referenced :py:class:`Job` with the returned data - and removes it from the list of jobs associated with the - connection. - - :arg Packet packet: The :py:class:`Packet` that was received. - :returns: The :py:class:`Job` object associated with the job request. 
-        :rtype: :py:class:`Job`
-        """
-
-        job = packet.getJob()
-        job.complete = True
-        job.failure = True
-        del packet.connection.related_jobs[job.handle]
-        self.log.debug("Job failed; handle: %s" % job.handle)
-        return job
-
-    def handleWorkException(self, packet):
-        """Handle a WORK_EXCEPTION packet.
-
-        Updates the referenced :py:class:`Job` with the returned data
-        and removes it from the list of jobs associated with the
-        connection.
-
-        :arg Packet packet: The :py:class:`Packet` that was received.
-        :returns: The :py:class:`Job` object associated with the job request.
-        :rtype: :py:class:`Job`
-        """
-
-        job = packet.getJob()
-        job.exception = packet.getArgument(1, True)
-        job.complete = True
-        job.failure = True
-        del packet.connection.related_jobs[job.handle]
-        self.log.debug("Job exception; handle: %s data: %s" %
-                       (job.handle, job.exception))
-        return job
-
-    def handleWorkData(self, packet):
-        """Handle a WORK_DATA packet.
-
-        Updates the referenced :py:class:`Job` with the returned data.
-
-        :arg Packet packet: The :py:class:`Packet` that was received.
-        :returns: The :py:class:`Job` object associated with the job request.
-        :rtype: :py:class:`Job`
-        """
-
-        job = packet.getJob()
-        data = packet.getArgument(1, True)
-        if data:
-            job.data.append(data)
-        self.log.debug("Job data; handle: %s data: %s" %
-                       (job.handle, job.data))
-        return job
-
-    def handleWorkWarning(self, packet):
-        """Handle a WORK_WARNING packet.
-
-        Updates the referenced :py:class:`Job` with the returned data.
-
-        :arg Packet packet: The :py:class:`Packet` that was received.
-        :returns: The :py:class:`Job` object associated with the job request.
-        :rtype: :py:class:`Job`
-        """
-
-        job = packet.getJob()
-        data = packet.getArgument(1, True)
-        if data:
-            job.data.append(data)
-        job.warning = True
-        self.log.debug("Job warning; handle: %s data: %s" %
-                       (job.handle, job.data))
-        return job
-
-    def handleWorkStatus(self, packet):
-        """Handle a WORK_STATUS packet.
-
-        Updates the referenced :py:class:`Job` with the returned data.
-
-        :arg Packet packet: The :py:class:`Packet` that was received.
-        :returns: The :py:class:`Job` object associated with the job request.
-        :rtype: :py:class:`Job`
-        """
-
-        job = packet.getJob()
-        job.numerator = packet.getArgument(1)
-        job.denominator = packet.getArgument(2)
-        try:
-            job.fraction_complete = (float(job.numerator) /
-                                     float(job.denominator))
-        except Exception:
-            job.fraction_complete = None
-        self.log.debug("Job status; handle: %s complete: %s/%s" %
-                       (job.handle, job.numerator, job.denominator))
-        return job
-
-    def handleStatusRes(self, packet):
-        """Handle a STATUS_RES packet.
-
-        Updates the referenced :py:class:`Job` with the returned data.
-
-        :arg Packet packet: The :py:class:`Packet` that was received.
-        :returns: The :py:class:`Job` object associated with the job request.
-        :rtype: :py:class:`Job`
-        """
-
-        job = packet.getJob()
-        job.known = (packet.getArgument(1) == '1')
-        job.running = (packet.getArgument(2) == '1')
-        job.numerator = packet.getArgument(3)
-        job.denominator = packet.getArgument(4)
-
-        try:
-            job.fraction_complete = (float(job.numerator) /
-                                     float(job.denominator))
-        except Exception:
-            job.fraction_complete = None
-        return job
-
-    def handleOptionRes(self, packet):
-        """Handle an OPTION_RES packet.
-
-        Updates the set of options for the connection.
-
-        :arg Packet packet: The :py:class:`Packet` that was received.
-        :returns: None.
- """ - task = packet.connection.pending_tasks.pop(0) - if not isinstance(task, OptionReqTask): - msg = ("Unexpected response received to option " - "request: %s" % packet) - self.log.error(msg) - self._lostConnection(packet.connection) - raise GearmanError(msg) - - packet.connection.handleOptionRes(packet.getArgument(0)) - task.setComplete() - - def handleDisconnect(self, job): - """Handle a Gearman server disconnection. - - If the Gearman server is disconnected, this will be called for any - jobs currently associated with the server. - - :arg Job packet: The :py:class:`Job` that was running when the server - disconnected. - """ - return job - - -class FunctionRecord(object): - """Represents a function that should be registered with Gearman. - - This class only directly needs to be instatiated for use with - :py:meth:`Worker.setFunctions`. If a timeout value is supplied, - the function will be registered with CAN_DO_TIMEOUT. - - :arg str name: The name of the function to register. - :arg numeric timeout: The timeout value (optional). - """ - - def __init__(self, name, timeout=None): - self.name = name - self.timeout = timeout - - def __repr__(self): - return '' % ( - id(self), self.name, self.timeout) - - -class Worker(BaseClient): - """A Gearman worker. - - :arg str client_id: The client ID to provide to Gearman. It will - appear in administrative output and be appended to the name of - the logger (e.g., gear.Worker.client_id). - :arg str worker_id: The client ID to provide to Gearman. It will - appear in administrative output and be appended to the name of - the logger (e.g., gear.Worker.client_id). This parameter name - is deprecated, use client_id instead. - """ - - def __init__(self, client_id=None, worker_id=None): - if not client_id or worker_id: - raise Exception("A client_id must be provided") - if worker_id: - client_id = worker_id - super(Worker, self).__init__(client_id) - self.log = logging.getLogger("gear.Worker.%s" % (self.client_id,)) - self.worker_id = client_id - self.functions = {} - self.job_lock = threading.Lock() - self.waiting_for_jobs = 0 - self.job_queue = queue.Queue() - - def __repr__(self): - return '' % id(self) - - def registerFunction(self, name, timeout=None): - """Register a function with Gearman. - - If a timeout value is supplied, the function will be - registered with CAN_DO_TIMEOUT. - - :arg str name: The name of the function to register. - :arg numeric timeout: The timeout value (optional). - """ - name = convert_to_bytes(name) - self.functions[name] = FunctionRecord(name, timeout) - if timeout: - self._sendCanDoTimeout(name, timeout) - else: - self._sendCanDo(name) - - def unRegisterFunction(self, name): - """Remove a function from Gearman's registry. - - :arg str name: The name of the function to remove. - """ - name = convert_to_bytes(name) - del self.functions[name] - self._sendCantDo(name) - - def setFunctions(self, functions): - """Replace the set of functions registered with Gearman. - - Accepts a list of :py:class:`FunctionRecord` objects which - represents the complete set of functions that should be - registered with Gearman. Any existing functions will be - unregistered and these registered in their place. If the - empty list is supplied, then the Gearman registered function - set will be cleared. - - :arg list functions: A list of :py:class:`FunctionRecord` objects. 
- """ - - self._sendResetAbilities() - self.functions = {} - for f in functions: - if not isinstance(f, FunctionRecord): - raise InvalidDataError( - "An iterable of FunctionRecords is required.") - self.functions[f.name] = f - for f in self.functions.values(): - if f.timeout: - self._sendCanDoTimeout(f.name, f.timeout) - else: - self._sendCanDo(f.name) - - def _sendCanDo(self, name): - self.broadcast_lock.acquire() - try: - p = Packet(constants.REQ, constants.CAN_DO, name) - self.broadcast(p) - finally: - self.broadcast_lock.release() - - def _sendCanDoTimeout(self, name, timeout): - self.broadcast_lock.acquire() - try: - data = name + b'\x00' + timeout - p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data) - self.broadcast(p) - finally: - self.broadcast_lock.release() - - def _sendCantDo(self, name): - self.broadcast_lock.acquire() - try: - p = Packet(constants.REQ, constants.CANT_DO, name) - self.broadcast(p) - finally: - self.broadcast_lock.release() - - def _sendResetAbilities(self): - self.broadcast_lock.acquire() - try: - p = Packet(constants.REQ, constants.RESET_ABILITIES, b'') - self.broadcast(p) - finally: - self.broadcast_lock.release() - - def _sendPreSleep(self, connection): - p = Packet(constants.REQ, constants.PRE_SLEEP, b'') - self.sendPacket(p, connection) - - def _sendGrabJobUniq(self, connection=None): - p = Packet(constants.REQ, constants.GRAB_JOB_UNIQ, b'') - if connection: - self.sendPacket(p, connection) - else: - self.broadcast(p) - - def _onConnect(self, conn): - self.broadcast_lock.acquire() - try: - # Called immediately after a successful (re-)connection - p = Packet(constants.REQ, constants.SET_CLIENT_ID, self.client_id) - conn.sendPacket(p) - super(Worker, self)._onConnect(conn) - for f in self.functions.values(): - if f.timeout: - data = f.name + b'\x00' + f.timeout - p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data) - else: - p = Packet(constants.REQ, constants.CAN_DO, f.name) - conn.sendPacket(p) - conn.changeState("IDLE") - finally: - self.broadcast_lock.release() - # Any exceptions will be handled by the calling function, and the - # connection will not be put into the pool. - - def _onActiveConnection(self, conn): - self.job_lock.acquire() - try: - if self.waiting_for_jobs > 0: - self._updateStateMachines() - finally: - self.job_lock.release() - - def _updateStateMachines(self): - connections = self.active_connections[:] - - for connection in connections: - if (connection.state == "IDLE" and self.waiting_for_jobs > 0): - self._sendGrabJobUniq(connection) - connection.changeState("GRAB_WAIT") - if (connection.state != "IDLE" and self.waiting_for_jobs < 1): - connection.changeState("IDLE") - - def getJob(self): - """Get a job from Gearman. - - Blocks until a job is received. This method is re-entrant, so - it is safe to call this method on a single worker from - multiple threads. In that case, one of them at random will - receive the job assignment. - - :returns: The :py:class:`WorkerJob` assigned. - :rtype: :py:class:`WorkerJob`. - :raises InterruptedError: If interrupted (by - :py:meth:`stopWaitingForJobs`) before a job is received. 
- """ - self.job_lock.acquire() - try: - self.waiting_for_jobs += 1 - self.log.debug("Get job; number of threads waiting for jobs: %s" % - self.waiting_for_jobs) - - try: - job = self.job_queue.get(False) - except queue.Empty: - job = None - - if not job: - self._updateStateMachines() - finally: - self.job_lock.release() - - if not job: - job = self.job_queue.get() - - self.log.debug("Received job: %s" % job) - if job is None: - raise InterruptedError() - return job - - def stopWaitingForJobs(self): - """Interrupts all running :py:meth:`getJob` calls, which will raise - an exception. - """ - - self.job_lock.acquire() - try: - while True: - connections = self.active_connections[:] - now = time.time() - ok = True - for connection in connections: - if connection.state == "GRAB_WAIT": - # Replies to GRAB_JOB should be fast, give up if we've - # been waiting for more than 5 seconds. - if now - connection.state_time > 5: - self._lostConnection(connection) - else: - ok = False - if ok: - break - else: - self.job_lock.release() - time.sleep(0.1) - self.job_lock.acquire() - - while self.waiting_for_jobs > 0: - self.waiting_for_jobs -= 1 - self.job_queue.put(None) - - self._updateStateMachines() - finally: - self.job_lock.release() - - def _shutdown(self): - super(Worker, self)._shutdown() - self.stopWaitingForJobs() - - def handleNoop(self, packet): - """Handle a NOOP packet. - - Sends a GRAB_JOB_UNIQ packet on the same connection. - GRAB_JOB_UNIQ will return jobs regardless of whether they have - been specified with a unique identifier when submitted. If - they were not, then :py:attr:`WorkerJob.unique` attribute - will be None. - - :arg Packet packet: The :py:class:`Packet` that was received. - """ - - self.job_lock.acquire() - try: - if packet.connection.state == "SLEEP": - self.log.debug("Sending GRAB_JOB_UNIQ") - self._sendGrabJobUniq(packet.connection) - packet.connection.changeState("GRAB_WAIT") - else: - self.log.debug("Received unexpecetd NOOP packet on %s" % - packet.connection) - finally: - self.job_lock.release() - - def handleNoJob(self, packet): - """Handle a NO_JOB packet. - - Sends a PRE_SLEEP packet on the same connection. - - :arg Packet packet: The :py:class:`Packet` that was received. - """ - self.job_lock.acquire() - try: - if packet.connection.state == "GRAB_WAIT": - self.log.debug("Sending PRE_SLEEP") - self._sendPreSleep(packet.connection) - packet.connection.changeState("SLEEP") - else: - self.log.debug("Received unexpected NO_JOB packet on %s" % - packet.connection) - finally: - self.job_lock.release() - - def handleJobAssign(self, packet): - """Handle a JOB_ASSIGN packet. - - Adds a WorkerJob to the internal queue to be picked up by any - threads waiting in :py:meth:`getJob`. - - :arg Packet packet: The :py:class:`Packet` that was received. - """ - - handle = packet.getArgument(0) - name = packet.getArgument(1) - arguments = packet.getArgument(2, True) - return self._handleJobAssignment(packet, handle, name, - arguments, None) - - def handleJobAssignUnique(self, packet): - """Handle a JOB_ASSIGN_UNIQ packet. - - Adds a WorkerJob to the internal queue to be picked up by any - threads waiting in :py:meth:`getJob`. - - :arg Packet packet: The :py:class:`Packet` that was received. 
- """ - - handle = packet.getArgument(0) - name = packet.getArgument(1) - unique = packet.getArgument(2) - if unique == b'': - unique = None - arguments = packet.getArgument(3, True) - return self._handleJobAssignment(packet, handle, name, - arguments, unique) - - def _handleJobAssignment(self, packet, handle, name, arguments, unique): - job = WorkerJob(handle, name, arguments, unique) - job.connection = packet.connection - - self.job_lock.acquire() - try: - packet.connection.changeState("IDLE") - self.waiting_for_jobs -= 1 - self.log.debug("Job assigned; number of threads waiting for " - "jobs: %s" % self.waiting_for_jobs) - self.job_queue.put(job) - - self._updateStateMachines() - finally: - self.job_lock.release() - - -class BaseJob(object): - def __init__(self, name, arguments, unique=None, handle=None): - self.name = convert_to_bytes(name) - if (not isinstance(arguments, bytes) and - not isinstance(arguments, bytearray)): - raise TypeError("arguments must be of type bytes or bytearray") - self.arguments = arguments - self.unique = convert_to_bytes(unique) - self.handle = handle - self.connection = None - - def __repr__(self): - return '' % ( - id(self), self.handle, self.name, self.unique) - - -class Job(BaseJob): - """A job to run or being run by Gearman. - - :arg str name: The name of the job. - :arg bytes arguments: The opaque data blob to be passed to the worker - as arguments. - :arg str unique: A byte string to uniquely identify the job to Gearman - (optional). - - The following instance attributes are available: - - **name** (str) - The name of the job. - **arguments** (bytes) - The opaque data blob passed to the worker as arguments. - **unique** (str or None) - The unique ID of the job (if supplied). - **handle** (bytes or None) - The Gearman job handle. None if no job handle has been received yet. - **data** (list of byte-arrays) - The result data returned from Gearman. Each packet appends an - element to the list. Depending on the nature of the data, the - elements may need to be concatenated before use. - **exception** (bytes or None) - Exception information returned from Gearman. None if no exception - has been received. - **warning** (bool) - Whether the worker has reported a warning. - **complete** (bool) - Whether the job is complete. - **failure** (bool) - Whether the job has failed. Only set when complete is True. - **numerator** (bytes or None) - The numerator of the completion ratio reported by the worker. - Only set when a status update is sent by the worker. - **denominator** (bytes or None) - The denominator of the completion ratio reported by the - worker. Only set when a status update is sent by the worker. - **fraction_complete** (float or None) - The fractional complete ratio reported by the worker. Only set when - a status update is sent by the worker. - **known** (bool or None) - Whether the job is known to Gearman. Only set by handleStatusRes() in - response to a getStatus() query. - **running** (bool or None) - Whether the job is running. Only set by handleStatusRes() in - response to a getStatus() query. - **connection** (:py:class:`Connection` or None) - The connection associated with the job. Only set after the job - has been submitted to a Gearman server. 
- """ - - def __init__(self, name, arguments, unique=None): - super(Job, self).__init__(name, arguments, unique) - self.data = [] - self.exception = None - self.warning = False - self.complete = False - self.failure = False - self.numerator = None - self.denominator = None - self.fraction_complete = None - self.known = None - self.running = None - - -class WorkerJob(BaseJob): - """A job that Gearman has assigned to a Worker. Not intended to - be instantiated directly, but rather returned by - :py:meth:`Worker.getJob`. - - :arg str handle: The job handle assigned by gearman. - :arg str name: The name of the job. - :arg bytes arguments: The opaque data blob passed to the worker - as arguments. - :arg str unique: A byte string to uniquely identify the job to Gearman - (optional). - - The following instance attributes are available: - - **name** (str) - The name of the job. - **arguments** (bytes) - The opaque data blob passed to the worker as arguments. - **unique** (str or None) - The unique ID of the job (if supplied). - **handle** (bytes) - The Gearman job handle. - **connection** (:py:class:`Connection` or None) - The connection associated with the job. Only set after the job - has been submitted to a Gearman server. - """ - - def __init__(self, handle, name, arguments, unique=None): - super(WorkerJob, self).__init__(name, arguments, unique, handle) - - def sendWorkData(self, data=b''): - """Send a WORK_DATA packet to the client. - - :arg bytes data: The data to be sent to the client (optional). - """ - - data = self.handle + b'\x00' + data - p = Packet(constants.REQ, constants.WORK_DATA, data) - self.connection.sendPacket(p) - - def sendWorkWarning(self, data=b''): - """Send a WORK_WARNING packet to the client. - - :arg bytes data: The data to be sent to the client (optional). - """ - - data = self.handle + b'\x00' + data - p = Packet(constants.REQ, constants.WORK_WARNING, data) - self.connection.sendPacket(p) - - def sendWorkStatus(self, numerator, denominator): - """Send a WORK_STATUS packet to the client. - - Sends a numerator and denominator that together represent the - fraction complete of the job. - - :arg numeric numerator: The numerator of the fraction complete. - :arg numeric denominator: The denominator of the fraction complete. - """ - - data = (self.handle + b'\x00' + - str(numerator).encode('utf8') + b'\x00' + - str(denominator).encode('utf8')) - p = Packet(constants.REQ, constants.WORK_STATUS, data) - self.connection.sendPacket(p) - - def sendWorkComplete(self, data=b''): - """Send a WORK_COMPLETE packet to the client. - - :arg bytes data: The data to be sent to the client (optional). - """ - - data = self.handle + b'\x00' + data - p = Packet(constants.REQ, constants.WORK_COMPLETE, data) - self.connection.sendPacket(p) - - def sendWorkFail(self): - "Send a WORK_FAIL packet to the client." - - p = Packet(constants.REQ, constants.WORK_FAIL, self.handle) - self.connection.sendPacket(p) - - def sendWorkException(self, data=b''): - """Send a WORK_EXCEPTION packet to the client. - - :arg bytes data: The exception data to be sent to the client - (optional). - """ - - data = self.handle + b'\x00' + data - p = Packet(constants.REQ, constants.WORK_EXCEPTION, data) - self.connection.sendPacket(p) - - -# Below are classes for use in the server implementation: -class ServerJob(Job): - """A job record for use in a server. - - :arg str name: The name of the job. - :arg bytes arguments: The opaque data blob to be passed to the worker - as arguments. 
-    :arg str unique: A byte string to uniquely identify the job to Gearman
-        (optional).
-
-    The following instance attributes are available:
-
-    **name** (str)
-        The name of the job.
-    **arguments** (bytes)
-        The opaque data blob passed to the worker as arguments.
-    **unique** (str or None)
-        The unique ID of the job (if supplied).
-    **handle** (bytes or None)
-        The Gearman job handle.  None if no job handle has been received yet.
-    **data** (list of byte-arrays)
-        The result data returned from Gearman.  Each packet appends an
-        element to the list.  Depending on the nature of the data, the
-        elements may need to be concatenated before use.
-    **exception** (bytes or None)
-        Exception information returned from Gearman.  None if no exception
-        has been received.
-    **warning** (bool)
-        Whether the worker has reported a warning.
-    **complete** (bool)
-        Whether the job is complete.
-    **failure** (bool)
-        Whether the job has failed.  Only set when complete is True.
-    **numerator** (bytes or None)
-        The numerator of the completion ratio reported by the worker.
-        Only set when a status update is sent by the worker.
-    **denominator** (bytes or None)
-        The denominator of the completion ratio reported by the
-        worker.  Only set when a status update is sent by the worker.
-    **fraction_complete** (float or None)
-        The fractional complete ratio reported by the worker.  Only set when
-        a status update is sent by the worker.
-    **known** (bool or None)
-        Whether the job is known to Gearman.  Only set by handleStatusRes() in
-        response to a getStatus() query.
-    **running** (bool or None)
-        Whether the job is running.  Only set by handleStatusRes() in
-        response to a getStatus() query.
-    **client_connection** (:py:class:`Connection`)
-        The client connection associated with the job.
-    **worker_connection** (:py:class:`Connection` or None)
-        The worker connection associated with the job.  Only set after the job
-        has been assigned to a worker.
-    """
-
-    def __init__(self, handle, name, arguments, client_connection,
-                 unique=None):
-        super(ServerJob, self).__init__(name, arguments, unique)
-        self.handle = handle
-        self.client_connection = client_connection
-        self.worker_connection = None
-        del self.connection
-
-
-class ServerAdminRequest(AdminRequest):
-    """An administrative request sent to a server."""
-
-    def __init__(self, connection):
-        super(ServerAdminRequest, self).__init__()
-        self.connection = connection
-
-    def isComplete(self, data):
-        if data[-1:] == b'\n':
-            self.command = data.strip()
-            return True
-        return False
-
-
-class ServerConnection(Connection):
-    """A Connection to a Gearman Client."""
-
-    def __init__(self, addr, conn, use_ssl, client_id):
-        if client_id:
-            self.log = logging.getLogger("gear.ServerConnection.%s" %
-                                         (client_id,))
-        else:
-            self.log = logging.getLogger("gear.ServerConnection")
-        self.host = addr[0]
-        self.port = addr[1]
-        self.conn = conn
-        self.use_ssl = use_ssl
-        self.client_id = None
-        self.functions = set()
-        self.related_jobs = {}
-        self.ssl_subject = None
-        if self.use_ssl:
-            for x in conn.getpeercert()['subject']:
-                if x[0][0] == 'commonName':
-                    self.ssl_subject = x[0][1]
-            self.log.debug("SSL subject: %s" % self.ssl_subject)
-        self.changeState("INIT")
-
-    def _getAdminRequest(self):
-        return ServerAdminRequest(self)
-
-    def __repr__(self):
-        return '<gear.ServerConnection 0x%x name: %s host: %s port: %s>' % (
-            id(self), self.client_id, self.host, self.port)
-
-
-class Server(BaseClientServer):
-    """A simple gearman server implementation for testing
-    (not for production use).
-
-    :arg int port: The TCP port on which to listen.
-    :arg str ssl_key: Path to the SSL private key.
-    :arg str ssl_cert: Path to the SSL certificate.
-    :arg str ssl_ca: Path to the CA certificate.
-    :arg str statsd_host: statsd hostname.  None means disabled
-        (the default).
-    :arg str statsd_port: statsd port (defaults to 8125).
-    :arg str statsd_prefix: statsd key prefix.
-    :arg str server_id: The ID associated with this server.
-        It will be appended to the name of the logger (e.g.,
-        gear.Server.server_id).  Defaults to None (unused).
-    :arg ACL acl: An :py:class:`ACL` object if the server should apply
-        access control rules to its connections.
-    """
-
-    def __init__(self, port=4730, ssl_key=None, ssl_cert=None, ssl_ca=None,
-                 statsd_host=None, statsd_port=8125, statsd_prefix=None,
-                 server_id=None, acl=None):
-        self.port = port
-        self.ssl_key = ssl_key
-        self.ssl_cert = ssl_cert
-        self.ssl_ca = ssl_ca
-        self.high_queue = []
-        self.normal_queue = []
-        self.low_queue = []
-        self.jobs = {}
-        self.functions = set()
-        self.max_handle = 0
-        self.acl = acl
-        self.connect_wake_read, self.connect_wake_write = os.pipe()
-
-        self.use_ssl = False
-        if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
-            self.use_ssl = True
-
-        for res in socket.getaddrinfo(None, self.port, socket.AF_UNSPEC,
-                                      socket.SOCK_STREAM, 0,
-                                      socket.AI_PASSIVE):
-            af, socktype, proto, canonname, sa = res
-            try:
-                self.socket = socket.socket(af, socktype, proto)
-                self.socket.setsockopt(socket.SOL_SOCKET,
-                                       socket.SO_REUSEADDR, 1)
-            except socket.error:
-                self.socket = None
-                continue
-            try:
-                self.socket.bind(sa)
-                self.socket.listen(1)
-            except socket.error:
-                self.socket.close()
-                self.socket = None
-                continue
-            break
-
-        if self.socket is None:
-            raise Exception("Could not open socket")
-
-        if port == 0:
-            self.port = self.socket.getsockname()[1]
-
-        super(Server, self).__init__(server_id)
-        if server_id:
-            self.log = logging.getLogger("gear.Server.%s" % (self.client_id,))
-        else:
-            self.log = logging.getLogger("gear.Server")
-
-        if statsd_host:
-            if not statsd:
-                self.log.error("Unable to import statsd module")
-                self.statsd = None
-            else:
-                self.statsd = statsd.StatsClient(statsd_host,
-                                                 statsd_port,
-                                                 statsd_prefix)
-        else:
-            self.statsd = None
-
-    def _doConnectLoop(self):
-        while self.running:
-            try:
-                self.connectLoop()
-            except Exception:
-                self.log.exception("Exception in connect loop:")
-                time.sleep(1)
-
-    def connectLoop(self):
-        poll = SelectPoll()
-        bitmask = (select.POLLIN | select.POLLERR |
-                   select.POLLHUP | select.POLLNVAL)
-        # Register the wake pipe so that we can break if we need to
-        # shutdown.
- poll.register(self.connect_wake_read, bitmask) - poll.register(self.socket.fileno(), bitmask) - while self.running: - ret = poll.poll() - for fd, event in ret: - if fd == self.connect_wake_read: - self.log.debug("Accept woken by pipe") - while True: - if os.read(self.connect_wake_read, 1) == b'\n': - break - return - if event & select.POLLIN: - self.log.debug("Accepting new connection") - c, addr = self.socket.accept() - if self.use_ssl: - c = ssl.wrap_socket(c, server_side=True, - keyfile=self.ssl_key, - certfile=self.ssl_cert, - ca_certs=self.ssl_ca, - cert_reqs=ssl.CERT_REQUIRED, - ssl_version=ssl.PROTOCOL_TLSv1) - conn = ServerConnection(addr, c, self.use_ssl, - self.client_id) - self.log.info("Accepted connection %s" % (conn,)) - self.connections_condition.acquire() - try: - self.active_connections.append(conn) - self.connections_condition.notifyAll() - os.write(self.wake_write, b'1\n') - finally: - self.connections_condition.release() - - def _shutdown(self): - super(Server, self)._shutdown() - os.write(self.connect_wake_write, b'1\n') - - def _cleanup(self): - super(Server, self)._cleanup() - self.socket.close() - os.close(self.connect_wake_read) - os.close(self.connect_wake_write) - - def _lostConnection(self, conn): - # Called as soon as a connection is detected as faulty. - self.log.info("Marking %s as disconnected" % conn) - self.connections_condition.acquire() - try: - jobs = conn.related_jobs.values() - if conn in self.active_connections: - self.active_connections.remove(conn) - finally: - self.connections_condition.notifyAll() - self.connections_condition.release() - for job in jobs: - if job.worker_connection == conn: - # the worker disconnected, alert the client - try: - p = Packet(constants.REQ, constants.WORK_FAIL, job.handle) - job.client_connection.sendPacket(p) - except Exception: - self.log.exception("Sending WORK_FAIL to client after " - "worker disconnect failed:") - self._removeJob(job) - self._updateStats() - - def _removeJob(self, job, dequeue=True): - # dequeue is tri-state: True, False, or a specific queue - try: - del job.client_connection.related_jobs[job.handle] - except KeyError: - pass - if job.worker_connection: - try: - del job.worker_connection.related_jobs[job.handle] - except KeyError: - pass - try: - del self.jobs[job.handle] - except KeyError: - pass - if dequeue is True: - # Search all queues for the job - try: - self.high_queue.remove(job) - except ValueError: - pass - try: - self.normal_queue.remove(job) - except ValueError: - pass - try: - self.low_queue.remove(job) - except ValueError: - pass - elif dequeue is not False: - # A specific queue was supplied - dequeue.remove(job) - # If dequeue is false, no need to remove from any queue - - def getQueue(self): - """Returns a copy of all internal queues in a flattened form. - - :returns: The Gearman queue. - :rtype: list of :py:class:`WorkerJob`. 
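
Since this Server exists for testing, the usual pattern is to embed it in-process and point a client at whatever ephemeral port it picked (a sketch; the client variable is assumed from the earlier examples):

    server = Server(port=0)      # port=0: the kernel assigns a free port,
                                 # and __init__ stores it back on self.port
    client.addServer('127.0.0.1', server.port)
    client.waitForServer()
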
- """ - ret = [] - for q in [self.high_queue, self.normal_queue, self.low_queue]: - ret += q - return ret - - def handleAdminRequest(self, request): - if request.command.startswith(b'cancel job'): - self.handleCancelJob(request) - elif request.command.startswith(b'status'): - self.handleStatus(request) - elif request.command.startswith(b'workers'): - self.handleWorkers(request) - elif request.command.startswith(b'acl list'): - self.handleACLList(request) - elif request.command.startswith(b'acl grant'): - self.handleACLGrant(request) - elif request.command.startswith(b'acl revoke'): - self.handleACLRevoke(request) - elif request.command.startswith(b'acl self-revoke'): - self.handleACLSelfRevoke(request) - - def _cancelJob(self, request, job, queue): - if self.acl: - if not self.acl.canInvoke(request.connection.ssl_subject, - job.name): - self.log.info("Rejecting cancel job from %s for %s " - "due to ACL" % - (request.connection.ssl_subject, job.name)) - request.connection.sendRaw(b'ERR PERMISSION_DENIED\n') - return - self._removeJob(job, dequeue=queue) - self._updateStats() - request.connection.sendRaw(b'OK\n') - return - - def handleCancelJob(self, request): - words = request.command.split() - handle = words[2] - - if handle in self.jobs: - for q in [self.high_queue, self.normal_queue, self.low_queue]: - for job in q: - if handle == job.handle: - return self._cancelJob(request, job, q) - request.connection.sendRaw(b'ERR UNKNOWN_JOB\n') - - def handleACLList(self, request): - if self.acl is None: - request.connection.sendRaw(b'ERR ACL_DISABLED\n') - return - for entry in self.acl.getEntries(): - l = "%s\tregister=%s\tinvoke=%s\tgrant=%s\n" % ( - entry.subject, entry.register, entry.invoke, entry.grant) - request.connection.sendRaw(l.encode('utf8')) - request.connection.sendRaw(b'.\n') - - def handleACLGrant(self, request): - # acl grant register worker .* - words = request.command.split(None, 4) - verb = words[2] - subject = words[3] - - if self.acl is None: - request.connection.sendRaw(b'ERR ACL_DISABLED\n') - return - if not self.acl.canGrant(request.connection.ssl_subject): - request.connection.sendRaw(b'ERR PERMISSION_DENIED\n') - return - try: - if verb == 'invoke': - self.acl.grantInvoke(subject, words[4]) - elif verb == 'register': - self.acl.grantRegister(subject, words[4]) - elif verb == 'grant': - self.acl.grantGrant(subject) - else: - request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n') - return - except ACLError, e: - self.log.info("Error in grant command: %s" % (e.message,)) - request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,)) - return - request.connection.sendRaw(b'OK\n') - - def handleACLRevoke(self, request): - # acl revoke register worker - words = request.command.split() - verb = words[2] - subject = words[3] - - if self.acl is None: - request.connection.sendRaw(b'ERR ACL_DISABLED\n') - return - if subject != request.connection.ssl_subject: - if not self.acl.canGrant(request.connection.ssl_subject): - request.connection.sendRaw(b'ERR PERMISSION_DENIED\n') - return - try: - if verb == 'invoke': - self.acl.revokeInvoke(subject) - elif verb == 'register': - self.acl.revokeRegister(subject) - elif verb == 'grant': - self.acl.revokeGrant(subject) - elif verb == 'all': - try: - self.acl.remove(subject) - except ACLError: - pass - else: - request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n') - return - except ACLError, e: - self.log.info("Error in revoke command: %s" % (e.message,)) - request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,)) - return - 
request.connection.sendRaw(b'OK\n') - - def handleACLSelfRevoke(self, request): - # acl self-revoke register - words = request.command.split() - verb = words[2] - - if self.acl is None: - request.connection.sendRaw(b'ERR ACL_DISABLED\n') - return - subject = request.connection.ssl_subject - try: - if verb == 'invoke': - self.acl.revokeInvoke(subject) - elif verb == 'register': - self.acl.revokeRegister(subject) - elif verb == 'grant': - self.acl.revokeGrant(subject) - elif verb == 'all': - try: - self.acl.remove(subject) - except ACLError: - pass - else: - request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n') - return - except ACLError, e: - self.log.info("Error in self-revoke command: %s" % (e.message,)) - request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,)) - return - request.connection.sendRaw(b'OK\n') - - def _getFunctionStats(self): - functions = {} - for function in self.functions: - # Total, running, workers - functions[function] = [0, 0, 0] - for job in self.jobs.values(): - if job.name not in functions: - functions[job.name] = [0, 0, 0] - functions[job.name][0] += 1 - if job.running: - functions[job.name][1] += 1 - for connection in self.active_connections: - for function in connection.functions: - if function not in functions: - functions[function] = [0, 0, 0] - functions[function][2] += 1 - return functions - - def handleStatus(self, request): - functions = self._getFunctionStats() - for name, values in functions.items(): - request.connection.sendRaw(("%s\t%s\t%s\t%s\n" % - (name, values[0], values[1], - values[2])).encode('utf8')) - request.connection.sendRaw(b'.\n') - - def handleWorkers(self, request): - for connection in self.active_connections: - fd = connection.conn.fileno() - ip = connection.host - client_id = connection.client_id or '-' - functions = ' '.join(connection.functions) - request.connection.sendRaw(("%s %s %s : %s\n" % - (fd, ip, client_id, functions)) - .encode('utf8')) - request.connection.sendRaw(b'.\n') - - def wakeConnections(self): - p = Packet(constants.RES, constants.NOOP, b'') - for connection in self.active_connections: - if connection.state == 'SLEEP': - connection.changeState("AWAKE") - connection.sendPacket(p) - - def reportTimingStats(self, ptype, duration): - """Report processing times by packet type - - This method is called by handlePacket to report how long - processing took for each packet. If statsd is configured, - timing and counts are reported with the key - "prefix.packet.NAME". - - :arg bytes ptype: The packet type (one of the packet types in - constants). - :arg float duration: The time (in seconds) it took to process - the packet. 
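
The administrative handlers above speak a newline-terminated text protocol, so the "status" reply format produced by handleStatus() can be exercised with nothing more than a socket (a sketch; host and port are hypothetical):

    import socket

    s = socket.create_connection(('127.0.0.1', 4730))
    s.sendall(b'status\n')
    buf = b''
    while not buf.endswith(b'.\n'):     # replies are terminated by ".\n"
        buf += s.recv(4096)
    # One "name\ttotal\trunning\tworkers" line per registered function.
    print(buf.decode('utf8'))
    s.close()
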
- """ - if not self.statsd: - return - ptype = constants.types.get(ptype, 'UNKNOWN') - key = 'packet.%s' % ptype - self.statsd.timing(key, int(duration * 1000)) - self.statsd.incr(key) - - def _updateStats(self): - if not self.statsd: - return - - # prefix.queue.total - # prefix.queue.running - # prefix.queue.waiting - # prefix.workers - base_key = 'queue' - total = 0 - running = 0 - waiting = 0 - for job in self.jobs.values(): - total += 1 - if job.running: - running += 1 - else: - waiting += 1 - - key = '.'.join([base_key, 'total']) - self.statsd.gauge(key, total) - - key = '.'.join([base_key, 'running']) - self.statsd.gauge(key, running) - - key = '.'.join([base_key, 'waiting']) - self.statsd.gauge(key, waiting) - - workers = 0 - for connection in self.active_connections: - if connection.functions: - workers += 1 - self.statsd.gauge('workers', workers) - - def _handleSubmitJob(self, packet, precedence): - name = packet.getArgument(0) - unique = packet.getArgument(1) - if not unique: - unique = None - arguments = packet.getArgument(2, True) - if self.acl: - if not self.acl.canInvoke(packet.connection.ssl_subject, name): - self.log.info("Rejecting SUBMIT_JOB from %s for %s " - "due to ACL" % - (packet.connection.ssl_subject, name)) - self.sendError(packet.connection, 0, - 'Permission denied by ACL') - return - self.max_handle += 1 - handle = ('H:%s:%s' % (packet.connection.host, - self.max_handle)).encode('utf8') - job = ServerJob(handle, name, arguments, packet.connection, unique) - p = Packet(constants.RES, constants.JOB_CREATED, handle) - packet.connection.sendPacket(p) - self.jobs[handle] = job - packet.connection.related_jobs[handle] = job - if precedence == PRECEDENCE_HIGH: - self.high_queue.append(job) - elif precedence == PRECEDENCE_NORMAL: - self.normal_queue.append(job) - elif precedence == PRECEDENCE_LOW: - self.low_queue.append(job) - self._updateStats() - self.wakeConnections() - - def handleSubmitJob(self, packet): - return self._handleSubmitJob(packet, PRECEDENCE_NORMAL) - - def handleSubmitJobHigh(self, packet): - return self._handleSubmitJob(packet, PRECEDENCE_HIGH) - - def handleSubmitJobLow(self, packet): - return self._handleSubmitJob(packet, PRECEDENCE_LOW) - - def getJobForConnection(self, connection, peek=False): - for q in [self.high_queue, self.normal_queue, self.low_queue]: - for job in q: - if job.name in connection.functions: - if not peek: - q.remove(job) - connection.related_jobs[job.handle] = job - job.worker_connection = connection - job.running = True - self._updateStats() - return job - return None - - def handleGrabJobUniq(self, packet): - job = self.getJobForConnection(packet.connection) - if job: - self.sendJobAssignUniq(packet.connection, job) - else: - self.sendNoJob(packet.connection) - - def sendJobAssignUniq(self, connection, job): - unique = job.unique - if not unique: - unique = b'' - data = b'\x00'.join((job.handle, job.name, unique, job.arguments)) - p = Packet(constants.RES, constants.JOB_ASSIGN_UNIQ, data) - connection.sendPacket(p) - - def sendNoJob(self, connection): - p = Packet(constants.RES, constants.NO_JOB, b'') - connection.sendPacket(p) - - def handlePreSleep(self, packet): - packet.connection.changeState("SLEEP") - if self.getJobForConnection(packet.connection, peek=True): - self.wakeConnections() - - def handleWorkComplete(self, packet): - self.handlePassthrough(packet, True) - - def handleWorkFail(self, packet): - self.handlePassthrough(packet, True) - - def handleWorkException(self, packet): - self.handlePassthrough(packet, 
True) - - def handleWorkData(self, packet): - self.handlePassthrough(packet) - - def handleWorkWarning(self, packet): - self.handlePassthrough(packet) - - def handleWorkStatus(self, packet): - handle = packet.getArgument(0) - job = self.jobs.get(handle) - if not job: - raise UnknownJobError() - job.numerator = packet.getArgument(1) - job.denominator = packet.getArgument(2) - self.handlePassthrough(packet) - - def handlePassthrough(self, packet, finished=False): - handle = packet.getArgument(0) - job = self.jobs.get(handle) - if not job: - raise UnknownJobError() - packet.code = constants.RES - job.client_connection.sendPacket(packet) - if finished: - self._removeJob(job, dequeue=False) - self._updateStats() - - def handleSetClientID(self, packet): - name = packet.getArgument(0) - packet.connection.client_id = name - - def sendError(self, connection, code, text): - data = (str(code).encode('utf8') + b'\x00' + - str(text).encode('utf8') + b'\x00') - p = Packet(constants.RES, constants.ERROR, data) - connection.sendPacket(p) - - def handleCanDo(self, packet): - name = packet.getArgument(0) - if self.acl: - if not self.acl.canRegister(packet.connection.ssl_subject, name): - self.log.info("Ignoring CAN_DO from %s for %s due to ACL" % - (packet.connection.ssl_subject, name)) - # CAN_DO normally does not merit a response so it is - # not clear that it is appropriate to send an ERROR - # response at this point. - return - self.log.debug("Adding function %s to %s" % (name, packet.connection)) - packet.connection.functions.add(name) - self.functions.add(name) - - def handleCantDo(self, packet): - name = packet.getArgument(0) - self.log.debug("Removing function %s from %s" % - (name, packet.connection)) - packet.connection.functions.remove(name) - - def handleResetAbilities(self, packet): - self.log.debug("Resetting functions for %s" % packet.connection) - packet.connection.functions = set() - - def handleGetStatus(self, packet): - handle = packet.getArgument(0) - self.log.debug("Getting status for %s" % handle) - - known = 0 - running = 0 - numerator = b'' - denominator = b'' - job = self.jobs.get(handle) - if job: - known = 1 - if job.running: - running = 1 - numerator = job.numerator or b'' - denominator = job.denominator or b'' - - data = (handle + b'\x00' + - str(known).encode('utf8') + b'\x00' + - str(running).encode('utf8') + b'\x00' + - numerator + b'\x00' + - denominator) - p = Packet(constants.RES, constants.STATUS_RES, data) - packet.connection.sendPacket(p) diff --git a/libra/gear/acl.py b/libra/gear/acl.py deleted file mode 100644 index 3425235b..00000000 --- a/libra/gear/acl.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - - -class ACLError(Exception): - pass - - -class ACLEntry(object): - """An access control list entry. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. 
-
-    :arg str register: A regular expression that matches the jobs that
-        connections with this certificate are permitted to register.
-
-    :arg str invoke: A regular expression that matches the jobs that
-        connections with this certificate are permitted to invoke.
-        Also implies the permission to cancel the same set of jobs in
-        the queue.
-
-    :arg boolean grant: A flag indicating whether connections with
-        this certificate are permitted to grant access to other
-        connections.  Also implies the permission to revoke access
-        from other connections.  The ability to self-revoke access is
-        always implied.
-    """
-
-    def __init__(self, subject, register=None, invoke=None, grant=False):
-        self.subject = subject
-        self.setRegister(register)
-        self.setInvoke(invoke)
-        self.setGrant(grant)
-
-    def __repr__(self):
-        return ('<gear.ACLEntry subject: %s register: %s '
-                'invoke: %s grant: %s>' %
-                (self.subject, self.register, self.invoke, self.grant))
-
-    def isEmpty(self):
-        """Checks whether this entry grants any permissions at all.
-
-        :returns: False if any permission is granted, otherwise True.
-        """
-        if (self.register is None and
-                self.invoke is None and
-                self.grant is False):
-            return True
-        return False
-
-    def canRegister(self, name):
-        """Check whether this subject is permitted to register a function.
-
-        :arg str name: The function name to check.
-        :returns: A boolean indicating whether the action should be permitted.
-        """
-        if self.register is None:
-            return False
-        if not self._register.match(name):
-            return False
-        return True
-
-    def canInvoke(self, name):
-        """Check whether this subject is permitted to invoke a function.
-
-        :arg str name: The function name to check.
-        :returns: A boolean indicating whether the action should be permitted.
-        """
-        if self.invoke is None:
-            return False
-        if not self._invoke.match(name):
-            return False
-        return True
-
-    def setRegister(self, register):
-        """Sets the functions that this subject can register.
-
-        :arg str register: A regular expression that matches the jobs that
-            connections with this certificate are permitted to register.
-        """
-        self.register = register
-        if register:
-            try:
-                self._register = re.compile(register)
-            except re.error, e:
-                raise ACLError('Regular expression error: %s' % (e.message,))
-        else:
-            self._register = None
-
-    def setInvoke(self, invoke):
-        """Sets the functions that this subject can invoke.
-
-        :arg str invoke: A regular expression that matches the jobs that
-            connections with this certificate are permitted to invoke.
-        """
-        self.invoke = invoke
-        if invoke:
-            try:
-                self._invoke = re.compile(invoke)
-            except re.error, e:
-                raise ACLError('Regular expression error: %s' % (e.message,))
-        else:
-            self._invoke = None
-
-    def setGrant(self, grant):
-        """Sets whether this subject can grant ACLs to others.
-
-        :arg boolean grant: A flag indicating whether connections with
-            this certificate are permitted to grant access to other
-            connections.  Also implies the permission to revoke access
-            from other connections.  The ability to self-revoke access is
-            always implied.
-        """
-        self.grant = grant
-
-
-class ACL(object):
-    """An access control list.
-
-    ACLs are deny-by-default.  The checked actions are only allowed if
-    there is an explicit rule in the ACL granting permission for a
-    given client (identified by SSL certificate Common Name Subject)
-    to perform that action.
-    """
-
-    def __init__(self):
-        self.subjects = {}
-
-    def add(self, entry):
-        """Add an ACL entry.
-
-        :arg Entry entry: The :py:class:`ACLEntry` to add.
- :raises ACLError: If there is already an entry for the subject. - """ - if entry.subject in self.subjects: - raise ACLError("An ACL entry for %s already exists" % - (entry.subject,)) - self.subjects[entry.subject] = entry - - def remove(self, subject): - """Remove an ACL entry. - - :arg str subject: The SSL certificate Subject Common Name to - remove from the ACL. - :raises ACLError: If there is no entry for the subject. - """ - if subject not in self.subjects: - raise ACLError("There is no ACL entry for %s" % (subject,)) - del self.subjects[subject] - - def getEntries(self): - """Return a list of current ACL entries. - - :returns: A list of :py:class:`ACLEntry` objects. - """ - items = self.subjects.items() - items.sort(lambda a, b: cmp(a[0], b[0])) - return [x[1] for x in items] - - def canRegister(self, subject, name): - """Check whether a subject is permitted to register a function. - - :arg str subject: The SSL certificate Subject Common Name to - check against. - :arg str name: The function name to check. - :returns: A boolean indicating whether the action should be permitted. - """ - entry = self.subjects.get(subject) - if entry is None: - return False - return entry.canRegister(name) - - def canInvoke(self, subject, name): - """Check whether a subject is permitted to invoke a function. - - :arg str subject: The SSL certificate Subject Common Name to - check against. - :arg str name: The function name to check. - :returns: A boolean indicating whether the action should be permitted. - """ - entry = self.subjects.get(subject) - if entry is None: - return False - return entry.canInvoke(name) - - def canGrant(self, subject): - """Check whether a subject is permitted to grant access to others. - - :arg str subject: The SSL certificate Subject Common Name to - check against. - :returns: A boolean indicating whether the action should be permitted. - """ - entry = self.subjects.get(subject) - if entry is None: - return False - if not entry.grant: - return False - return True - - def grantInvoke(self, subject, invoke): - """Grant permission to invoke certain functions. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. - :arg str invoke: A regular expression that matches the jobs - that connections with this certificate are permitted to - invoke. Also implies the permission to cancel the same - set of jobs in the queue. - """ - e = self.subjects.get(subject) - if not e: - e = ACLEntry(subject) - self.add(e) - e.setInvoke(invoke) - - def grantRegister(self, subject, register): - """Grant permission to register certain functions. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. - :arg str register: A regular expression that matches the jobs that - connections with this certificate are permitted to register. - """ - e = self.subjects.get(subject) - if not e: - e = ACLEntry(subject) - self.add(e) - e.setRegister(register) - - def grantGrant(self, subject): - """Grant permission to grant permissions to other connections. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. - """ - e = self.subjects.get(subject) - if not e: - e = ACLEntry(subject) - self.add(e) - e.setGrant(True) - - def revokeInvoke(self, subject): - """Revoke permission to invoke all functions. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. 
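
Taken together, a deny-by-default ACL for the test server is assembled like this sketch (the subjects and certificate paths are hypothetical, and the ACL only has an effect when connections carry SSL subjects):

    acl = ACL()
    acl.add(ACLEntry('worker.example.com', register='lbaas-.*'))
    acl.add(ACLEntry('client.example.com', invoke='lbaas-.*'))
    acl.add(ACLEntry('admin.example.com', grant=True))
    server = Server(port=4730,
                    ssl_key='/etc/gear/server.key',    # hypothetical paths
                    ssl_cert='/etc/gear/server.crt',
                    ssl_ca='/etc/gear/ca.crt',
                    acl=acl)
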
- """ - e = self.subjects.get(subject) - if e: - e.setInvoke(None) - if e.isEmpty(): - self.remove(subject) - - def revokeRegister(self, subject): - """Revoke permission to register all functions. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. - """ - e = self.subjects.get(subject) - if e: - e.setRegister(None) - if e.isEmpty(): - self.remove(subject) - - def revokeGrant(self, subject): - """Revoke permission to grant permissions to other connections. - - :arg str subject: The SSL certificate Subject Common Name to which - the entry applies. - """ - e = self.subjects.get(subject) - if e: - e.setGrant(False) - if e.isEmpty(): - self.remove(subject) diff --git a/libra/gear/constants.py b/libra/gear/constants.py deleted file mode 100644 index 27512789..00000000 --- a/libra/gear/constants.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Protocol Constants -================== - -These are not necessary for normal API usage. See the `Gearman -protocol reference `_ for an explanation -of each of these. - -Magic Codes ------------ - -.. py:data:: REQ - - The Gearman magic code for a request. - -.. py:data:: RES - - The Gearman magic code for a response. - -Packet Types ------------- - -""" - -types = { - 1: 'CAN_DO', - 2: 'CANT_DO', - 3: 'RESET_ABILITIES', - 4: 'PRE_SLEEP', - # unused - 6: 'NOOP', - 7: 'SUBMIT_JOB', - 8: 'JOB_CREATED', - 9: 'GRAB_JOB', - 10: 'NO_JOB', - 11: 'JOB_ASSIGN', - 12: 'WORK_STATUS', - 13: 'WORK_COMPLETE', - 14: 'WORK_FAIL', - 15: 'GET_STATUS', - 16: 'ECHO_REQ', - 17: 'ECHO_RES', - 18: 'SUBMIT_JOB_BG', - 19: 'ERROR', - 20: 'STATUS_RES', - 21: 'SUBMIT_JOB_HIGH', - 22: 'SET_CLIENT_ID', - 23: 'CAN_DO_TIMEOUT', - 24: 'ALL_YOURS', - 25: 'WORK_EXCEPTION', - 26: 'OPTION_REQ', - 27: 'OPTION_RES', - 28: 'WORK_DATA', - 29: 'WORK_WARNING', - 30: 'GRAB_JOB_UNIQ', - 31: 'JOB_ASSIGN_UNIQ', - 32: 'SUBMIT_JOB_HIGH_BG', - 33: 'SUBMIT_JOB_LOW', - 34: 'SUBMIT_JOB_LOW_BG', - 35: 'SUBMIT_JOB_SCHED', - 36: 'SUBMIT_JOB_EPOCH', -} - -for i, name in types.items(): - globals()[name] = i - __doc__ += '\n.. py:data:: %s\n' % name - -REQ = b'\x00REQ' -RES = b'\x00RES' diff --git a/libra/mgm/__init__.py b/libra/mgm/__init__.py deleted file mode 100644 index b64863db..00000000 --- a/libra/mgm/__init__.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo.config import cfg
-
-
-mgm_group = cfg.OptGroup('mgm', 'Libra Pool Manager options')
-
-cfg.CONF.register_group(mgm_group)
-
-cfg.CONF.register_opts(
-    [
-        cfg.IntOpt('az',
-                   required=True,
-                   help='The az the nodes and IPs will reside in (to be '
-                        'passed to the API server)'),
-        cfg.StrOpt('pid',
-                   default='/var/run/libra/libra_mgm.pid',
-                   help='PID file'),
-        cfg.StrOpt('node_basename',
-                   help='prepend the name of all nodes with this'),
-        cfg.StrOpt('nova_auth_url',
-                   required=True,
-                   help='the auth URL for the Nova API'),
-        cfg.StrOpt('nova_user',
-                   required=True,
-                   secret=True,
-                   help='the username for the Nova API'),
-        cfg.StrOpt('nova_pass',
-                   required=True,
-                   secret=True,
-                   help='the password for the Nova API'),
-        cfg.StrOpt('nova_region',
-                   required=True,
-                   help='the region to use for the Nova API'),
-        cfg.StrOpt('nova_tenant',
-                   help='the tenant name for the Nova API'),
-        cfg.StrOpt('nova_tenant_id',
-                   help='the tenant ID for the Nova API'),
-        cfg.StrOpt('nova_keyname',
-                   required=True,
-                   help='the key name for new nodes spun up in the Nova API'),
-        cfg.StrOpt('nova_secgroup',
-                   required=True,
-                   help='the security group for new nodes spun up in the '
-                        'Nova API'),
-        cfg.StrOpt('nova_image',
-                   required=True,
-                   help='the image ID or name to use for new nodes spun up '
-                        'in the Nova API'),
-        cfg.StrOpt('nova_image_size',
-                   required=True,
-                   help='the image size ID (flavor ID) or name to use for '
-                        'new nodes spun up in the Nova API'),
-        cfg.StrOpt('nova_az_name',
-                   help='the az name to build in'),
-        cfg.BoolOpt('nova_insecure',
-                    default=False,
-                    help='do not attempt to verify Nova/Keystone SSL '
-                         'certificates'),
-        cfg.StrOpt('nova_bypass_url',
-                   help='use a different URL to the one supplied by the '
-                        'service'),
-        cfg.StrOpt('nova_net_id',
-                   help='The ID of the network to put loadbalancer on '
-                        '(Required if multiple Neutron networks)'),
-        cfg.BoolOpt('rm_fip_ignore_500',
-                    default=False,
-                    help='Ignore HTTP 500 error when removing a floating IP'),
-        cfg.IntOpt('tcp_check_port',
-                   help='Port number to ping to check floating IP assign '
-                        'worked'),
-        cfg.IntOpt('threads',
-                   default=4,
-                   help='Number of worker threads to spawn'),
-        cfg.IntOpt('build_diag_timeout',
-                   default=10,
-                   help='Timeout in seconds the pool manager will wait for '
-                        'a worker to complete the post-build diagnostic '
-                        'tests.'),
-    ],
-    group=mgm_group
-)
diff --git a/libra/mgm/controllers/__init__.py b/libra/mgm/controllers/__init__.py
deleted file mode 100644
index 582348cb..00000000
--- a/libra/mgm/controllers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/libra/mgm/controllers/build.py b/libra/mgm/controllers/build.py
deleted file mode 100644
index 7df2c85d..00000000
--- a/libra/mgm/controllers/build.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from time import sleep
-from novaclient import exceptions
-from oslo.config import cfg
-from gearman.constants import JOB_UNKNOWN
-from libra.openstack.common import log
-from libra.common.json_gearman import JSONGearmanClient
-from libra.mgm.nova import Node, BuildError, NotFound
-
-
-LOG = log.getLogger(__name__)
-
-
-class BuildController(object):
-
-    RESPONSE_FIELD = 'response'
-    RESPONSE_SUCCESS = 'PASS'
-    RESPONSE_FAILURE = 'FAIL'
-
-    def __init__(self, msg):
-        self.msg = msg
-
-    def run(self):
-        try:
-            nova = Node()
-        except Exception:
-            LOG.exception("Error initialising Nova connection")
-            self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-            return self.msg
-
-        LOG.info("Building a requested Nova instance")
-        try:
-            node_id = nova.build()
-            LOG.info("Build command sent to Nova")
-        except BuildError as exc:
-            LOG.exception(
-                "{0}, node {1}".format(exc.msg, exc.node_name)
-            )
-            name = exc.node_name
-            # Node may have built despite error
-            try:
-                node_id = nova.get_node(name)
-            except NotFound:
-                LOG.error(
-                    "No node found for {0}, giving up on it".format(name)
-                )
-                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-                return self.msg
-            except exceptions.ClientException:
-                LOG.exception(
-                    'Error getting failed node info from Nova for {0}'
-                    .format(name)
-                )
-                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-                return self.msg
-        if node_id > 0:
-            self._wait_until_node_ready(nova, node_id)
-            if self.msg[self.RESPONSE_FIELD] == self.RESPONSE_SUCCESS:
-                status = self._test_node(self.msg['name'])
-                if not status:
-                    self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-            return self.msg
-        else:
-            LOG.error(
-                'Node build did not return an ID, cannot find it'
-            )
-            self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-        return self.msg
-
-    def _wait_until_node_ready(self, nova, node_id):
-        for x in xrange(10):
-            try:
-                resp, status = nova.status(node_id)
-            except NotFound:
-                LOG.error(
-                    'Node {0} can no longer be found'.format(node_id)
-                )
-                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-                return self.msg
-            except exceptions.ClientException:
-                LOG.exception(
-                    'Error getting status from Nova'
-                )
-                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-                return self.msg
-            if resp.status_code not in (200, 203):
-                LOG.error(
-                    'Error getting status from Nova, error {0}'
-                    .format(resp.status_code)
-                )
-                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE
-                return self.msg
-            status = status['server']
-            if status['status'] == 'ACTIVE':
-                self.msg['name'] = status['name']
-                addresses = status['addresses'].itervalues().next()
-                for address in addresses:
-                    if not address['addr'].startswith('10.'):
-                        break
-                self.msg['addr'] = address['addr']
-                self.msg['type'] = "basename: {0}, image: {1}".format(
-                    cfg.CONF['mgm']['node_basename'],
-                    cfg.CONF['mgm']['nova_image']
-                )
-                self.msg['az'] = cfg.CONF['mgm']['az']
-                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS
-                LOG.info('Node {0} returned'.format(status['name']))
-                return
self.msg - sleep(60) - - LOG.error( - "Node {0} didn't come up after 10 minutes".format(node_id) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - def _test_node(self, name): - """ Run diags on node, blow it away if bad """ - server_list = [] - for server in cfg.CONF['gearman']['servers']: - host, port = server.split(':') - server_list.append({'host': host, - 'port': int(port), - 'keyfile': cfg.CONF['gearman']['ssl_key'], - 'certfile': cfg.CONF['gearman']['ssl_cert'], - 'ca_certs': cfg.CONF['gearman']['ssl_ca'], - 'keepalive': cfg.CONF['gearman']['keepalive'], - 'keepcnt': cfg.CONF['gearman']['keepcnt'], - 'keepidle': cfg.CONF['gearman']['keepidle'], - 'keepintvl': cfg.CONF['gearman']['keepintvl']}) - gm_client = JSONGearmanClient(server_list) - - job_data = {'hpcs_action': 'DIAGNOSTICS'} - job_status = gm_client.submit_job( - str(name), job_data, background=False, wait_until_complete=True, - max_retries=10, poll_timeout=10 - ) - if job_status.state == JOB_UNKNOWN: - # Gearman server connect fail, count as bad node because we can't - # tell if it really is working - LOG.error('Could not talk to gearman server') - return False - if job_status.timed_out: - LOG.warning('Timeout getting diags from {0}'.format(name)) - return False - LOG.debug(job_status.result) - # Would only happen if DIAGNOSTICS call not supported - if job_status.result['hpcs_response'] == 'FAIL': - return True - - if job_status.result['network'] == 'FAIL': - return False - - gearman_count = 0 - gearman_fail = 0 - for gearman_test in job_status.result['gearman']: - gearman_count += 1 - if gearman_test['status'] == 'FAIL': - LOG.info( - 'Device {0} cannot talk to gearman {1}' - .format(name, gearman_test['host']) - ) - gearman_fail += 1 - # Need 2/3rds gearman up - max_fail_count = gearman_count / 3 - if gearman_fail > max_fail_count: - return False - return True diff --git a/libra/mgm/controllers/delete.py b/libra/mgm/controllers/delete.py deleted file mode 100644 index 2e4f2268..00000000 --- a/libra/mgm/controllers/delete.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
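
The 2/3 quorum rule in _test_node() above is easy to misread, so here is a worked sketch of the arithmetic (integer division, as in the original Python 2 code):

    gearman_count = 3
    gearman_fail = 1
    max_fail_count = gearman_count // 3    # 3 servers -> tolerate 1 failure
    node_is_bad = gearman_fail > max_fail_count
    assert not node_is_bad                 # 2 of 3 reachable is still a pass
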
- -from libra.mgm.nova import Node, NotFound -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class DeleteController(object): - - RESPONSE_FIELD = 'response' - RESPONSE_SUCCESS = 'PASS' - RESPONSE_FAILURE = 'FAIL' - - def __init__(self, msg): - self.msg = msg - - def run(self): - try: - nova = Node() - except Exception: - LOG.exception("Error initialising Nova connection") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - LOG.info( - "Deleting a requested Nova instance {0}".format(self.msg['name']) - ) - try: - node_id = nova.get_node(self.msg['name']) - except NotFound: - LOG.error( - "No node found for {0}".format(self.msg['name']) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - nova.delete(node_id) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - LOG.info( - 'Deleted node {0}, id {1}'.format(self.msg['name'], node_id) - ) - return self.msg diff --git a/libra/mgm/controllers/root.py b/libra/mgm/controllers/root.py deleted file mode 100644 index 1b8456a7..00000000 --- a/libra/mgm/controllers/root.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from libra.mgm.controllers.build import BuildController -from libra.mgm.controllers.delete import DeleteController -from libra.mgm.controllers.vip import BuildIpController, AssignIpController -from libra.mgm.controllers.vip import RemoveIpController, DeleteIpController -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class PoolMgmController(object): - - ACTION_FIELD = 'action' - RESPONSE_FIELD = 'response' - RESPONSE_SUCCESS = 'PASS' - RESPONSE_FAILURE = 'FAIL' - - def __init__(self, json_msg): - self.msg = json_msg - - def run(self): - if self.ACTION_FIELD not in self.msg: - LOG.error("Missing `{0}` value".format(self.ACTION_FIELD)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - action = self.msg[self.ACTION_FIELD].upper() - - try: - if action == 'BUILD_DEVICE': - controller = BuildController(self.msg) - elif action == 'DELETE_DEVICE': - controller = DeleteController(self.msg) - elif action == 'BUILD_IP': - controller = BuildIpController(self.msg) - elif action == 'ASSIGN_IP': - controller = AssignIpController(self.msg) - elif action == 'REMOVE_IP': - controller = RemoveIpController(self.msg) - elif action == 'DELETE_IP': - controller = DeleteIpController(self.msg) - else: - LOG.error( - "Invalid `{0}` value: {1}".format( - self.ACTION_FIELD, action - ) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - self.msg = controller.run() - # Delete a built device if it has failed - if ( - action == 'BUILD_DEVICE' - and self.msg[self.RESPONSE_FIELD] == self.RESPONSE_FAILURE - and 'name' in self.msg - ): - delete_msg = {'name': self.msg['name']} - controller = DeleteController(delete_msg) - controller.run() - - return self.msg - except Exception: - LOG.exception("Controller 
exception") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg diff --git a/libra/mgm/controllers/vip.py b/libra/mgm/controllers/vip.py deleted file mode 100644 index ce386435..00000000 --- a/libra/mgm/controllers/vip.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket -import time -from novaclient import exceptions -from oslo.config import cfg - -from libra.mgm.nova import Node -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -class BuildIpController(object): - - RESPONSE_FIELD = 'response' - RESPONSE_SUCCESS = 'PASS' - RESPONSE_FAILURE = 'FAIL' - - def __init__(self, msg): - self.msg = msg - - def run(self): - try: - nova = Node() - except Exception: - LOG.exception("Error initialising Nova connection") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - LOG.info("Creating a requested floating IP") - try: - ip_info = nova.vip_create() - except exceptions.ClientException: - LOG.exception( - 'Error getting a Floating IP' - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - LOG.info("Floating IP {0} created".format(ip_info['id'])) - self.msg['id'] = ip_info['id'] - self.msg['ip'] = ip_info['ip'] - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - -class AssignIpController(object): - - RESPONSE_FIELD = 'response' - RESPONSE_SUCCESS = 'PASS' - RESPONSE_FAILURE = 'FAIL' - - def __init__(self, msg): - self.msg = msg - - def run(self): - try: - nova = Node() - except Exception: - LOG.exception("Error initialising Nova connection") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - LOG.info( - "Assigning Floating IP {0} to {1}" - .format(self.msg['ip'], self.msg['name']) - ) - try: - node_id = nova.get_node(self.msg['name']) - LOG.info( - 'Node name {0} identified as ID {1}' - .format(self.msg['name'], node_id) - ) - nova.vip_assign(node_id, self.msg['ip']) - - self._wait_until_ip_assigned(nova, node_id, self.msg['ip']) - - if cfg.CONF['mgm']['tcp_check_port']: - self.check_ip(self.msg['ip'], - cfg.CONF['mgm']['tcp_check_port']) - except: - LOG.exception( - 'Error assigning Floating IP {0} to {1}' - .format(self.msg['ip'], self.msg['name']) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def check_ip(self, ip, port): - # TCP connect check to see if floating IP was assigned correctly - loop_count = 0 - while True: - try: - sock = socket.socket() - sock.settimeout(5) - sock.connect((ip, port)) - sock.close() - return True - except socket.error: - try: - sock.close() - except: - pass - loop_count += 1 - if loop_count >= 5: - LOG.error( - "TCP connect error after floating IP assign {0}" - .format(ip) - ) - raise - time.sleep(2) - - def _wait_until_ip_assigned(self, nova, node_id, vip): - current_instance_id = None - # We can check the 
status of the VIP for roughly 20 seconds: five attempts with a - # five-second sleep between tries. All attempts must complete before - # the Gearman message times out at two minutes, which leaves ample - # headroom - for x in xrange(1, 6): - try: - current_instance_id = nova.vip_get_instance_id(vip) - LOG.debug("VIP {0} currently reports instance ID {1}" - .format(vip, current_instance_id) - ) - if current_instance_id == node_id: - return - except: - pass - LOG.debug("VIP has instance ID {0} but was assigned to " - "instance {1}, sleeping" - .format(current_instance_id, node_id) - ) - if x < 5: - time.sleep(5) - raise Exception('VIP instance ID did not match assigned ' - 'instance ID after 20 secs. Failing assignment') - - -class RemoveIpController(object): - - RESPONSE_FIELD = 'response' - RESPONSE_SUCCESS = 'PASS' - RESPONSE_FAILURE = 'FAIL' - - def __init__(self, msg): - self.msg = msg - - def run(self): - try: - nova = Node() - except Exception: - LOG.exception("Error initialising Nova connection") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - LOG.info( - "Removing Floating IP {0} from {1}" - .format(self.msg['ip'], self.msg['name']) - ) - try: - node_id = nova.get_node(self.msg['name']) - nova.vip_remove(node_id, self.msg['ip']) - except: - LOG.exception( - 'Error removing Floating IP {0} from {1}' - .format(self.msg['ip'], self.msg['name']) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - -class DeleteIpController(object): - - RESPONSE_FIELD = 'response' - RESPONSE_SUCCESS = 'PASS' - RESPONSE_FAILURE = 'FAIL' - - def __init__(self, msg): - self.msg = msg - - def run(self): - try: - nova = Node() - except Exception: - LOG.exception("Error initialising Nova connection") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - LOG.info( - "Deleting Floating IP {0}" - .format(self.msg['ip']) - ) - try: - nova.vip_delete(self.msg['ip']) - except: - LOG.exception( - 'Error deleting Floating IP {0}' - .format(self.msg['ip']) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg diff --git a/libra/mgm/gearman_worker.py b/libra/mgm/gearman_worker.py deleted file mode 100644 index d8d53e65..00000000 --- a/libra/mgm/gearman_worker.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
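The check_ip helper in AssignIpController above is a plain retrying TCP probe. Reduced to its essentials, and with the hypothetical name tcp_reachable standing in for the controller plumbing, the same pattern looks like::

    import socket
    import time

    def tcp_reachable(ip, port, attempts=5, delay=2, timeout=5):
        # Return True once a TCP connect succeeds; re-raise the last
        # socket error after the final attempt, as check_ip does.
        for attempt in range(attempts):
            sock = socket.socket()
            sock.settimeout(timeout)
            try:
                sock.connect((ip, port))
                return True
            except socket.error:
                if attempt == attempts - 1:
                    raise
                time.sleep(delay)
            finally:
                sock.close()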
- -import gearman.errors -import json -import socket -import time - -from oslo.config import cfg - -from libra.common.json_gearman import JSONGearmanWorker -from libra.mgm.controllers.root import PoolMgmController -from libra.openstack.common import log - - -LOG = log.getLogger(__name__) - - -def handler(worker, job): - LOG.debug("Received JSON message: {0}".format(json.dumps(job.data))) - controller = PoolMgmController(job.data) - response = controller.run() - LOG.debug("Return JSON message: {0}".format(json.dumps(response))) - return response - - -def worker_thread(): - LOG.info("Registering task libra_pool_mgm") - hostname = socket.gethostname() - - server_list = [] - for host_port in cfg.CONF['gearman']['servers']: - host, port = host_port.split(':') - server_list.append({'host': host, - 'port': int(port), - 'keyfile': cfg.CONF['gearman']['ssl_key'], - 'certfile': cfg.CONF['gearman']['ssl_cert'], - 'ca_certs': cfg.CONF['gearman']['ssl_ca'], - 'keepalive': cfg.CONF['gearman']['keepalive'], - 'keepcnt': cfg.CONF['gearman']['keepcnt'], - 'keepidle': cfg.CONF['gearman']['keepidle'], - 'keepintvl': cfg.CONF['gearman']['keepintvl']}) - worker = JSONGearmanWorker(server_list) - - worker.set_client_id(hostname) - worker.register_task('libra_pool_mgm', handler) - worker.logger = LOG - - retry = True - - while (retry): - try: - worker.work(cfg.CONF['gearman']['poll']) - except KeyboardInterrupt: - retry = False - except gearman.errors.ServerUnavailable: - LOG.error("Job server(s) went away. Reconnecting.") - time.sleep(cfg.CONF['gearman']['reconnect_sleep']) - retry = True - except Exception: - LOG.exception("Exception in worker") - retry = False - - LOG.debug("Pool manager process terminated.") diff --git a/libra/mgm/mgm.py b/libra/mgm/mgm.py deleted file mode 100644 index df64035e..00000000 --- a/libra/mgm/mgm.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
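worker_thread above is the standard python-gearman worker loop: build a server list, register a task handler, then block in work() and reconnect when the job servers go away. Stripped of the SSL and oslo.config plumbing, a minimal sketch (plaintext server and task name are placeholders; assumes the python-gearman package and a running gearmand) is::

    import gearman

    def echo(worker, job):
        # job.data is the payload the client submitted
        return job.data

    worker = gearman.GearmanWorker(['localhost:4730'])
    worker.set_client_id('example-worker')
    worker.register_task('echo', echo)
    worker.work()  # blocks, serving jobs until interrupted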
- -import daemon -import daemon.pidfile -import daemon.runner -import grp -import logging as std_logging -import pwd -import threading - -from libra import __version__ -from libra.common.log import get_descriptors -from libra.common.options import CONF -from libra.common.options import add_common_opts -from libra.common.options import check_gearman_ssl_files -from libra.openstack.common import log as logging -from libra.mgm.gearman_worker import worker_thread - - -LOG = logging.getLogger(__name__) - - -class Server(object): - def main(self): - - try: - check_gearman_ssl_files() - except Exception as e: - LOG.critical(str(e)) - return - - LOG.info( - 'Libra Pool Manager worker started, spawning {0} threads' - .format(CONF['mgm']['threads']) - ) - thread_list = [] - for x in xrange(0, CONF['mgm']['threads']): - thd = threading.Thread( - target=worker_thread, args=[] - ) - thd.daemon = True - thread_list.append(thd) - thd.start() - for thd in thread_list: - thd.join() - - -def main(): - add_common_opts() - CONF(project='libra', version=__version__) - - logging.setup('libra') - - LOG.debug('Configuration:') - CONF.log_opt_values(LOG, std_logging.DEBUG) - - server = Server() - - if not CONF['daemon']: - server.main() - else: - pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['mgm']['pid'], 10) - if daemon.runner.is_pidfile_stale(pidfile): - pidfile.break_lock() - - descriptors = get_descriptors() - context = daemon.DaemonContext( - working_directory='/', - umask=0o022, - pidfile=pidfile, - files_preserve=descriptors - ) - if CONF['user']: - context.uid = pwd.getpwnam(CONF['user']).pw_uid - if CONF['group']: - context.gid = grp.getgrnam(CONF['group']).gr_gid - - context.open() - server.main() - - return 0 diff --git a/libra/mgm/nova.py b/libra/mgm/nova.py deleted file mode 100644 index d53d2a47..00000000 --- a/libra/mgm/nova.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
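Server.main above is a simple fan-out: it spawns a configurable number of identical daemon threads and joins them so the process lives as long as its workers. The same skeleton without oslo.config (run_pool and its arguments are illustrative only)::

    import threading

    def run_pool(target, count):
        # start `count` daemon threads running `target`, then wait
        threads = [threading.Thread(target=target) for _ in range(count)]
        for t in threads:
            t.daemon = True  # don't block interpreter exit
            t.start()
        for t in threads:
            t.join()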
- -import uuid -import sys -import urllib - -from oslo.config import cfg - -from novaclient import client -from novaclient import exceptions - - -class NotFound(Exception): - pass - - -class BuildError(Exception): - def __init__(self, msg, node_name, node_id=0): - self.msg = msg - self.node_name = node_name - self.node_id = node_id - - def __str__(self): - return self.msg - - -class Node(object): - def __init__(self): - self.nova = client.HTTPClient( - cfg.CONF['mgm']['nova_user'], - cfg.CONF['mgm']['nova_pass'], - cfg.CONF['mgm']['nova_tenant'], - cfg.CONF['mgm']['nova_auth_url'], - region_name=cfg.CONF['mgm']['nova_region'], - no_cache=True, - insecure=cfg.CONF['mgm']['nova_insecure'], - tenant_id=cfg.CONF['mgm']['nova_tenant_id'], - bypass_url=cfg.CONF['mgm']['nova_bypass_url'], - service_type='compute' - ) - self.keyname = cfg.CONF['mgm']['nova_keyname'] - self.secgroup = cfg.CONF['mgm']['nova_secgroup'] - self.node_basename = cfg.CONF['mgm']['node_basename'] - self.az = cfg.CONF['mgm']['nova_az_name'] - self.net_id = cfg.CONF['mgm']['nova_net_id'] - self.rm_fip_ignore_500 = cfg.CONF['mgm']['rm_fip_ignore_500'] - - # Replace '_' with '-' in basename - if self.node_basename: - self.node_basename = self.node_basename.replace('_', '-') - - self.image = cfg.CONF['mgm']['nova_image'] - - image_size = cfg.CONF['mgm']['nova_image_size'] - if image_size.isdigit(): - self.node_type = image_size - else: - self.node_type = self._get_flavor(image_size) - - def build(self): - """ create a node and return its Nova instance ID """ - node_id = uuid.uuid1() - try: - body = self._create(node_id) - except exceptions.ClientException as e: - msg = 'Error creating node, exception {exc}. ' \ - 'Message: {msg} Details: {details}' - raise BuildError(msg.format(exc=sys.exc_info()[0], msg=e.message, - details=e.details), node_id) - - return body['server']['id'] - - def vip_create(self): - """ create a virtual IP """ - url = '/os-floating-ips' - body = {"pool": None} - resp, body = self.nova.post(url, body=body) - return body['floating_ip'] - - def vip_assign(self, node_id, vip): - """ assign a virtual IP to a Nova instance """ - url = '/servers/{0}/action'.format(node_id) - body = { - "addFloatingIp": { - "address": vip - } - } - resp, body = self.nova.post(url, body=body) - if resp.status_code != 202: - raise Exception( - 'Response code {0}, message {1} when assigning vip' - .format(resp.status_code, body) - ) - - def vip_remove(self, node_id, vip): - """ remove a virtual IP from a Nova instance """ - url = '/servers/{0}/action'.format(node_id) - body = { - "removeFloatingIp": { - "address": vip - } - } - try: - resp, body = self.nova.post(url, body=body) - except exceptions.ClientException as e: - if e.code == 500 and self.rm_fip_ignore_500: - return - raise - - if resp.status_code != 202: - raise Exception( - 'Response code {0}, message {1} when removing vip' - .format(resp.status_code, body) - ) - - def vip_delete(self, vip): - """ delete a virtual IP """ - vip_id = self._find_vip_id(vip) - url = '/os-floating-ips/{0}'.format(vip_id) - # sometimes this needs to be tried twice, so retry once on failure - try: - resp, body = self.nova.delete(url) - except exceptions.ClientException: - resp, body = self.nova.delete(url) - - def vip_get_instance_id(self, vip): - """ get the id of the instance that owns the vip """ - vip_id = self._find_vip_id(vip) - url = '/os-floating-ips/{0}'.format(vip_id) - resp, body = self.nova.get(url) - if resp.status_code != 200: - raise Exception( - 'Response code {0}, message {1} when getting ' - 'floating IP {2} details' -
.format(resp.status_code, body, vip) - ) - return body['floating_ip']['instance_id'] - - def _find_vip_id(self, vip): - url = '/os-floating-ips' - resp, body = self.nova.get(url) - for fip in body['floating_ips']: - if fip['ip'] == vip: - return fip['id'] - raise NotFound('floating IP not found') - - def delete(self, node_id): - """ delete a node """ - try: - resp = self._delete(node_id) - except exceptions.ClientException: - return False, 'Error deleting node {nid} exception {exc}'.format( - nid=node_id, exc=sys.exc_info()[0] - ) - - if resp.status_code != 204: - return False, 'Error deleting node {nid} status {stat}'.format( - nid=node_id, stat=resp.status_code - ) - - return True, '' - - def _create(self, node_id): - """ create a nova node """ - url = "/servers" - if self.node_basename: - node_name = '{0}-{1}'.format(self.node_basename, node_id) - else: - node_name = '{0}'.format(node_id) - - networks = [] - if self.net_id: - networks.append({"uuid": self.net_id}) - - body = {"server": { - "name": node_name, - "imageRef": self.image, - "key_name": self.keyname, - "flavorRef": self.node_type, - "max_count": 1, - "min_count": 1, - "availability_zone": self.az, - "networks": networks, - "security_groups": [{"name": self.secgroup}] - }} - resp, body = self.nova.post(url, body=body) - return body - - def status(self, node_id): - """ poll the node status while waiting for it to come up """ - url = "/servers/{0}".format(node_id) - try: - resp, body = self.nova.get(url) - except exceptions.NotFound: - msg = "Could not find node with id {0}".format(node_id) - raise NotFound(msg) - - return resp, body - - def _delete(self, node_id): - """ delete a nova node; a 204 response indicates success """ - url = "/servers/{0}".format(node_id) - resp, body = self.nova.delete(url) - - return resp - - # TODO: merge get_node and _get_image to remove duplication of code - - def get_node(self, node_name): - """ tries to find a node from the name """ - args = {'name': node_name} - url = "/servers?{0}".format(urllib.urlencode(args)) - try: - resp, body = self.nova.get(url) - except exceptions.NotFound: - msg = "Could not find node with name {0}".format(node_name) - raise NotFound(msg) - if resp.status_code not in [200, 203]: - msg = "Error {0} searching for node with name {1}".format( - resp.status_code, node_name - ) - raise NotFound(msg) - if len(body['servers']) != 1: - msg = "Could not find node with name {0}".format(node_name) - raise NotFound(msg) - return body['servers'][0]['id'] - - def _get_image(self, image_name): - """ tries to find an image from the name """ - args = {'name': image_name} - url = "/images?{0}".format(urllib.urlencode(args)) - resp, body = self.nova.get(url) - if resp.status_code not in [200, 203]: - msg = "Error {0} searching for image with name {1}".format( - resp.status_code, image_name - ) - raise NotFound(msg) - if len(body['images']) != 1: - msg = "Could not find image with name {0}".format(image_name) - raise NotFound(msg) - return body['images'][0]['id'] - - def _get_flavor(self, flavor_name): - """ tries to find a flavor from the name """ - url = "/flavors" - resp, body = self.nova.get(url) - if resp.status_code not in [200, 203]: - msg = "Error {0} searching for flavor with name {1}".format( - resp.status_code, flavor_name - ) - raise NotFound(msg) - for flavor in body['flavors']: - if flavor['name'] == flavor_name: - return flavor['id'] - msg = "Could not find flavor with name {0}".format(flavor_name) - raise NotFound(msg) diff --git a/libra/openstack/__init__.py b/libra/openstack/__init__.py deleted
file mode 100644 index e69de29b..00000000 diff --git a/libra/openstack/common/__init__.py b/libra/openstack/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libra/openstack/common/context.py b/libra/openstack/common/context.py deleted file mode 100644 index 0e7a48ad..00000000 --- a/libra/openstack/common/context.py +++ /dev/null @@ -1,86 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Simple class that stores security context information in the web request. - -Projects should subclass this class if they wish to enhance the request -context or provide additional information in their specific WSGI pipeline. -""" - -import itertools - -from libra.openstack.common import uuidutils - - -def generate_request_id(): - return 'req-%s' % uuidutils.generate_uuid() - - -class RequestContext(object): - - """Helper class to represent useful information about a request context. - - Stores information about the security context under which the user - accesses the system, as well as additional request information. - """ - - def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, - read_only=False, show_deleted=False, request_id=None, - instance_uuid=None): - self.auth_token = auth_token - self.user = user - self.tenant = tenant - self.is_admin = is_admin - self.read_only = read_only - self.show_deleted = show_deleted - self.instance_uuid = instance_uuid - if not request_id: - request_id = generate_request_id() - self.request_id = request_id - - def to_dict(self): - return {'user': self.user, - 'tenant': self.tenant, - 'is_admin': self.is_admin, - 'read_only': self.read_only, - 'show_deleted': self.show_deleted, - 'auth_token': self.auth_token, - 'request_id': self.request_id, - 'instance_uuid': self.instance_uuid} - - -def get_admin_context(show_deleted=False): - context = RequestContext(None, - tenant=None, - is_admin=True, - show_deleted=show_deleted) - return context - - -def get_context_from_function_and_args(function, args, kwargs): - """Find an arg of type RequestContext and return it. - - This is useful in a couple of decorators where we don't - know much about the function we're wrapping. - """ - - for arg in itertools.chain(kwargs.values(), args): - if isinstance(arg, RequestContext): - return arg - - return None diff --git a/libra/openstack/common/crypto/__init__.py b/libra/openstack/common/crypto/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libra/openstack/common/crypto/utils.py b/libra/openstack/common/crypto/utils.py deleted file mode 100644 index 9d3218ba..00000000 --- a/libra/openstack/common/crypto/utils.py +++ /dev/null @@ -1,179 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 - -from Crypto.Hash import HMAC -from Crypto import Random - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils - - -class CryptoutilsException(Exception): - """Generic Exception for Crypto utilities.""" - - message = _("An unknown error occurred in crypto utils.") - - -class CipherBlockLengthTooBig(CryptoutilsException): - """The block size is too big.""" - - def __init__(self, requested, permitted): - msg = _("Block size of %(given)d is too big, max = %(maximum)d") - message = msg % {'given': requested, 'maximum': permitted} - super(CryptoutilsException, self).__init__(message) - - -class HKDFOutputLengthTooLong(CryptoutilsException): - """The amount of Key Material asked is too much.""" - - def __init__(self, requested, permitted): - msg = _("Length of %(given)d is too long, max = %(maximum)d") - message = msg % {'given': requested, 'maximum': permitted} - super(CryptoutilsException, self).__init__(message) - - -class HKDF(object): - """An HMAC-based Key Derivation Function implementation (RFC5869) - - This class creates an object that allows to use HKDF to derive keys. - """ - - def __init__(self, hashtype='SHA256'): - self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype) - self.max_okm_length = 255 * self.hashfn.digest_size - - def extract(self, ikm, salt=None): - """An extract function that can be used to derive a robust key given - weak Input Key Material (IKM) which could be a password. - Returns a pseudorandom key (of HashLen octets) - - :param ikm: input keying material (ex a password) - :param salt: optional salt value (a non-secret random value) - """ - if salt is None: - salt = '\x00' * self.hashfn.digest_size - - return HMAC.new(salt, ikm, self.hashfn).digest() - - def expand(self, prk, info, length): - """An expand function that will return arbitrary length output that can - be used as keys. - Returns a buffer usable as key material. - - :param prk: a pseudorandom key of at least HashLen octets - :param info: optional string (can be a zero-length string) - :param length: length of output keying material (<= 255 * HashLen) - """ - if length > self.max_okm_length: - raise HKDFOutputLengthTooLong(length, self.max_okm_length) - - N = (length + self.hashfn.digest_size - 1) / self.hashfn.digest_size - - okm = "" - tmp = "" - for block in range(1, N + 1): - tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest() - okm += tmp - - return okm[:length] - - -MAX_CB_SIZE = 256 - - -class SymmetricCrypto(object): - """Symmetric Key Crypto object. - - This class creates a Symmetric Key Crypto object that can be used - to encrypt, decrypt, or sign arbitrary data. - - :param enctype: Encryption Cipher name (default: AES) - :param hashtype: Hash/HMAC type name (default: SHA256) - """ - - def __init__(self, enctype='AES', hashtype='SHA256'): - self.cipher = importutils.import_module('Crypto.Cipher.' + enctype) - self.hashfn = importutils.import_module('Crypto.Hash.' 
+ hashtype) - - def new_key(self, size): - return Random.new().read(size) - - def encrypt(self, key, msg, b64encode=True): - """Encrypt the provided msg and return the ciphertext, optionally - base64 encoded. - - Uses AES-128-CBC with a Random IV by default. - - The plaintext is padded to reach blocksize length. - The last byte of the block is the length of the padding. - The length of the padding does not include the length byte itself. - - :param key: The Encryption key. - :param msg: the plain text. - - :returns encblock: a block of encrypted data. - """ - iv = Random.new().read(self.cipher.block_size) - cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv) - - # CBC mode requires a fixed block size. Append padding and length of - # padding. - if self.cipher.block_size > MAX_CB_SIZE: - raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE) - r = len(msg) % self.cipher.block_size - padlen = self.cipher.block_size - r - 1 - msg += '\x00' * padlen - msg += chr(padlen) - - enc = iv + cipher.encrypt(msg) - if b64encode: - enc = base64.b64encode(enc) - return enc - - def decrypt(self, key, msg, b64decode=True): - """Decrypts the provided ciphertext, optionally base64 encoded, and - returns the plaintext message, after padding is removed. - - Uses AES-128-CBC with an IV by default. - - :param key: The Encryption key. - :param msg: the ciphertext, the first block is the IV - """ - if b64decode: - msg = base64.b64decode(msg) - iv = msg[:self.cipher.block_size] - cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv) - - padded = cipher.decrypt(msg[self.cipher.block_size:]) - plen = ord(padded[-1]) + 1 - plain = padded[:-plen] - return plain - - def sign(self, key, msg, b64encode=True): - """Signs a message string and returns the signature, base64 - encoded by default. - - Uses HMAC-SHA-256 by default. - - :param key: The Signing key. - :param msg: the message to sign. - """ - h = HMAC.new(key, msg, self.hashfn) - out = h.digest() - if b64encode: - out = base64.b64encode(out) - return out diff --git a/libra/openstack/common/eventlet_backdoor.py b/libra/openstack/common/eventlet_backdoor.py deleted file mode 100644 index 9babb377..00000000 --- a/libra/openstack/common/eventlet_backdoor.py +++ /dev/null @@ -1,146 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 OpenStack Foundation. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
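Note that the CBC padding convention used by encrypt() and decrypt() above is not PKCS#7: the plaintext is extended with zero bytes plus a final byte holding the pad length, excluding the length byte itself. A sketch of just that convention, assuming a 16-byte block and written with Python 3 bytes for clarity::

    BLOCK = 16

    def pad(msg):
        padlen = BLOCK - (len(msg) % BLOCK) - 1
        return msg + b'\x00' * padlen + bytes([padlen])

    def unpad(padded):
        # the last byte gives the pad length (excluding itself)
        return padded[:-(padded[-1] + 1)]

    assert unpad(pad(b'attack at dawn')) == b'attack at dawn'
    assert len(pad(b'attack at dawn')) % BLOCK == 0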
- -from __future__ import print_function - -import errno -import gc -import os -import pprint -import socket -import sys -import traceback - -import eventlet -import eventlet.backdoor -import greenlet -from oslo.config import cfg - -from libra.openstack.common.gettextutils import _  # noqa -from libra.openstack.common import log as logging - -help_for_backdoor_port = ( - "Acceptable values are 0, <port>, and <start>:<end>, where 0 results " - "in listening on a random tcp port number; <port> results in listening " - "on the specified port number (and not enabling backdoor if that port " - "is in use); and <start>:<end> results in listening on the smallest " - "unused port number within the specified range of port numbers. The " - "chosen port is displayed in the service's log file.") -eventlet_backdoor_opts = [ - cfg.StrOpt('backdoor_port', - default=None, - help="Enable eventlet backdoor. %s" % help_for_backdoor_port) -] - -CONF = cfg.CONF -CONF.register_opts(eventlet_backdoor_opts) -LOG = logging.getLogger(__name__) - - -class EventletBackdoorConfigValueError(Exception): - def __init__(self, port_range, help_msg, ex): - msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. ' - '%(help)s' % - {'range': port_range, 'ex': ex, 'help': help_msg}) - super(EventletBackdoorConfigValueError, self).__init__(msg) - self.port_range = port_range - - -def _dont_use_this(): - print("Don't use this, just disconnect instead") - - -def _find_objects(t): - return filter(lambda o: isinstance(o, t), gc.get_objects()) - - -def _print_greenthreads(): - for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print(i, gt) - traceback.print_stack(gt.gr_frame) - print() - - -def _print_nativethreads(): - for threadId, stack in sys._current_frames().items(): - print(threadId) - traceback.print_stack(stack) - print() - - -def _parse_port_range(port_range): - if ':' not in port_range: - start, end = port_range, port_range - else: - start, end = port_range.split(':', 1) - try: - start, end = int(start), int(end) - if end < start: - raise ValueError - return start, end - except ValueError as ex: - raise EventletBackdoorConfigValueError(port_range, - help_for_backdoor_port, ex) - - -def _listen(host, start_port, end_port, listen_func): - try_port = start_port - while True: - try: - return listen_func((host, try_port)) - except socket.error as exc: - if (exc.errno != errno.EADDRINUSE or - try_port >= end_port): - raise - try_port += 1 - - -def initialize_if_enabled(): - backdoor_locals = { - 'exit': _dont_use_this,      # So we don't exit the entire process - 'quit': _dont_use_this,      # So we don't exit the entire process - 'fo': _find_objects, - 'pgt': _print_greenthreads, - 'pnt': _print_nativethreads, - } - - if CONF.backdoor_port is None: - return None - - start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) - - # NOTE(johannes): The standard sys.displayhook will print the value of - # the last expression and set it to __builtin__._, which overwrites - # the __builtin__._ that gettext sets. Let's switch to using pprint - # since it won't interact poorly with gettext, and it's easier to - # read the output too. - def displayhook(val): - if val is not None: - pprint.pprint(val) - sys.displayhook = displayhook - - sock = _listen('localhost', start_port, end_port, eventlet.listen) - - # In the case of backdoor port being zero, a port number is assigned by - # listen(). In any case, pull the port number out here.
- port = sock.getsockname()[1] - LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') % - {'port': port, 'pid': os.getpid()}) - eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, - locals=backdoor_locals) - return port diff --git a/libra/openstack/common/excutils.py b/libra/openstack/common/excutils.py deleted file mode 100644 index e3d7ab53..00000000 --- a/libra/openstack/common/excutils.py +++ /dev/null @@ -1,101 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exception related utilities. -""" - -import logging -import sys -import time -import traceback - -import six - -from libra.openstack.common.gettextutils import _  # noqa - - -class save_and_reraise_exception(object): - """Save current exception, run some code and then re-raise. - - In some cases the exception context can be cleared, resulting in an - attempt to re-raise None after an exception handler is run. This - can happen when eventlet switches greenthreads or when running an - exception handler, code raises and catches an exception. In both - cases the exception context will be cleared. - - To work around this, we save the exception state, run handler code, and - then re-raise the original exception. If another exception occurs, the - saved exception is logged and the new exception is re-raised. - - In some cases the caller may not want to re-raise the exception, and - for those circumstances this context provides a reraise flag that - can be used to suppress the exception. For example: - - except Exception: - with save_and_reraise_exception() as ctxt: - decide_if_need_reraise() - if not should_be_reraised: - ctxt.reraise = False - """ - def __init__(self): - self.reraise = True - - def __enter__(self): - self.type_, self.value, self.tb, = sys.exc_info() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - logging.error(_('Original exception being dropped: %s'), - traceback.format_exception(self.type_, - self.value, - self.tb)) - return False - if self.reraise: - six.reraise(self.type_, self.value, self.tb) - - -def forever_retry_uncaught_exceptions(infunc): - def inner_func(*args, **kwargs): - last_log_time = 0 - last_exc_message = None - exc_count = 0 - while True: - try: - return infunc(*args, **kwargs) - except Exception as exc: - this_exc_message = six.u(str(exc)) - if this_exc_message == last_exc_message: - exc_count += 1 - else: - exc_count = 1 - # Do not log any more frequently than once a minute unless - # the exception message changes - cur_time = int(time.time()) - if (cur_time - last_log_time > 60 or - this_exc_message != last_exc_message): - logging.exception( - _('Unexpected exception occurred %d time(s)... ' - 'retrying.') % exc_count) - last_log_time = cur_time - last_exc_message = this_exc_message - exc_count = 0 - # This should be a very rare event. In case it isn't, do - # a sleep.
- time.sleep(1) - return inner_func diff --git a/libra/openstack/common/fileutils.py b/libra/openstack/common/fileutils.py deleted file mode 100644 index a73086c3..00000000 --- a/libra/openstack/common/fileutils.py +++ /dev/null @@ -1,139 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import contextlib -import errno -import os -import tempfile - -from libra.openstack.common import excutils -from libra.openstack.common.gettextutils import _  # noqa -from libra.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -_FILE_CACHE = {} - - -def ensure_tree(path): - """Create a directory (and any ancestor directories required) - - :param path: Directory to create - """ - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST: - if not os.path.isdir(path): - raise - else: - raise - - -def read_cached_file(filename, force_reload=False): - """Read from a file if it has been modified. - - :param force_reload: Whether to reload the file. - :returns: A tuple with a boolean specifying if the data is fresh - or not, and the cached file content. - """ - global _FILE_CACHE - - if force_reload and filename in _FILE_CACHE: - del _FILE_CACHE[filename] - - reloaded = False - mtime = os.path.getmtime(filename) - cache_info = _FILE_CACHE.setdefault(filename, {}) - - if not cache_info or mtime > cache_info.get('mtime', 0): - LOG.debug(_("Reloading cached file %s") % filename) - with open(filename) as fap: - cache_info['data'] = fap.read() - cache_info['mtime'] = mtime - reloaded = True - return (reloaded, cache_info['data']) - - -def delete_if_exists(path, remove=os.unlink): - """Delete a file, but ignore file not found error. - - :param path: File to delete - :param remove: Optional function to remove passed path - """ - - try: - remove(path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - -@contextlib.contextmanager -def remove_path_on_error(path, remove=delete_if_exists): - """Protect code that wants to operate on PATH atomically. - Any exception will cause PATH to be removed. - - :param path: File to work with - :param remove: Optional function to remove passed path - """ - - try: - yield - except Exception: - with excutils.save_and_reraise_exception(): - remove(path) - - -def file_open(*args, **kwargs): - """Open file - - see built-in file() documentation for more details - - Note: The reason this is kept in a separate module is to easily - be able to provide a stub module that doesn't alter system - state at all (for unit tests) - """ - return file(*args, **kwargs) - - -def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): - """Create a temporary file, or use an existing file. - - This util is needed for creating a temporary file with - specified content, suffix and prefix. If path is not None, - it will be used for writing content. If the path doesn't - exist it'll be created. - - :param content: content for temporary file.
- :param path: same as parameter 'dir' for mkstemp - :param suffix: same as parameter 'suffix' for mkstemp - :param prefix: same as parameter 'prefix' for mkstemp - - For example: it can be used in database tests for creating - configuration files. - """ - if path: - ensure_tree(path) - - (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) - try: - os.write(fd, content) - finally: - os.close(fd) - return path diff --git a/libra/openstack/common/fixture/__init__.py b/libra/openstack/common/fixture/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libra/openstack/common/fixture/config.py b/libra/openstack/common/fixture/config.py deleted file mode 100644 index 7b044ef7..00000000 --- a/libra/openstack/common/fixture/config.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mirantis, Inc. -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import fixtures -from oslo.config import cfg -import six - - -class Config(fixtures.Fixture): - """Override some configuration values. - - The keyword arguments are the names of configuration options to - override and their values. - - If a group argument is supplied, the overrides are applied to - the specified configuration option group. - - All overrides are automatically cleared at the end of the current - test by the reset() method, which is registered by addCleanup(). - """ - - def __init__(self, conf=cfg.CONF): - self.conf = conf - - def setUp(self): - super(Config, self).setUp() - self.addCleanup(self.conf.reset) - - def config(self, **kw): - group = kw.pop('group', None) - for k, v in six.iteritems(kw): - self.conf.set_override(k, v, group) diff --git a/libra/openstack/common/fixture/lockutils.py b/libra/openstack/common/fixture/lockutils.py deleted file mode 100644 index e284de0a..00000000 --- a/libra/openstack/common/fixture/lockutils.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures - -from libra.openstack.common.lockutils import lock - - -class LockFixture(fixtures.Fixture): - """External locking fixture. - - This fixture is basically an alternative to the synchronized decorator with - the external flag so that tearDowns and addCleanups will be included in - the lock context for locking between tests.
The fixture is recommended to - be the first line in a test method, like so:: - - def test_method(self): - self.useFixture(LockFixture('lock_name')) - ... - - or the first line in setUp if all the test methods in the class are - required to be serialized. Something like:: - - class TestCase(testtools.TestCase): - def setUp(self): - self.useFixture(LockFixture('lock_name')) - super(TestCase, self).setUp() - ... - - This is because addCleanups are put on a LIFO queue that gets run after the - test method exits (either by completing or raising an exception). - """ - def __init__(self, name, lock_file_prefix=None): - self.mgr = lock(name, lock_file_prefix, True) - - def setUp(self): - super(LockFixture, self).setUp() - self.addCleanup(self.mgr.__exit__, None, None, None) - self.mgr.__enter__() diff --git a/libra/openstack/common/fixture/mockpatch.py b/libra/openstack/common/fixture/mockpatch.py deleted file mode 100644 index cd0d6ca6..00000000 --- a/libra/openstack/common/fixture/mockpatch.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import mock - - -class PatchObject(fixtures.Fixture): - """Deal with code around mock.""" - - def __init__(self, obj, attr, **kwargs): - self.obj = obj - self.attr = attr - self.kwargs = kwargs - - def setUp(self): - super(PatchObject, self).setUp() - _p = mock.patch.object(self.obj, self.attr, **self.kwargs) - self.mock = _p.start() - self.addCleanup(_p.stop) - - -class Patch(fixtures.Fixture): - - """Deal with code around mock.patch.""" - - def __init__(self, obj, **kwargs): - self.obj = obj - self.kwargs = kwargs - - def setUp(self): - super(Patch, self).setUp() - _p = mock.patch(self.obj, **self.kwargs) - self.mock = _p.start() - self.addCleanup(_p.stop) diff --git a/libra/openstack/common/fixture/moxstubout.py b/libra/openstack/common/fixture/moxstubout.py deleted file mode 100644 index a0e74fd1..00000000 --- a/libra/openstack/common/fixture/moxstubout.py +++ /dev/null @@ -1,34 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import mox - - -class MoxStubout(fixtures.Fixture): - """Deal with code around mox and stubout as a fixture.""" - - def setUp(self): - super(MoxStubout, self).setUp() - # emulate some of the mox stuff, we can't use the metaclass - # because it screws with our generators - self.mox = mox.Mox() - self.stubs = self.mox.stubs - self.addCleanup(self.mox.UnsetStubs) - self.addCleanup(self.mox.VerifyAll) diff --git a/libra/openstack/common/gettextutils.py b/libra/openstack/common/gettextutils.py deleted file mode 100644 index 2c3acc56..00000000 --- a/libra/openstack/common/gettextutils.py +++ /dev/null @@ -1,373 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -gettext for openstack-common modules. - -Usual usage in an openstack.common module: - - from libra.openstack.common.gettextutils import _ -""" - -import copy -import gettext -import logging -import os -import re -try: - import UserString as _userString -except ImportError: - import collections as _userString - -from babel import localedata -import six - -_localedir = os.environ.get('libra'.upper() + '_LOCALEDIR') -_t = gettext.translation('libra', localedir=_localedir, fallback=True) - -_AVAILABLE_LANGUAGES = {} -USE_LAZY = False - - -def enable_lazy(): - """Convenience function for configuring _() to use lazy gettext - - Call this at the start of execution to enable the gettextutils._ - function to use lazy gettext functionality. This is useful if - your project is importing _ directly instead of using the - gettextutils.install() way of importing the _ function. - """ - global USE_LAZY - USE_LAZY = True - - -def _(msg): - if USE_LAZY: - return Message(msg, 'libra') - else: - if six.PY3: - return _t.gettext(msg) - return _t.ugettext(msg) - - -def install(domain, lazy=False): - """Install a _() function using the given translation domain. - - Given a translation domain, install a _() function using gettext's - install() function. - - The main difference from gettext.install() is that we allow - overriding the default localedir (e.g. /usr/share/locale) using - a translation-domain-specific environment variable (e.g. - NOVA_LOCALEDIR). - - :param domain: the translation domain - :param lazy: indicates whether or not to install the lazy _() function. - The lazy _() introduces a way to do deferred translation - of messages by installing a _ that builds Message objects, - instead of strings, which can then be lazily translated into - any available locale. - """ - if lazy: - # NOTE(mrodden): Lazy gettext functionality. - # - # The following introduces a deferred way to do translations on - # messages in OpenStack. We override the standard _() function - # and % (format string) operation to build Message objects that can - # later be translated when we have more information. 
- # - # Also included below is an example LocaleHandler that translates - # Messages to an associated locale, effectively allowing many logs, - # each with their own locale. - - def _lazy_gettext(msg): - """Create and return a Message object. - - Lazy gettext function for a given domain, it is a factory method - for a project/module to get a lazy gettext function for its own - translation domain (i.e. nova, glance, cinder, etc.) - - Message encapsulates a string so that we can translate - it later when needed. - """ - return Message(msg, domain) - - from six import moves - moves.builtins.__dict__['_'] = _lazy_gettext - else: - localedir = '%s_LOCALEDIR' % domain.upper() - if six.PY3: - gettext.install(domain, - localedir=os.environ.get(localedir)) - else: - gettext.install(domain, - localedir=os.environ.get(localedir), - unicode=True) - - -class Message(_userString.UserString, object): - """Class used to encapsulate translatable messages.""" - def __init__(self, msg, domain): - # _msg is the gettext msgid and should never change - self._msg = msg - self._left_extra_msg = '' - self._right_extra_msg = '' - self._locale = None - self.params = None - self.domain = domain - - @property - def data(self): - # NOTE(mrodden): this should always resolve to a unicode string - # that best represents the state of the message currently - - localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') - if self.locale: - lang = gettext.translation(self.domain, - localedir=localedir, - languages=[self.locale], - fallback=True) - else: - # use system locale for translations - lang = gettext.translation(self.domain, - localedir=localedir, - fallback=True) - - if six.PY3: - ugettext = lang.gettext - else: - ugettext = lang.ugettext - - full_msg = (self._left_extra_msg + - ugettext(self._msg) + - self._right_extra_msg) - - if self.params is not None: - full_msg = full_msg % self.params - - return six.text_type(full_msg) - - @property - def locale(self): - return self._locale - - @locale.setter - def locale(self, value): - self._locale = value - if not self.params: - return - - # This Message object may have been constructed with one or more - # Message objects as substitution parameters, given as a single - # Message, or a tuple or Map containing some, so when setting the - # locale for this Message we need to set it for those Messages too. 
- if isinstance(self.params, Message): - self.params.locale = value - return - if isinstance(self.params, tuple): - for param in self.params: - if isinstance(param, Message): - param.locale = value - return - if isinstance(self.params, dict): - for param in self.params.values(): - if isinstance(param, Message): - param.locale = value - - def _save_dictionary_parameter(self, dict_param): - full_msg = self.data - # look for %(blah) fields in string; - # ignore %% and deal with the - # case where % is first character on the line - keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg) - - # if we don't find any %(blah) blocks but have a %s - if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg): - # apparently the full dictionary is the parameter - params = copy.deepcopy(dict_param) - else: - params = {} - for key in keys: - try: - params[key] = copy.deepcopy(dict_param[key]) - except TypeError: - # cast uncopyable thing to unicode string - params[key] = six.text_type(dict_param[key]) - - return params - - def _save_parameters(self, other): - # we check for None later to see if - # we actually have parameters to inject, - # so encapsulate if our parameter is actually None - if other is None: - self.params = (other, ) - elif isinstance(other, dict): - self.params = self._save_dictionary_parameter(other) - else: - # fallback to casting to unicode, - # this will handle the problematic python code-like - # objects that cannot be deep-copied - try: - self.params = copy.deepcopy(other) - except TypeError: - self.params = six.text_type(other) - - return self - - # overrides to be more string-like - def __unicode__(self): - return self.data - - def __str__(self): - if six.PY3: - return self.__unicode__() - return self.data.encode('utf-8') - - def __getstate__(self): - to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', - 'domain', 'params', '_locale'] - new_dict = self.__dict__.fromkeys(to_copy) - for attr in to_copy: - new_dict[attr] = copy.deepcopy(self.__dict__[attr]) - - return new_dict - - def __setstate__(self, state): - for (k, v) in state.items(): - setattr(self, k, v) - - # operator overloads - def __add__(self, other): - copied = copy.deepcopy(self) - copied._right_extra_msg += other.__str__() - return copied - - def __radd__(self, other): - copied = copy.deepcopy(self) - copied._left_extra_msg += other.__str__() - return copied - - def __mod__(self, other): - # do a format string to catch and raise - # any possible KeyErrors from missing parameters - self.data % other - copied = copy.deepcopy(self) - return copied._save_parameters(other) - - def __mul__(self, other): - return self.data * other - - def __rmul__(self, other): - return other * self.data - - def __getitem__(self, key): - return self.data[key] - - def __getslice__(self, start, end): - return self.data.__getslice__(start, end) - - def __getattribute__(self, name): - # NOTE(mrodden): handle lossy operations that we can't deal with yet - # These override the UserString implementation, since UserString - # uses our __class__ attribute to try and build a new message - # after running the inner data string through the operation. - # At that point, we have lost the gettext message id and can just - # safely resolve to a string instead. 
- ops = ['capitalize', 'center', 'decode', 'encode', - 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', - 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] - if name in ops: - return getattr(self.data, name) - else: - return _userString.UserString.__getattribute__(self, name) - - -def get_available_languages(domain): - """Lists the available languages for the given translation domain. - - :param domain: the domain to get languages for - """ - if domain in _AVAILABLE_LANGUAGES: - return copy.copy(_AVAILABLE_LANGUAGES[domain]) - - localedir = '%s_LOCALEDIR' % domain.upper() - find = lambda x: gettext.find(domain, - localedir=os.environ.get(localedir), - languages=[x]) - - # NOTE(mrodden): en_US should always be available (and first in case - # order matters) since our in-line message strings are en_US - language_list = ['en_US'] - # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list - # requires >=0.9.6, uncapped, so defensively work with both. We can remove - # this check when the master list updates to >=1.0, and all projects update - list_identifiers = (getattr(localedata, 'list', None) or - getattr(localedata, 'locale_identifiers')) - locale_identifiers = list_identifiers() - for i in locale_identifiers: - if find(i) is not None: - language_list.append(i) - _AVAILABLE_LANGUAGES[domain] = language_list - return copy.copy(language_list) - - -def get_localized_message(message, user_locale): - """Gets a localized version of the given message in the given locale. - - If the message is not a Message object the message is returned as-is. - If the locale is None the message is translated to the default locale. - - :returns: the translated message in unicode, or the original message if - it could not be translated - """ - translated = message - if isinstance(message, Message): - original_locale = message.locale - message.locale = user_locale - translated = six.text_type(message) - message.locale = original_locale - return translated - - -class LocaleHandler(logging.Handler): - """Handler that can have a locale associated to translate Messages. - - A quick example of how to utilize the Message class above. - LocaleHandler takes a locale and a target logging.Handler object - to forward LogRecord objects to after translating the internal Message. - """ - - def __init__(self, locale, target): - """Initialize a LocaleHandler - - :param locale: locale to use for translating messages - :param target: logging.Handler object to forward - LogRecord objects to after translation - """ - logging.Handler.__init__(self) - self.locale = locale - self.target = target - - def emit(self, record): - if isinstance(record.msg, Message): - # set the locale and resolve to a string - record.msg.locale = self.locale - - self.target.emit(record) diff --git a/libra/openstack/common/importutils.py b/libra/openstack/common/importutils.py deleted file mode 100644 index 7a303f93..00000000 --- a/libra/openstack/common/importutils.py +++ /dev/null @@ -1,68 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Import related utilities and helper functions. -""" - -import sys -import traceback - - -def import_class(import_str): - """Returns a class from a string including module and class.""" - mod_str, _sep, class_str = import_str.rpartition('.') - try: - __import__(mod_str) - return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): - raise ImportError('Class %s cannot be found (%s)' % - (class_str, - traceback.format_exception(*sys.exc_info()))) - - -def import_object(import_str, *args, **kwargs): - """Import a class and return an instance of it.""" - return import_class(import_str)(*args, **kwargs) - - -def import_object_ns(name_space, import_str, *args, **kwargs): - """Tries to import object from default namespace. - - Imports a class and returns an instance of it, first by trying - to find the class in a default namespace, then falling back to - a full path if not found in the default namespace. - """ - import_value = "%s.%s" % (name_space, import_str) - try: - return import_class(import_value)(*args, **kwargs) - except ImportError: - return import_class(import_str)(*args, **kwargs) - - -def import_module(import_str): - """Import a module.""" - __import__(import_str) - return sys.modules[import_str] - - -def try_import(import_str, default=None): - """Try to import a module and if it fails return default.""" - try: - return import_module(import_str) - except ImportError: - return default diff --git a/libra/openstack/common/jsonutils.py b/libra/openstack/common/jsonutils.py deleted file mode 100644 index 4b2479d8..00000000 --- a/libra/openstack/common/jsonutils.py +++ /dev/null @@ -1,180 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -JSON related utilities. - -This module provides a few things: - - 1) A handy function for getting an object down to something that can be - JSON serialized. See to_primitive(). - - 2) Wrappers around loads() and dumps(). The dumps() wrapper will - automatically use to_primitive() for you if needed. - - 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson - is available.
-''' - - -import datetime -import functools -import inspect -import itertools -import json -try: - import xmlrpclib -except ImportError: - # NOTE(jd): xmlrpclib is not shipped with Python 3 - xmlrpclib = None - -import six - -from libra.openstack.common import gettextutils - from libra.openstack.common import importutils -from libra.openstack.common import timeutils - -netaddr = importutils.try_import("netaddr") - -_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, - inspect.isfunction, inspect.isgeneratorfunction, - inspect.isgenerator, inspect.istraceback, inspect.isframe, - inspect.iscode, inspect.isbuiltin, inspect.isroutine, - inspect.isabstract] - -_simple_types = (six.string_types + six.integer_types - + (type(None), bool, float)) - - -def to_primitive(value, convert_instances=False, convert_datetime=True, - level=0, max_depth=3): - """Convert a complex object into primitives. - - Handy for JSON serialization. We can optionally handle instances, - but since this is a recursive function, we could have cyclical - data structures. - - To handle cyclical data structures we could track the actual objects - visited in a set, but not all objects are hashable. Instead we just - track the depth of the object inspections and don't go too deep. - - Therefore, convert_instances=True is lossy ... be aware. - - """ - # handle obvious types first - order of basic types determined by running - # full tests on nova project, resulting in the following counts: - # 572754 <type 'NoneType'> - # 460353 <type 'int'> - # 379632 <type 'unicode'> - # 274610 <type 'str'> - # 199918 <type 'dict'> - # 114200 <type 'datetime.datetime'> - # 51817 <type 'bool'> - # 26164 <type 'list'> - # 6491 <type 'float'> - # 283 <type 'tuple'> - # 19 <type 'long'> - if isinstance(value, _simple_types): - return value - - if isinstance(value, datetime.datetime): - if convert_datetime: - return timeutils.strtime(value) - else: - return value - - # value of itertools.count doesn't get caught by nasty_type_tests - # and results in infinite loop when list(value) is called. - if type(value) == itertools.count: - return six.text_type(value) - - # FIXME(vish): Workaround for LP bug 852095. Without this workaround, - # tests that raise an exception in a mocked method that - # has a @wrap_exception with a notifier will fail. If - # we up the dependency to 0.5.4 (when it is released) we - # can remove this workaround. - if getattr(value, '__module__', None) == 'mox': - return 'mock' - - if level > max_depth: - return '?' - - # The try block may not be necessary after the class check above, - # but just in case ... - try: - recursive = functools.partial(to_primitive, - convert_instances=convert_instances, - convert_datetime=convert_datetime, - level=level, - max_depth=max_depth) - if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in value.iteritems()) - elif isinstance(value, (list, tuple)): - return [recursive(lv) for lv in value] - - # It's not clear why xmlrpclib created their own DateTime type, but - # for our purposes, make it a datetime type which is explicitly - # handled - if xmlrpclib and isinstance(value, xmlrpclib.DateTime): - value = datetime.datetime(*tuple(value.timetuple())[:6]) - - if convert_datetime and isinstance(value, datetime.datetime): - return timeutils.strtime(value) - elif isinstance(value, gettextutils.Message): - return value.data - elif hasattr(value, 'iteritems'): - return recursive(dict(value.iteritems()), level=level + 1) - elif hasattr(value, '__iter__'): - return recursive(list(value)) - elif convert_instances and hasattr(value, '__dict__'): - # Likely an instance of something. Watch for cycles. - # Ignore class member vars.
- return recursive(value.__dict__, level=level + 1) - elif netaddr and isinstance(value, netaddr.IPAddress): - return six.text_type(value) - else: - if any(test(value) for test in _nasty_type_tests): - return six.text_type(value) - return value - except TypeError: - # Class objects are tricky since they may define something like - # __iter__ that isn't callable as list(). - return six.text_type(value) - - -def dumps(value, default=to_primitive, **kwargs): - return json.dumps(value, default=default, **kwargs) - - -def loads(s): - return json.loads(s) - - -def load(s): - return json.load(s) - - -try: - import anyjson -except ImportError: - pass -else: - anyjson._modules.append((__name__, 'dumps', TypeError, - 'loads', ValueError, 'load')) - anyjson.force_implementation(__name__) diff --git a/libra/openstack/common/local.py b/libra/openstack/common/local.py deleted file mode 100644 index e82f17d0..00000000 --- a/libra/openstack/common/local.py +++ /dev/null @@ -1,47 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Local storage of variables using weak references""" - -import threading -import weakref - - -class WeakLocal(threading.local): - def __getattribute__(self, attr): - rval = super(WeakLocal, self).__getattribute__(attr) - if rval: - # NOTE(mikal): this bit is confusing. What is stored is a weak - # reference, not the value itself. We therefore need to look up - # the weak reference and return the inner value here. - rval = rval() - return rval - - def __setattr__(self, attr, value): - value = weakref.ref(value) - return super(WeakLocal, self).__setattr__(attr, value) - - -# NOTE(mikal): the name "store" should be deprecated in the future -store = WeakLocal() - -# A "weak" store uses weak references and allows an object to fall out of scope - # when it falls out of scope in the code that uses the thread local storage. A -# "strong" store will hold a reference to the object so that it never falls out -# of scope. -weak_store = WeakLocal() -strong_store = threading.local() diff --git a/libra/openstack/common/lockutils.py b/libra/openstack/common/lockutils.py deleted file mode 100644 index fb2d1243..00000000 --- a/libra/openstack/common/lockutils.py +++ /dev/null @@ -1,278 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
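(Aside, before the lockutils body: a minimal usage sketch of the importutils helpers deleted above. The dotted paths here are illustrative stand-ins, not anything libra itself configured.)

    from libra.openstack.common import importutils

    # import_class() resolves a dotted "module.Class" path to the class object.
    encoder_cls = importutils.import_class('json.JSONEncoder')

    # import_object() resolves the path and instantiates it in one step.
    encoder = importutils.import_object('json.JSONEncoder', sort_keys=True)

    # try_import() is for optional dependencies: it returns the default
    # (None here) instead of raising ImportError, as jsonutils does for netaddr.
    netaddr = importutils.try_import('netaddr')
    if netaddr is None:
        print('netaddr is unavailable; skipping IP address support')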
- - -import contextlib -import errno -import functools -import os -import threading -import time -import weakref - -from oslo.config import cfg - -from libra.openstack.common import fileutils -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import local -from libra.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -util_opts = [ - cfg.BoolOpt('disable_process_locking', default=False, - help='Whether to disable inter-process locks'), - cfg.StrOpt('lock_path', - help=('Directory to use for lock files.')) -] - - -CONF = cfg.CONF -CONF.register_opts(util_opts) - - -def set_defaults(lock_path): - cfg.set_defaults(util_opts, lock_path=lock_path) - - -class _InterProcessLock(object): - """Lock implementation which allows multiple locks, working around - issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does - not require any cleanup. Since the lock is always held on a file - descriptor rather than outside of the process, the lock gets dropped - automatically if the process crashes, even if __exit__ is not executed. - - There are no guarantees regarding usage by multiple green threads in a - single process here. This lock works only between processes. Exclusive - access between local threads should be achieved using the semaphores - in the @synchronized decorator. - - Note these locks are released when the descriptor is closed, so it's not - safe to close the file descriptor while another green thread holds the - lock. Just opening and closing the lock file can break synchronisation, - so lock files must be accessed only using this abstraction. - """ - - def __init__(self, name): - self.lockfile = None - self.fname = name - - def __enter__(self): - self.lockfile = open(self.fname, 'w') - - while True: - try: - # Using non-blocking locks since green threads are not - # patched to deal with blocking locking calls. - # Also upon reading the MSDN docs for locking(), it seems - # to have a laughable 10 attempts "blocking" mechanism. - self.trylock() - return self - except IOError as e: - if e.errno in (errno.EACCES, errno.EAGAIN): - # external locks synchronise things like iptables - # updates - give it some time to prevent busy spinning - time.sleep(0.01) - else: - raise - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - self.unlock() - self.lockfile.close() - except IOError: - LOG.exception(_("Could not release the acquired lock `%s`"), - self.fname) - - def trylock(self): - raise NotImplementedError() - - def unlock(self): - raise NotImplementedError() - - -class _WindowsLock(_InterProcessLock): - def trylock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) - - def unlock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) - - -class _PosixLock(_InterProcessLock): - def trylock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - - def unlock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_UN) - - -if os.name == 'nt': - import msvcrt - InterProcessLock = _WindowsLock -else: - import fcntl - InterProcessLock = _PosixLock - -_semaphores = weakref.WeakValueDictionary() - - -@contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock - - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. 
- - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. - - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. This means that if two different - workers both run a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. - - :param lock_path: The lock_path keyword argument is used to specify a - special location for external lock files to live. If nothing is set, then - CONF.lock_path is used as a default. - """ - # NOTE(soren): If we ever go natively threaded, this will be racy. - # See http://stackoverflow.com/questions/5390569/dyn - # amically-allocating-and-destroying-mutexes - sem = _semaphores.get(name, threading.Semaphore()) - if name not in _semaphores: - # this check is not racy - we're already holding ref locally - # so GC won't remove the item and there was no IO switch - # (only valid in greenthreads) - _semaphores[name] = sem - - with sem: - LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), - {'lock': name}) - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - - # NOTE(mikal): the lock name cannot contain directory - # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) - - lock_file_path = os.path.join(local_lock_path, lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - else: - yield sem - - finally: - local.strong_store.locks_held.remove(name) - - -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): - """Synchronization decorator. - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one thread will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... - - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - """ - - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - try: - with lock(name, lock_file_prefix, external, lock_path): - LOG.debug(_('Got semaphore / lock "%(function)s"'), - {'function': f.__name__}) - return f(*args, **kwargs) - finally: - LOG.debug(_('Semaphore / lock released "%(function)s"'), - {'function': f.__name__}) - return inner - return wrap - - -def synchronized_with_prefix(lock_file_prefix): - """Partial object generator for the synchronization decorator.
- - Redefine @synchronized in each project like so:: - - (in nova/utils.py) - from nova.openstack.common import lockutils - - synchronized = lockutils.synchronized_with_prefix('nova-') - - - (in nova/foo.py) - from nova import utils - - @utils.synchronized('mylock') - def bar(self, *args): - ... - - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. - """ - - return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) diff --git a/libra/openstack/common/log.py b/libra/openstack/common/log.py deleted file mode 100644 index 46cbc45c..00000000 --- a/libra/openstack/common/log.py +++ /dev/null @@ -1,625 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Openstack logging handler. - -This module adds to logging functionality by adding the option to specify -a context object when calling the various log methods. If the context object -is not specified, default formatting is used. Additionally, an instance uuid -may be passed as part of the log message, which is intended to make it easier -for admins to find messages related to a specific instance. - -It also allows setting of formatting information through conf. - -""" - -import inspect -import itertools -import logging -import logging.config -import logging.handlers -import os -import re -import sys -import traceback - -from oslo.config import cfg -import six -from six import moves - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import jsonutils -from libra.openstack.common import local - - -_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - -_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password'] - -# NOTE(ldbragst): Let's build a list of regex objects using the list of -# _SANITIZE_KEYS we already have. This way, we only have to add the new key -# to the list of _SANITIZE_KEYS and we can generate regular expressions -# for XML and JSON automatically. 
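(A hedged aside, not part of the original module: this is what one generated pattern from the list built just below behaves like, with 'password' standing in for any entry of _SANITIZE_KEYS.)

    import re

    # Specialize the first _FORMAT_PATTERNS template for the 'password' key.
    pattern = re.compile(r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])' % {'key': 'password'},
                         re.DOTALL)

    # Keep the key and quotes (groups 1 and 2), replace the secret between them.
    print(pattern.sub(r'\g<1>***\g<2>', "password = 'hunter2'"))
    # prints: password = '***'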
-_SANITIZE_PATTERNS = [] -_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(<%(key)s>).*?(</%(key)s>)', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])'] - -for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS.append(reg_ex) - - -common_cli_opts = [ - cfg.BoolOpt('debug', - short='d', - default=False, - help='Print debugging output (set logging level to ' - 'DEBUG instead of default WARNING level).'), - cfg.BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output (set logging level to ' - 'INFO instead of default WARNING level).'), -] - -logging_cli_opts = [ - cfg.StrOpt('log-config-append', - metavar='PATH', - deprecated_name='log-config', - help='The name of logging configuration file. It does not ' - 'disable existing loggers, but just appends specified ' - 'logging configuration to any other existing logging ' - 'options. Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - cfg.StrOpt('log-format', - default=None, - metavar='FORMAT', - help='DEPRECATED. ' - 'A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'This option is deprecated. Please use ' - 'logging_context_format_string and ' - 'logging_default_format_string instead.'), - cfg.StrOpt('log-date-format', - default=_DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), - cfg.StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. ' - 'If no default is set, logging will go to stdout.'), - cfg.StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The base directory used for relative ' - '--log-file paths'), - cfg.BoolOpt('use-syslog', - default=False, - help='Use syslog for logging.'), - cfg.StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') -] - -generic_log_opts = [ - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error') -] - -log_opts = [ - cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user)s %(tenant)s] ' - '%(instance)s%(message)s', - help='format string to use for log messages with context'), - cfg.StrOpt('logging_default_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), - cfg.StrOpt('logging_debug_format_suffix', - default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), - cfg.StrOpt('logging_exception_prefix', - default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' - '%(instance)s', - help='prefix each line of exception output with this format'), - cfg.ListOpt('default_log_levels', - default=[ - 'amqp=WARN', - 'amqplib=WARN', - 'boto=WARN', - 'keystone=INFO', - 'qpid=WARN', - 'sqlalchemy=WARN', - 'suds=INFO', - ], - help='list of logger=LEVEL pairs'), - cfg.BoolOpt('publish_errors', - default=False, - help='publish error events'), - cfg.BoolOpt('fatal_deprecations', - default=False, - help='make deprecations fatal'), - - # NOTE(mikal): there are two options here because sometimes we are handed - # a full instance (and could
include more information), and other times we - # are just handed a UUID for the instance. - cfg.StrOpt('instance_format', - default='[instance: %(uuid)s] ', - help='If an instance is passed with the log message, format ' - 'it like this'), - cfg.StrOpt('instance_uuid_format', - default='[instance: %(uuid)s] ', - help='If an instance UUID is passed with the log message, ' - 'format it like this'), -] - -CONF = cfg.CONF -CONF.register_cli_opts(common_cli_opts) -CONF.register_cli_opts(logging_cli_opts) -CONF.register_opts(generic_log_opts) -CONF.register_opts(log_opts) - -# our new audit level -# NOTE(jkoelker) Since we synthesized an audit level, make the logging -# module aware of it so it acts like other levels. -logging.AUDIT = logging.INFO + 1 -logging.addLevelName(logging.AUDIT, 'AUDIT') - - -try: - NullHandler = logging.NullHandler -except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 - class NullHandler(logging.Handler): - def handle(self, record): - pass - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - - -def _dictify_context(context): - if context is None: - return None - if not isinstance(context, dict) and getattr(context, 'to_dict', None): - context = context.to_dict() - return context - - -def _get_binary_name(): - return os.path.basename(inspect.stack()[-1][1]) - - -def _get_log_file_path(binary=None): - logfile = CONF.log_file - logdir = CONF.log_dir - - if logfile and not logdir: - return logfile - - if logfile and logdir: - return os.path.join(logdir, logfile) - - if logdir: - binary = binary or _get_binary_name() - return '%s.log' % (os.path.join(logdir, binary),) - - return None - - -def mask_password(message, secret="***"): - """Replace password with 'secret' in message. - - :param message: The string which includes security information. - :param secret: value with which to replace passwords, defaults to "***". - :returns: The unicode value of message with the password fields masked. - - For example: - >>> mask_password("'adminPass' : 'aaaaa'") - "'adminPass' : '***'" - >>> mask_password("'admin_pass' : 'aaaaa'") - "'admin_pass' : '***'" - >>> mask_password('"password" : "aaaaa"') - '"password" : "***"' - >>> mask_password("'original_password' : 'aaaaa'") - "'original_password' : '***'" - >>> mask_password("u'original_password' : u'aaaaa'") - "u'original_password' : u'***'" - """ - message = six.text_type(message) - - # NOTE(ldbragst): Check to see if anything in message contains any key - # specified in _SANITIZE_KEYS, if not then just return the message since - # we don't have to mask any passwords. 
- if not any(key in message for key in _SANITIZE_KEYS): - return message - - secret = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS: - message = re.sub(pattern, secret, message) - return message - - -class BaseLoggerAdapter(logging.LoggerAdapter): - - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) - - -class LazyAdapter(BaseLoggerAdapter): - def __init__(self, name='unknown', version='unknown'): - self._logger = None - self.extra = {} - self.name = name - self.version = version - - @property - def logger(self): - if not self._logger: - self._logger = getLogger(self.name, self.version) - return self._logger - - -class ContextAdapter(BaseLoggerAdapter): - warn = logging.LoggerAdapter.warning - - def __init__(self, logger, project_name, version_string): - self.logger = logger - self.project = project_name - self.version = version_string - - @property - def handlers(self): - return self.logger.handlers - - def deprecated(self, msg, *args, **kwargs): - stdmsg = _("Deprecated: %s") % msg - if CONF.fatal_deprecations: - self.critical(stdmsg, *args, **kwargs) - raise DeprecatedConfig(msg=stdmsg) - else: - self.warn(stdmsg, *args, **kwargs) - - def process(self, msg, kwargs): - # NOTE(mrodden): catch any Message/other object and - # coerce to unicode before they can get - # to the python logging and possibly - # cause string encoding trouble - if not isinstance(msg, six.string_types): - msg = six.text_type(msg) - - if 'extra' not in kwargs: - kwargs['extra'] = {} - extra = kwargs['extra'] - - context = kwargs.pop('context', None) - if not context: - context = getattr(local.store, 'context', None) - if context: - extra.update(_dictify_context(context)) - - instance = kwargs.pop('instance', None) - instance_uuid = (extra.get('instance_uuid', None) or - kwargs.pop('instance_uuid', None)) - instance_extra = '' - if instance: - instance_extra = CONF.instance_format % instance - elif instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) - - extra.update({"project": self.project}) - extra.update({"version": self.version}) - extra['extra'] = extra.copy() - return msg, kwargs - - -class JSONFormatter(logging.Formatter): - def __init__(self, fmt=None, datefmt=None): - # NOTE(jkoelker) we ignore the fmt argument, but it's still there - # since logging.config.fileConfig passes it.
- self.datefmt = datefmt - - def formatException(self, ei, strip_newlines=True): - lines = traceback.format_exception(*ei) - if strip_newlines: - lines = [itertools.ifilter( - lambda x: x, - line.rstrip().splitlines()) for line in lines] - lines = list(itertools.chain(*lines)) - return lines - - def format(self, record): - message = {'message': record.getMessage(), - 'asctime': self.formatTime(record, self.datefmt), - 'name': record.name, - 'msg': record.msg, - 'args': record.args, - 'levelname': record.levelname, - 'levelno': record.levelno, - 'pathname': record.pathname, - 'filename': record.filename, - 'module': record.module, - 'lineno': record.lineno, - 'funcname': record.funcName, - 'created': record.created, - 'msecs': record.msecs, - 'relative_created': record.relativeCreated, - 'thread': record.thread, - 'thread_name': record.threadName, - 'process_name': record.processName, - 'process': record.process, - 'traceback': None} - - if hasattr(record, 'extra'): - message['extra'] = record.extra - - if record.exc_info: - message['traceback'] = self.formatException(record.exc_info) - - return jsonutils.dumps(message) - - -def _create_logging_excepthook(product_name): - def logging_excepthook(type, value, tb): - extra = {} - if CONF.verbose: - extra['exc_info'] = (type, value, tb) - getLogger(product_name).critical(str(value), **extra) - return logging_excepthook - - -class LogConfigError(Exception): - - message = _('Error loading logging config %(log_config)s: %(err_msg)s') - - def __init__(self, log_config, err_msg): - self.log_config = log_config - self.err_msg = err_msg - - def __str__(self): - return self.message % dict(log_config=self.log_config, - err_msg=self.err_msg) - - -def _load_log_config(log_config_append): - try: - logging.config.fileConfig(log_config_append, - disable_existing_loggers=False) - except moves.configparser.Error as exc: - raise LogConfigError(log_config_append, str(exc)) - - -def setup(product_name): - """Setup logging.""" - if CONF.log_config_append: - _load_log_config(CONF.log_config_append) - else: - _setup_logging_from_conf() - sys.excepthook = _create_logging_excepthook(product_name) - - -def set_defaults(logging_context_format_string): - cfg.set_defaults(log_opts, - logging_context_format_string= - logging_context_format_string) - - -def _find_facility_from_conf(): - facility_names = logging.handlers.SysLogHandler.facility_names - facility = getattr(logging.handlers.SysLogHandler, - CONF.syslog_log_facility, - None) - - if facility is None and CONF.syslog_log_facility in facility_names: - facility = facility_names.get(CONF.syslog_log_facility) - - if facility is None: - valid_facilities = facility_names.keys() - consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', - 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', - 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', - 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', - 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] - valid_facilities.extend(consts) - raise TypeError(_('syslog facility must be one of: %s') % - ', '.join("'%s'" % fac - for fac in valid_facilities)) - - return facility - - -def _setup_logging_from_conf(): - log_root = getLogger(None).logger - for handler in log_root.handlers: - log_root.removeHandler(handler) - - if CONF.use_syslog: - facility = _find_facility_from_conf() - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - - logpath = _get_log_file_path() - if logpath: - filelog = 
logging.handlers.WatchedFileHandler(logpath) - log_root.addHandler(filelog) - - if CONF.use_stderr: - streamlog = ColorHandler() - log_root.addHandler(streamlog) - - elif not CONF.log_file: - # pass sys.stdout as a positional argument - # python2.6 calls the argument strm, in 2.7 it's stream - streamlog = logging.StreamHandler(sys.stdout) - log_root.addHandler(streamlog) - - if CONF.publish_errors: - handler = importutils.import_object( - "openstack.common.log_handler.PublishErrorsHandler", - logging.ERROR) - log_root.addHandler(handler) - - datefmt = CONF.log_date_format - for handler in log_root.handlers: - # NOTE(alaski): CONF.log_format overrides everything currently. This - # should be deprecated in favor of context aware formatting. - if CONF.log_format: - handler.setFormatter(logging.Formatter(fmt=CONF.log_format, - datefmt=datefmt)) - log_root.info('Deprecated: log_format is now deprecated and will ' - 'be removed in the next release') - else: - handler.setFormatter(ContextFormatter(datefmt=datefmt)) - - if CONF.debug: - log_root.setLevel(logging.DEBUG) - elif CONF.verbose: - log_root.setLevel(logging.INFO) - else: - log_root.setLevel(logging.WARNING) - - for pair in CONF.default_log_levels: - mod, _sep, level_name = pair.partition('=') - level = logging.getLevelName(level_name) - logger = logging.getLogger(mod) - logger.setLevel(level) - -_loggers = {} - - -def getLogger(name='unknown', version='unknown'): - if name not in _loggers: - _loggers[name] = ContextAdapter(logging.getLogger(name), - name, - version) - return _loggers[name] - - -def getLazyLogger(name='unknown', version='unknown'): - """Returns lazy logger. - - Creates a pass-through logger that does not create the real logger - until it is really needed and delegates all calls to the real logger - once it is created. - """ - return LazyAdapter(name, version) - - -class WritableLogger(object): - """A thin wrapper that responds to `write` and logs.""" - - def __init__(self, logger, level=logging.INFO): - self.logger = logger - self.level = level - - def write(self, msg): - self.logger.log(self.level, msg) - - -class ContextFormatter(logging.Formatter): - """A context.RequestContext aware formatter configured through flags. - - The flags used to set format strings are: logging_context_format_string - and logging_default_format_string. You can also specify - logging_debug_format_suffix to append extra formatting if the log level is - debug. 
- - For information about what variables are available for the formatter see: - http://docs.python.org/library/logging.html#formatter - - """ - - def format(self, record): - """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formatting params - # to an empty string so we don't throw an exception if - # they get used - for key in ('instance', 'color'): - if key not in record.__dict__: - record.__dict__[key] = '' - - if record.__dict__.get('request_id', None): - self._fmt = CONF.logging_context_format_string - else: - self._fmt = CONF.logging_default_format_string - - if (record.levelno == logging.DEBUG and - CONF.logging_debug_format_suffix): - self._fmt += " " + CONF.logging_debug_format_suffix - - # Cache this on the record, Logger will respect our formatted copy - if record.exc_info: - record.exc_text = self.formatException(record.exc_info, record) - return logging.Formatter.format(self, record) - - def formatException(self, exc_info, record=None): - """Format exception output with CONF.logging_exception_prefix.""" - if not record: - return logging.Formatter.formatException(self, exc_info) - - stringbuffer = moves.StringIO() - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, stringbuffer) - lines = stringbuffer.getvalue().split('\n') - stringbuffer.close() - - if CONF.logging_exception_prefix.find('%(asctime)') != -1: - record.asctime = self.formatTime(record, self.datefmt) - - formatted_lines = [] - for line in lines: - pl = CONF.logging_exception_prefix % record.__dict__ - fl = '%s%s' % (pl, line) - formatted_lines.append(fl) - return '\n'.join(formatted_lines) - - -class ColorHandler(logging.StreamHandler): - LEVEL_COLORS = { - logging.DEBUG: '\033[00;32m', # GREEN - logging.INFO: '\033[00;36m', # CYAN - logging.AUDIT: '\033[01;36m', # BOLD CYAN - logging.WARN: '\033[01;33m', # BOLD YELLOW - logging.ERROR: '\033[01;31m', # BOLD RED - logging.CRITICAL: '\033[01;31m', # BOLD RED - } - - def format(self, record): - record.color = self.LEVEL_COLORS[record.levelno] - return logging.StreamHandler.format(self, record) - - -class DeprecatedConfig(Exception): - message = _("Fatal call to deprecated config: %(msg)s") - - def __init__(self, msg): - super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/libra/openstack/common/loopingcall.py b/libra/openstack/common/loopingcall.py deleted file mode 100644 index 11d9477c..00000000 --- a/libra/openstack/common/loopingcall.py +++ /dev/null @@ -1,147 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
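(Before the loopingcall body, a minimal sketch of how the logging entry points deleted above were typically wired up. This assumes a parsed CONF; the product name and uuid are illustrative.)

    from libra.openstack.common import log as logging

    logging.setup('libra')             # configure handlers from CONF (log_file, use_syslog, ...)
    LOG = logging.getLogger(__name__)  # cached ContextAdapter for this module

    LOG.audit('pool manager started')  # the synthesized AUDIT level (INFO + 1)
    LOG.debug('rebuilding device',
              instance_uuid='6a1ad668-demo')  # expanded via CONF.instance_uuid_format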
- -import sys - -from eventlet import event -from eventlet import greenthread - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import log as logging -from libra.openstack.common import timeutils - -LOG = logging.getLogger(__name__) - - -class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCall. - - The poll-function passed to LoopingCall can raise this exception to - break out of the loop normally. This is somewhat analogous to - StopIteration. - - An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCall.wait() - - """ - - def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return.""" - self.retvalue = retvalue - - -class LoopingCallBase(object): - def __init__(self, f=None, *args, **kw): - self.args = args - self.kw = kw - self.f = f - self._running = False - self.done = None - - def stop(self): - self._running = False - - def wait(self): - return self.done.wait() - - -class FixedIntervalLoopingCall(LoopingCallBase): - """A fixed interval looping call.""" - - def start(self, interval, initial_delay=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - start = timeutils.utcnow() - self.f(*self.args, **self.kw) - end = timeutils.utcnow() - if not self._running: - break - delay = interval - timeutils.delta_seconds(start, end) - if delay <= 0: - LOG.warn(_('task run outlasted interval by %s sec') % - -delay) - greenthread.sleep(delay if delay > 0 else 0) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_('in fixed duration looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn_n(_inner) - return self.done - - -# TODO(mikal): this class name is deprecated in Havana and should be removed -# in the I release -LoopingCall = FixedIntervalLoopingCall - - -class DynamicLoopingCall(LoopingCallBase): - """A looping call which sleeps until the next known event. - - The function called should return how long to sleep for before being - called again. - """ - - def start(self, initial_delay=None, periodic_interval_max=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - idle = self.f(*self.args, **self.kw) - if not self._running: - break - - if periodic_interval_max is not None: - idle = min(idle, periodic_interval_max) - LOG.debug(_('Dynamic looping call sleeping for %.02f ' - 'seconds'), idle) - greenthread.sleep(idle) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_('in dynamic looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn(_inner) - return self.done diff --git a/libra/openstack/common/network_utils.py b/libra/openstack/common/network_utils.py deleted file mode 100644 index dbed1ceb..00000000 --- a/libra/openstack/common/network_utils.py +++ /dev/null @@ -1,81 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Network-related utilities and helper functions. -""" - -import urlparse - - -def parse_host_port(address, default_port=None): - """Interpret a string as a host:port pair. - - An IPv6 address MUST be escaped if accompanied by a port, - because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 - means both [2001:db8:85a3::8a2e:370:7334] and - [2001:db8:85a3::8a2e:370]:7334. - - >>> parse_host_port('server01:80') - ('server01', 80) - >>> parse_host_port('server01') - ('server01', None) - >>> parse_host_port('server01', default_port=1234) - ('server01', 1234) - >>> parse_host_port('[::1]:80') - ('::1', 80) - >>> parse_host_port('[::1]') - ('::1', None) - >>> parse_host_port('[::1]', default_port=1234) - ('::1', 1234) - >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) - ('2001:db8:85a3::8a2e:370:7334', 1234) - - """ - if address[0] == '[': - # Escaped ipv6 - _host, _port = address[1:].split(']') - host = _host - if ':' in _port: - port = _port.split(':')[1] - else: - port = default_port - else: - if address.count(':') == 1: - host, port = address.split(':') - else: - # 0 means ipv4, >1 means ipv6. - # We prohibit unescaped ipv6 addresses with port. - host = address - port = default_port - - return (host, None if port is None else int(port)) - - -def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL using urlparse.urlsplit(), splitting query and fragments. - This function papers over Python issue9374 when needed. - - The parameters are the same as urlparse.urlsplit. - """ - scheme, netloc, path, query, fragment = urlparse.urlsplit( - url, scheme, allow_fragments) - if allow_fragments and '#' in path: - path, fragment = path.split('#', 1) - if '?' in path: - path, query = path.split('?', 1) - return urlparse.SplitResult(scheme, netloc, path, query, fragment) diff --git a/libra/openstack/common/notifier/__init__.py b/libra/openstack/common/notifier/__init__.py deleted file mode 100644 index 45c3b46a..00000000 --- a/libra/openstack/common/notifier/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/openstack/common/notifier/api.py b/libra/openstack/common/notifier/api.py deleted file mode 100644 index 96072e63..00000000 --- a/libra/openstack/common/notifier/api.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket -import uuid - -from oslo.config import cfg - -from libra.openstack.common import context -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import jsonutils -from libra.openstack.common import log as logging -from libra.openstack.common import timeutils - - -LOG = logging.getLogger(__name__) - -notifier_opts = [ - cfg.MultiStrOpt('notification_driver', - default=[], - help='Driver or drivers to handle sending notifications'), - cfg.StrOpt('default_notification_level', - default='INFO', - help='Default notification level for outgoing notifications'), - cfg.StrOpt('default_publisher_id', - default=None, - help='Default publisher_id for outgoing notifications'), -] - -CONF = cfg.CONF -CONF.register_opts(notifier_opts) - -WARN = 'WARN' -INFO = 'INFO' -ERROR = 'ERROR' -CRITICAL = 'CRITICAL' -DEBUG = 'DEBUG' - -log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) - - -class BadPriorityException(Exception): - pass - - -def notify_decorator(name, fn): - """Decorator for notify which is used from utils.monkey_patch(). - - :param name: name of the function - :param function: - object of the function - :returns: function -- decorated function - - """ - def wrapped_func(*args, **kwarg): - body = {} - body['args'] = [] - body['kwarg'] = {} - for arg in args: - body['args'].append(arg) - for key in kwarg: - body['kwarg'][key] = kwarg[key] - - ctxt = context.get_context_from_function_and_args(fn, args, kwarg) - notify(ctxt, - CONF.default_publisher_id or socket.gethostname(), - name, - CONF.default_notification_level, - body) - return fn(*args, **kwarg) - return wrapped_func - - -def publisher_id(service, host=None): - if not host: - try: - host = CONF.host - except AttributeError: - host = CONF.default_publisher_id or socket.gethostname() - return "%s.%s" % (service, host) - - -def notify(context, publisher_id, event_type, priority, payload): - """Sends a notification using the specified driver - - :param publisher_id: the source worker_type.host of the message - :param event_type: the literal type of event (ex. Instance Creation) - :param priority: patterned after the enumeration of Python logging - levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) - :param payload: A python dictionary of attributes - - Outgoing message format includes the above parameters, and appends the - following: - - message_id - a UUID representing the id for this notification - - timestamp - the GMT timestamp the notification was sent at - - The composite message will be constructed as a dictionary of the above - attributes, which will then be sent via the transport mechanism defined - by the driver. - - Message example:: - - {'message_id': str(uuid.uuid4()), - 'publisher_id': 'compute.host1', - 'timestamp': timeutils.utcnow(), - 'priority': 'WARN', - 'event_type': 'compute.create_instance', - 'payload': {'instance_id': 12, ... }} - - """ - if priority not in log_levels: - raise BadPriorityException( - _('%s not in valid priorities') % priority) - - # Ensure everything is JSON serializable. 
- payload = jsonutils.to_primitive(payload, convert_instances=True) - - msg = dict(message_id=str(uuid.uuid4()), - publisher_id=publisher_id, - event_type=event_type, - priority=priority, - payload=payload, - timestamp=str(timeutils.utcnow())) - - for driver in _get_drivers(): - try: - driver.notify(context, msg) - except Exception as e: - LOG.exception(_("Problem '%(e)s' attempting to " - "send to notification system. " - "Payload=%(payload)s") - % dict(e=e, payload=payload)) - - -_drivers = None - - -def _get_drivers(): - """Instantiate, cache, and return drivers based on the CONF.""" - global _drivers - if _drivers is None: - _drivers = {} - for notification_driver in CONF.notification_driver: - try: - driver = importutils.import_module(notification_driver) - _drivers[notification_driver] = driver - except ImportError: - LOG.exception(_("Failed to load notifier %s. " - "These notifications will not be sent.") % - notification_driver) - return _drivers.values() - - -def _reset_drivers(): - """Used by unit tests to reset the drivers.""" - global _drivers - _drivers = None diff --git a/libra/openstack/common/notifier/log_notifier.py b/libra/openstack/common/notifier/log_notifier.py deleted file mode 100644 index 66f18765..00000000 --- a/libra/openstack/common/notifier/log_notifier.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from libra.openstack.common import jsonutils -from libra.openstack.common import log as logging - - -CONF = cfg.CONF - - -def notify(_context, message): - """Notifies the recipient of the desired event given the model. - - Log notifications using OpenStack's default logging system. - """ - - priority = message.get('priority', - CONF.default_notification_level) - priority = priority.lower() - logger = logging.getLogger( - 'libra.openstack.common.notification.%s' % - message['event_type']) - getattr(logger, priority)(jsonutils.dumps(message)) diff --git a/libra/openstack/common/notifier/no_op_notifier.py b/libra/openstack/common/notifier/no_op_notifier.py deleted file mode 100644 index 13d946e3..00000000 --- a/libra/openstack/common/notifier/no_op_notifier.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
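(The no-op driver below is the smallest instance of the contract _get_drivers() relies on: any importable module exposing notify(context, message). A hypothetical custom driver looks the same; module name is illustrative.)

    # myproject/print_notifier.py (hypothetical)
    def notify(_context, message):
        # message is the dict assembled by notifier.api.notify():
        # message_id, publisher_id, event_type, priority, payload, timestamp
        print('%(priority)s %(event_type)s %(payload)s' % message)

    # enabled through configuration:
    #   notification_driver = myproject.print_notifier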
- - -def notify(_context, message): - """Notifies the recipient of the desired event given the model.""" - pass diff --git a/libra/openstack/common/notifier/rpc_notifier.py b/libra/openstack/common/notifier/rpc_notifier.py deleted file mode 100644 index 1761d0d4..00000000 --- a/libra/openstack/common/notifier/rpc_notifier.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from libra.openstack.common import context as req_context -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import log as logging -from libra.openstack.common import rpc - -LOG = logging.getLogger(__name__) - -notification_topic_opt = cfg.ListOpt( - 'notification_topics', default=['notifications', ], - help='AMQP topic used for OpenStack notifications') - -CONF = cfg.CONF -CONF.register_opt(notification_topic_opt) - - -def notify(context, message): - """Sends a notification via RPC.""" - if not context: - context = req_context.get_admin_context() - priority = message.get('priority', - CONF.default_notification_level) - priority = priority.lower() - for topic in CONF.notification_topics: - topic = '%s.%s' % (topic, priority) - try: - rpc.notify(context, topic, message) - except Exception: - LOG.exception(_("Could not send notification to %(topic)s. " - "Payload=%(message)s"), - {"topic": topic, "message": message}) diff --git a/libra/openstack/common/notifier/rpc_notifier2.py b/libra/openstack/common/notifier/rpc_notifier2.py deleted file mode 100644 index 404c82e7..00000000 --- a/libra/openstack/common/notifier/rpc_notifier2.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
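(As a small aside, the topic fan-out rpc_notifier performs above can be shown in isolation; the values mirror its defaults.)

    # Each configured topic gets the lower-cased priority appended.
    topics = ['notifications']      # CONF.notification_topics default
    priority = 'ERROR'.lower()
    print(['%s.%s' % (topic, priority) for topic in topics])
    # prints: ['notifications.error']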
- -'''messaging based notification driver, with message envelopes''' - -from oslo.config import cfg - -from libra.openstack.common import context as req_context -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import log as logging -from libra.openstack.common import rpc - -LOG = logging.getLogger(__name__) - -notification_topic_opt = cfg.ListOpt( - 'topics', default=['notifications', ], - help='AMQP topic(s) used for OpenStack notifications') - -opt_group = cfg.OptGroup(name='rpc_notifier2', - title='Options for rpc_notifier2') - -CONF = cfg.CONF -CONF.register_group(opt_group) -CONF.register_opt(notification_topic_opt, opt_group) - - -def notify(context, message): - """Sends a notification via RPC.""" - if not context: - context = req_context.get_admin_context() - priority = message.get('priority', - CONF.default_notification_level) - priority = priority.lower() - for topic in CONF.rpc_notifier2.topics: - topic = '%s.%s' % (topic, priority) - try: - rpc.notify(context, topic, message, envelope=True) - except Exception: - LOG.exception(_("Could not send notification to %(topic)s. " - "Payload=%(message)s"), - {"topic": topic, "message": message}) diff --git a/libra/openstack/common/notifier/test_notifier.py b/libra/openstack/common/notifier/test_notifier.py deleted file mode 100644 index 96c1746b..00000000 --- a/libra/openstack/common/notifier/test_notifier.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -NOTIFICATIONS = [] - - -def notify(_context, message): - """Test notifier, stores notifications in memory for unittests.""" - NOTIFICATIONS.append(message) diff --git a/libra/openstack/common/rpc/__init__.py b/libra/openstack/common/rpc/__init__.py deleted file mode 100644 index ca3cf288..00000000 --- a/libra/openstack/common/rpc/__init__.py +++ /dev/null @@ -1,306 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A remote procedure call (rpc) abstraction. 
- -For some wrappers that add message versioning to rpc, see: - rpc.dispatcher - rpc.proxy -""" - -import inspect - -from oslo.config import cfg - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import local -from libra.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -rpc_opts = [ - cfg.StrOpt('rpc_backend', - default='%s.impl_kombu' % __package__, - help="The messaging module to use, defaults to kombu."), - cfg.IntOpt('rpc_thread_pool_size', - default=64, - help='Size of RPC thread pool'), - cfg.IntOpt('rpc_conn_pool_size', - default=30, - help='Size of RPC connection pool'), - cfg.IntOpt('rpc_response_timeout', - default=60, - help='Seconds to wait for a response from call or multicall'), - cfg.IntOpt('rpc_cast_timeout', - default=30, - help='Seconds to wait before a cast expires (TTL). ' - 'Only supported by impl_zmq.'), - cfg.ListOpt('allowed_rpc_exception_modules', - default=['nova.exception', - 'cinder.exception', - 'exceptions', - ], - help='Modules of exceptions that are permitted to be recreated' - ' upon receiving exception data from an rpc call.'), - cfg.BoolOpt('fake_rabbit', - default=False, - help='If passed, use a fake RabbitMQ provider'), - cfg.StrOpt('control_exchange', - default='openstack', - help='AMQP exchange to connect to if using RabbitMQ or Qpid'), -] - -CONF = cfg.CONF -CONF.register_opts(rpc_opts) - - -def set_defaults(control_exchange): - cfg.set_defaults(rpc_opts, - control_exchange=control_exchange) - - -def create_connection(new=True): - """Create a connection to the message bus used for rpc. - - For some example usage of creating a connection and some consumers on that - connection, see nova.service. - - :param new: Whether or not to create a new connection. A new connection - will be created by default. If new is False, the - implementation is free to return an existing connection from a - pool. - - :returns: An instance of openstack.common.rpc.common.Connection - """ - return _get_impl().create_connection(CONF, new=new) - - -def _check_for_lock(): - if not CONF.debug: - return None - - if ((hasattr(local.strong_store, 'locks_held') - and local.strong_store.locks_held)): - stack = ' :: '.join([frame[3] for frame in inspect.stack()]) - LOG.warn(_('An RPC is being made while holding a lock. The locks ' - 'currently held are %(locks)s. This is probably a bug. ' - 'Please report it. Include the following: [%(stack)s].'), - {'locks': local.strong_store.locks_held, - 'stack': stack}) - return True - - return False - - -def call(context, topic, msg, timeout=None, check_for_lock=False): - """Invoke a remote method that returns something. - - :param context: Information that identifies the user that has made this - request. - :param topic: The topic to send the rpc message to. This correlates to the - topic argument of - openstack.common.rpc.common.Connection.create_consumer() - and only applies when the consumer was created with - fanout=False. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - :param timeout: int, number of seconds to use for a response timeout. - If set, this overrides the rpc_response_timeout option. - :param check_for_lock: if True, a warning is emitted if an RPC call is made - with a lock held. - - :returns: A dict from the remote method. - - :raises: openstack.common.rpc.common.Timeout if a complete response - is not received before the timeout is reached.
- """ - if check_for_lock: - _check_for_lock() - return _get_impl().call(CONF, context, topic, msg, timeout) - - -def cast(context, topic, msg): - """Invoke a remote method that does not return anything. - - :param context: Information that identifies the user that has made this - request. - :param topic: The topic to send the rpc message to. This correlates to the - topic argument of - openstack.common.rpc.common.Connection.create_consumer() - and only applies when the consumer was created with - fanout=False. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - - :returns: None - """ - return _get_impl().cast(CONF, context, topic, msg) - - -def fanout_cast(context, topic, msg): - """Broadcast a remote method invocation with no return. - - This method will get invoked on all consumers that were set up with this - topic name and fanout=True. - - :param context: Information that identifies the user that has made this - request. - :param topic: The topic to send the rpc message to. This correlates to the - topic argument of - openstack.common.rpc.common.Connection.create_consumer() - and only applies when the consumer was created with - fanout=True. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - - :returns: None - """ - return _get_impl().fanout_cast(CONF, context, topic, msg) - - -def multicall(context, topic, msg, timeout=None, check_for_lock=False): - """Invoke a remote method and get back an iterator. - - In this case, the remote method will be returning multiple values in - separate messages, so the return values can be processed as the come in via - an iterator. - - :param context: Information that identifies the user that has made this - request. - :param topic: The topic to send the rpc message to. This correlates to the - topic argument of - openstack.common.rpc.common.Connection.create_consumer() - and only applies when the consumer was created with - fanout=False. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - :param timeout: int, number of seconds to use for a response timeout. - If set, this overrides the rpc_response_timeout option. - :param check_for_lock: if True, a warning is emitted if a RPC call is made - with a lock held. - - :returns: An iterator. The iterator will yield a tuple (N, X) where N is - an index that starts at 0 and increases by one for each value - returned and X is the Nth value that was returned by the remote - method. - - :raises: openstack.common.rpc.common.Timeout if a complete response - is not received before the timeout is reached. - """ - if check_for_lock: - _check_for_lock() - return _get_impl().multicall(CONF, context, topic, msg, timeout) - - -def notify(context, topic, msg, envelope=False): - """Send notification event. - - :param context: Information that identifies the user that has made this - request. - :param topic: The topic to send the notification to. - :param msg: This is a dict of content of event. - :param envelope: Set to True to enable message envelope for notifications. - - :returns: None - """ - return _get_impl().notify(cfg.CONF, context, topic, msg, envelope) - - -def cleanup(): - """Clean up resoruces in use by implementation. - - Clean up any resources that have been allocated by the RPC implementation. - This is typically open connections to a messaging service. 
- - -def cast_to_server(context, server_params, topic, msg): - """Invoke a remote method that does not return anything. - - :param context: Information that identifies the user that has made this - request. - :param server_params: Connection information - :param topic: The topic to send the rpc message to. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - - :returns: None - """ - return _get_impl().cast_to_server(CONF, context, server_params, topic, - msg) - - -def fanout_cast_to_server(context, server_params, topic, msg): - """Broadcast a remote method invocation with no return. - - :param context: Information that identifies the user that has made this - request. - :param server_params: Connection information - :param topic: The topic to send the rpc message to. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - - :returns: None - """ - return _get_impl().fanout_cast_to_server(CONF, context, server_params, - topic, msg) - - -def queue_get_for(context, topic, host): - """Get a queue name for a given topic + host. - - This function only works if this naming convention is followed on the - consumer side, as well. For example, in nova, every instance of the - nova-foo service calls create_consumer() for two topics: - - foo - foo.<host> - - Messages sent to the 'foo' topic are distributed to exactly one instance of - the nova-foo service. The services are chosen in a round-robin fashion. - Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on - <host>. - """ - return '%s.%s' % (topic, host) if host else topic - - -_RPCIMPL = None - - -def _get_impl(): - """Delay import of rpc_backend until configuration is loaded.""" - global _RPCIMPL - if _RPCIMPL is None: - try: - _RPCIMPL = importutils.import_module(CONF.rpc_backend) - except ImportError: - # For backwards compatibility with older nova config. - impl = CONF.rpc_backend.replace('nova.rpc', - 'nova.openstack.common.rpc') - _RPCIMPL = importutils.import_module(impl) - return _RPCIMPL diff --git a/libra/openstack/common/rpc/amqp.py b/libra/openstack/common/rpc/amqp.py deleted file mode 100644 index ef714a27..00000000 --- a/libra/openstack/common/rpc/amqp.py +++ /dev/null @@ -1,636 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 - 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Shared code between AMQP based openstack.common.rpc implementations. - -The code in this module is shared between the rpc implementations based on AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses AMQP, but is deprecated and predates this code. -"""
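The queue_get_for() helper above is the entire naming convention; purely as an illustration, with invented values::

    queue_get_for(None, 'worker', 'host01')  # -> 'worker.host01'
    queue_get_for(None, 'worker', None)      # -> 'worker'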
- -import collections -import inspect -import sys -import uuid - -from eventlet import greenpool -from eventlet import pools -from eventlet import queue -from eventlet import semaphore -from oslo.config import cfg - -from libra.openstack.common import excutils -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import local -from libra.openstack.common import log as logging -from libra.openstack.common.rpc import common as rpc_common - - -amqp_opts = [ - cfg.BoolOpt('amqp_durable_queues', - default=False, - deprecated_name='rabbit_durable_queues', - deprecated_group='DEFAULT', - help='Use durable queues in amqp.'), - cfg.BoolOpt('amqp_auto_delete', - default=False, - help='Auto-delete queues in amqp.'), -] - -cfg.CONF.register_opts(amqp_opts) - -UNIQUE_ID = '_unique_id' -LOG = logging.getLogger(__name__) - - -class Pool(pools.Pool): - """Class that implements a Pool of Connections.""" - def __init__(self, conf, connection_cls, *args, **kwargs): - self.connection_cls = connection_cls - self.conf = conf - kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size) - kwargs.setdefault("order_as_stack", True) - super(Pool, self).__init__(*args, **kwargs) - self.reply_proxy = None - - # TODO(comstud): Timeout connections not used in a while - def create(self): - LOG.debug(_('Pool creating new connection')) - return self.connection_cls(self.conf) - - def empty(self): - while self.free_items: - self.get().close() - # Force a new connection pool to be created. - # Note that this was added due to failing unit test cases. The issue - # is the above "while loop" gets all the cached connections from the - # pool and closes them, but never returns them to the pool, a pool - # leak. The unit tests hang waiting for an item to be returned to the - # pool. The unit tests get here via the tearDown() method. In the run - # time code, it gets here via cleanup() and only appears in service.py - # just before doing a sys.exit(), so cleanup() only happens once and - # the leakage is not a problem. - self.connection_cls.pool = None - - -_pool_create_sem = semaphore.Semaphore() - - -def get_connection_pool(conf, connection_cls): - with _pool_create_sem: - # Make sure only one thread tries to create the connection pool. - if not connection_cls.pool: - connection_cls.pool = Pool(conf, connection_cls) - return connection_cls.pool - - -class ConnectionContext(rpc_common.Connection): - """The class that is actually returned to the create_connection() caller. - - This is essentially a wrapper around Connection that supports 'with'. - It can also return a new Connection, or one from a pool. - - The class also catches the moment an instance is about to be deleted, so - that Connections can be returned to the pool on exceptions and so forth - without making the caller responsible for catching them. Whenever - possible, a connection is returned to the pool.
- """ - - def __init__(self, conf, connection_pool, pooled=True, server_params=None): - """Create a new connection, or get one from the pool.""" - self.connection = None - self.conf = conf - self.connection_pool = connection_pool - if pooled: - self.connection = connection_pool.get() - else: - self.connection = connection_pool.connection_cls( - conf, - server_params=server_params) - self.pooled = pooled - - def __enter__(self): - """When with ConnectionContext() is used, return self.""" - return self - - def _done(self): - """If the connection came from a pool, clean it up and put it back. - If it did not come from a pool, close it. - """ - if self.connection: - if self.pooled: - # Reset the connection so it's ready for the next caller - # to grab from the pool - self.connection.reset() - self.connection_pool.put(self.connection) - else: - try: - self.connection.close() - except Exception: - pass - self.connection = None - - def __exit__(self, exc_type, exc_value, tb): - """End of 'with' statement. We're done here.""" - self._done() - - def __del__(self): - """Caller is done with this connection. Make sure we cleaned up.""" - self._done() - - def close(self): - """Caller is done with this connection.""" - self._done() - - def create_consumer(self, topic, proxy, fanout=False): - self.connection.create_consumer(topic, proxy, fanout) - - def create_worker(self, topic, proxy, pool_name): - self.connection.create_worker(topic, proxy, pool_name) - - def join_consumer_pool(self, callback, pool_name, topic, exchange_name, - ack_on_error=True): - self.connection.join_consumer_pool(callback, - pool_name, - topic, - exchange_name, - ack_on_error) - - def consume_in_thread(self): - self.connection.consume_in_thread() - - def __getattr__(self, key): - """Proxy all other calls to the Connection instance.""" - if self.connection: - return getattr(self.connection, key) - else: - raise rpc_common.InvalidRPCConnectionReuse() - - -class ReplyProxy(ConnectionContext): - """Connection class for RPC replies / callbacks.""" - def __init__(self, conf, connection_pool): - self._call_waiters = {} - self._num_call_waiters = 0 - self._num_call_waiters_wrn_threshhold = 10 - self._reply_q = 'reply_' + uuid.uuid4().hex - super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False) - self.declare_direct_consumer(self._reply_q, self._process_data) - self.consume_in_thread() - - def _process_data(self, message_data): - msg_id = message_data.pop('_msg_id', None) - waiter = self._call_waiters.get(msg_id) - if not waiter: - LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s' - ', message : %(data)s'), {'msg_id': msg_id, - 'data': message_data}) - LOG.warn(_('_call_waiters: %s') % str(self._call_waiters)) - else: - waiter.put(message_data) - - def add_call_waiter(self, waiter, msg_id): - self._num_call_waiters += 1 - if self._num_call_waiters > self._num_call_waiters_wrn_threshhold: - LOG.warn(_('Number of call waiters is greater than warning ' - 'threshhold: %d. There could be a MulticallProxyWaiter ' - 'leak.') % self._num_call_waiters_wrn_threshhold) - self._num_call_waiters_wrn_threshhold *= 2 - self._call_waiters[msg_id] = waiter - - def del_call_waiter(self, msg_id): - self._num_call_waiters -= 1 - del self._call_waiters[msg_id] - - def get_reply_q(self): - return self._reply_q - - -def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, - failure=None, ending=False, log_failure=True): - """Sends a reply or an error on the channel signified by msg_id. 
- - -def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, - failure=None, ending=False, log_failure=True): - """Sends a reply or an error on the channel signified by msg_id. - - Failure should be a sys.exc_info() tuple. - - """ - with ConnectionContext(conf, connection_pool) as conn: - if failure: - failure = rpc_common.serialize_remote_exception(failure, - log_failure) - - msg = {'result': reply, 'failure': failure} - if ending: - msg['ending'] = True - _add_unique_id(msg) - # If a reply_q exists, add the msg_id to the reply and pass the - # reply_q to direct_send() to use it as the response queue. - # Otherwise use the msg_id for backward compatibility. - if reply_q: - msg['_msg_id'] = msg_id - conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) - else: - conn.direct_send(msg_id, rpc_common.serialize_msg(msg)) - - -class RpcContext(rpc_common.CommonRpcContext): - """Context that supports replying to a rpc.call.""" - def __init__(self, **kwargs): - self.msg_id = kwargs.pop('msg_id', None) - self.reply_q = kwargs.pop('reply_q', None) - self.conf = kwargs.pop('conf') - super(RpcContext, self).__init__(**kwargs) - - def deepcopy(self): - values = self.to_dict() - values['conf'] = self.conf - values['msg_id'] = self.msg_id - values['reply_q'] = self.reply_q - return self.__class__(**values) - - def reply(self, reply=None, failure=None, ending=False, - connection_pool=None, log_failure=True): - if self.msg_id: - msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool, - reply, failure, ending, log_failure) - if ending: - self.msg_id = None - - -def unpack_context(conf, msg): - """Unpack context from msg.""" - context_dict = {} - for key in list(msg.keys()): - # NOTE(vish): Some versions of python don't like unicode keys - # in kwargs. - key = str(key) - if key.startswith('_context_'): - value = msg.pop(key) - context_dict[key[9:]] = value - context_dict['msg_id'] = msg.pop('_msg_id', None) - context_dict['reply_q'] = msg.pop('_reply_q', None) - context_dict['conf'] = conf - ctx = RpcContext.from_dict(context_dict) - rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) - return ctx - - -def pack_context(msg, context): - """Pack context into msg. - - Values for message keys need to be less than 255 chars, so we pull - context out into a bunch of separate keys. If we want to support - more arguments in rabbit messages, we may want to do the same - for args at some point. - - """ - if isinstance(context, dict): - context_d = dict([('_context_%s' % key, value) - for (key, value) in context.iteritems()]) - else: - context_d = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - - msg.update(context_d) - - -class _MsgIdCache(object): - """This class detects duplicate messages.""" - - # NOTE: This value could be made a configuration item, but - # it rarely needs to change, so it is left static for now. - DUP_MSG_CHECK_SIZE = 16 - - def __init__(self, **kwargs): - self.prev_msgids = collections.deque([], - maxlen=self.DUP_MSG_CHECK_SIZE) - - def check_duplicate_message(self, message_data): - """AMQP consumers may read the same message twice when exceptions - occur before the ack is returned. This method prevents that. - """ - if UNIQUE_ID in message_data: - msg_id = message_data[UNIQUE_ID] - if msg_id not in self.prev_msgids: - self.prev_msgids.append(msg_id) - else: - raise rpc_common.DuplicateMessageError(msg_id=msg_id)
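pack_context() and unpack_context() above round-trip the request context through flat '_context_'-prefixed message keys; roughly, with invented values::

    msg = {'method': 'ping', 'args': {}}
    pack_context(msg, {'user_id': 'demo', 'project_id': 'p1'})
    # msg now additionally carries:
    #   '_context_user_id': 'demo', '_context_project_id': 'p1'
    # unpack_context() later pops the prefixed keys to rebuild an
    # RpcContext on the consuming side.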
- """ - if UNIQUE_ID in message_data: - msg_id = message_data[UNIQUE_ID] - if msg_id not in self.prev_msgids: - self.prev_msgids.append(msg_id) - else: - raise rpc_common.DuplicateMessageError(msg_id=msg_id) - - -def _add_unique_id(msg): - """Add unique_id for checking duplicate messages.""" - unique_id = uuid.uuid4().hex - msg.update({UNIQUE_ID: unique_id}) - LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) - - -class _ThreadPoolWithWait(object): - """Base class for a delayed invocation manager. - - Used by the Connection class to start up green threads - to handle incoming messages. - """ - - def __init__(self, conf, connection_pool): - self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size) - self.connection_pool = connection_pool - self.conf = conf - - def wait(self): - """Wait for all callback threads to exit.""" - self.pool.waitall() - - -class CallbackWrapper(_ThreadPoolWithWait): - """Wraps a straight callback. - - Allows it to be invoked in a green thread. - """ - - def __init__(self, conf, callback, connection_pool, - wait_for_consumers=False): - """Initiates CallbackWrapper object. - - :param conf: cfg.CONF instance - :param callback: a callable (probably a function) - :param connection_pool: connection pool as returned by - get_connection_pool() - :param wait_for_consumers: wait for all green threads to - complete and raise the last - caught exception, if any. - - """ - super(CallbackWrapper, self).__init__( - conf=conf, - connection_pool=connection_pool, - ) - self.callback = callback - self.wait_for_consumers = wait_for_consumers - self.exc_info = None - - def _wrap(self, message_data, **kwargs): - """Wrap the callback invocation to catch exceptions. - """ - try: - self.callback(message_data, **kwargs) - except Exception: - self.exc_info = sys.exc_info() - - def __call__(self, message_data): - self.exc_info = None - self.pool.spawn_n(self._wrap, message_data) - - if self.wait_for_consumers: - self.pool.waitall() - if self.exc_info: - raise self.exc_info[1], None, self.exc_info[2] - - -class ProxyCallback(_ThreadPoolWithWait): - """Calls methods on a proxy object based on method and args.""" - - def __init__(self, conf, proxy, connection_pool): - super(ProxyCallback, self).__init__( - conf=conf, - connection_pool=connection_pool, - ) - self.proxy = proxy - self.msg_id_cache = _MsgIdCache() - - def __call__(self, message_data): - """Consumer callback to call a method on a proxy object. - - Parses the message for validity and fires off a thread to call the - proxy object method. 
- - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ - # It is important to clear the context here, because at this point - # the previous context is stored in local.store.context - if hasattr(local.store, 'context'): - del local.store.context - rpc_common._safe_log(LOG.debug, _('received %s'), message_data) - self.msg_id_cache.check_duplicate_message(message_data) - ctxt = unpack_context(self.conf, message_data) - method = message_data.get('method') - args = message_data.get('args', {}) - version = message_data.get('version') - namespace = message_data.get('namespace') - if not method: - LOG.warn(_('no method for message: %s') % message_data) - ctxt.reply(_('No method for message: %s') % message_data, - connection_pool=self.connection_pool) - return - self.pool.spawn_n(self._process_data, ctxt, version, method, - namespace, args) - - def _process_data(self, ctxt, version, method, namespace, args): - """Process a message in a new thread. - - If the proxy object we have has a dispatch method - (see rpc.dispatcher.RpcDispatcher), pass it the version, - method, and args and let it dispatch as appropriate. If not, use - the old behavior of magically calling the specified method on the - proxy we have here. - """ - ctxt.update_store() - try: - rval = self.proxy.dispatch(ctxt, version, method, namespace, - **args) - # Check if the result was a generator - if inspect.isgenerator(rval): - for x in rval: - ctxt.reply(x, None, connection_pool=self.connection_pool) - else: - ctxt.reply(rval, None, connection_pool=self.connection_pool) - # This final None tells multicall that it is done. - ctxt.reply(ending=True, connection_pool=self.connection_pool) - except rpc_common.ClientException as e: - LOG.debug(_('Expected exception during message handling (%s)') % - e._exc_info[1]) - ctxt.reply(None, e._exc_info, - connection_pool=self.connection_pool, - log_failure=False) - except Exception: - # sys.exc_info() is deleted by LOG.exception(). 
- exc_info = sys.exc_info() - LOG.error(_('Exception during message handling'), - exc_info=exc_info) - ctxt.reply(None, exc_info, connection_pool=self.connection_pool) - - -class MulticallProxyWaiter(object): - def __init__(self, conf, msg_id, timeout, connection_pool): - self._msg_id = msg_id - self._timeout = timeout or conf.rpc_response_timeout - self._reply_proxy = connection_pool.reply_proxy - self._done = False - self._got_ending = False - self._conf = conf - self._dataqueue = queue.LightQueue() - # Add this caller to the reply proxy's call_waiters - self._reply_proxy.add_call_waiter(self, self._msg_id) - self.msg_id_cache = _MsgIdCache() - - def put(self, data): - self._dataqueue.put(data) - - def done(self): - if self._done: - return - self._done = True - # Remove this caller from reply proxy's call_waiters - self._reply_proxy.del_call_waiter(self._msg_id) - - def _process_data(self, data): - result = None - self.msg_id_cache.check_duplicate_message(data) - if data['failure']: - failure = data['failure'] - result = rpc_common.deserialize_remote_exception(self._conf, - failure) - elif data.get('ending', False): - self._got_ending = True - else: - result = data['result'] - return result - - def __iter__(self): - """Return a result until we get a reply with an 'ending' flag.""" - if self._done: - raise StopIteration - while True: - try: - data = self._dataqueue.get(timeout=self._timeout) - result = self._process_data(data) - except queue.Empty: - self.done() - raise rpc_common.Timeout() - except Exception: - with excutils.save_and_reraise_exception(): - self.done() - if self._got_ending: - self.done() - raise StopIteration - if isinstance(result, Exception): - self.done() - raise result - yield result - - -def create_connection(conf, new, connection_pool): - """Create a connection.""" - return ConnectionContext(conf, connection_pool, pooled=not new) - - -_reply_proxy_create_sem = semaphore.Semaphore() - - -def multicall(conf, context, topic, msg, timeout, connection_pool): - """Make a call that returns multiple times.""" - LOG.debug(_('Making synchronous call on %s ...'), topic) - msg_id = uuid.uuid4().hex - msg.update({'_msg_id': msg_id}) - LOG.debug(_('MSG_ID is %s') % (msg_id)) - _add_unique_id(msg) - pack_context(msg, context) - - with _reply_proxy_create_sem: - if not connection_pool.reply_proxy: - connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) - msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) - wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) - with ConnectionContext(conf, connection_pool) as conn: - conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) - return wait_msg - - -def call(conf, context, topic, msg, timeout, connection_pool): - """Sends a message on a topic and wait for a response.""" - rv = multicall(conf, context, topic, msg, timeout, connection_pool) - # NOTE(vish): return the last result from the multicall - rv = list(rv) - if not rv: - return - return rv[-1] - - -def cast(conf, context, topic, msg, connection_pool): - """Sends a message on a topic without waiting for a response.""" - LOG.debug(_('Making asynchronous cast on %s...'), topic) - _add_unique_id(msg) - pack_context(msg, context) - with ConnectionContext(conf, connection_pool) as conn: - conn.topic_send(topic, rpc_common.serialize_msg(msg)) - - -def fanout_cast(conf, context, topic, msg, connection_pool): - """Sends a message on a fanout exchange without waiting for a response.""" - LOG.debug(_('Making asynchronous fanout cast...')) - 
_add_unique_id(msg) - pack_context(msg, context) - with ConnectionContext(conf, connection_pool) as conn: - conn.fanout_send(topic, rpc_common.serialize_msg(msg)) - - -def cast_to_server(conf, context, server_params, topic, msg, connection_pool): - """Sends a message on a topic to a specific server.""" - _add_unique_id(msg) - pack_context(msg, context) - with ConnectionContext(conf, connection_pool, pooled=False, - server_params=server_params) as conn: - conn.topic_send(topic, rpc_common.serialize_msg(msg)) - - -def fanout_cast_to_server(conf, context, server_params, topic, msg, - connection_pool): - """Sends a message on a fanout exchange to a specific server.""" - _add_unique_id(msg) - pack_context(msg, context) - with ConnectionContext(conf, connection_pool, pooled=False, - server_params=server_params) as conn: - conn.fanout_send(topic, rpc_common.serialize_msg(msg)) - - -def notify(conf, context, topic, msg, connection_pool, envelope): - """Sends a notification event on a topic.""" - LOG.debug(_('Sending %(event_type)s on %(topic)s'), - dict(event_type=msg.get('event_type'), - topic=topic)) - _add_unique_id(msg) - pack_context(msg, context) - with ConnectionContext(conf, connection_pool) as conn: - if envelope: - msg = rpc_common.serialize_msg(msg) - conn.notify_send(topic, msg) - - -def cleanup(connection_pool): - if connection_pool: - connection_pool.empty() - - -def get_control_exchange(conf): - return conf.control_exchange diff --git a/libra/openstack/common/rpc/common.py b/libra/openstack/common/rpc/common.py deleted file mode 100644 index 4f798ff8..00000000 --- a/libra/openstack/common/rpc/common.py +++ /dev/null @@ -1,506 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import sys -import traceback - -from oslo.config import cfg -import six - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import jsonutils -from libra.openstack.common import local -from libra.openstack.common import log as logging -from libra.openstack.common import versionutils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -'''RPC Envelope Version. - -This version number applies to the top level structure of messages sent out. -It does *not* apply to the message payload, which must be versioned -independently. For example, when using rpc APIs, a version number is applied -for changes to the API being exposed over rpc. This version number is handled -in the rpc proxy and dispatcher modules. - -This version number applies to the message envelope that is used in the -serialization done inside the rpc layer. See serialize_msg() and -deserialize_msg(). - -The current message format (version 2.0) is very simple. 
It is: - - { - 'oslo.version': <RPC Envelope Version as a String>, - 'oslo.message': <Application Message Payload, JSON encoded> - } - -Message format version '1.0' is just considered to be the messages we sent -without a message envelope. - -So, the current message envelope just includes the envelope version. It may -eventually contain additional information, such as a signature for the message -payload. - -We will JSON encode the application message payload. The message envelope, -which includes the JSON encoded application message body, will be passed down -to the messaging libraries as a dict. -''' -_RPC_ENVELOPE_VERSION = '2.0' - -_VERSION_KEY = 'oslo.version' -_MESSAGE_KEY = 'oslo.message' - -_REMOTE_POSTFIX = '_Remote' - - -class RPCException(Exception): - msg_fmt = _("An unknown RPC related exception occurred.") - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if not message: - try: - message = self.msg_fmt % kwargs - - except Exception: - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_('Exception in string format operation')) - for name, value in kwargs.iteritems(): - LOG.error("%s: %s" % (name, value)) - # at least get the core message out if something happened - message = self.msg_fmt - - super(RPCException, self).__init__(message) - - -class RemoteError(RPCException): - """Signifies that a remote class has raised an exception. - - Contains a string representation of the type of the original exception, - the value of the original exception, and the traceback. These are - sent to the parent as a joined string so printing the exception - contains all of the relevant info. - - """ - msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") - - def __init__(self, exc_type=None, value=None, traceback=None): - self.exc_type = exc_type - self.value = value - self.traceback = traceback - super(RemoteError, self).__init__(exc_type=exc_type, - value=value, - traceback=traceback) - - -class Timeout(RPCException): - """Signifies that a timeout has occurred. - - This exception is raised if the rpc_response_timeout is reached while - waiting for a response from the remote side. - """ - msg_fmt = _('Timeout while waiting on RPC response - ' - 'topic: "%(topic)s", RPC method: "%(method)s" ' - 'info: "%(info)s"') - - def __init__(self, info=None, topic=None, method=None): - """Initializes a Timeout object. - - :param info: Extra info to convey to the user - :param topic: The topic that the rpc call was sent to - :param method: The name of the rpc method being - called - """ - self.info = info - self.topic = topic - self.method = method - super(Timeout, self).__init__( - None, - info=info or _('<unknown>'), - topic=topic or _('<unknown>'), - method=method or _('<unknown>')) - - -class DuplicateMessageError(RPCException): - msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.") - - -class InvalidRPCConnectionReuse(RPCException): - msg_fmt = _("Invalid reuse of an RPC connection.") - - -class UnsupportedRpcVersion(RPCException): - msg_fmt = _("Specified RPC version, %(version)s, not supported by " - "this endpoint.") - - -class UnsupportedRpcEnvelopeVersion(RPCException): - msg_fmt = _("Specified RPC envelope version, %(version)s, " - "not supported by this endpoint.") - - -class RpcVersionCapError(RPCException): - msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
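With the 2.0 envelope described above, serialize_msg() (defined later in this file) wraps the JSON-encoded payload; approximately::

    serialize_msg({'method': 'ping', 'args': {}})
    # -> {'oslo.version': '2.0',
    #     'oslo.message': '{"method": "ping", "args": {}}'}
    # deserialize_msg() reverses this, and passes bare (version 1.0)
    # dicts through unchanged.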
- - -class Connection(object): - """A connection, returned by rpc.create_connection(). - - This class represents a connection to the message bus used for rpc. - An instance of this class should never be created by users of the rpc API. - Use rpc.create_connection() instead. - """ - def close(self): - """Close the connection. - - This method must be called when the connection will no longer be used. - It will ensure that any resources associated with the connection, such - as a network connection, are cleaned up. - """ - raise NotImplementedError() - - def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer on this connection. - - A consumer is associated with a message queue on the backend message - bus. The consumer will read messages from the queue, unpack them, and - dispatch them to the proxy object. The contents of the message pulled - off of the queue will determine which method gets called on the proxy - object. - - :param topic: This is a name associated with what to consume from. - Multiple instances of a service may consume from the same - topic. For example, all instances of nova-compute consume - from a queue called "compute". In that case, the - messages will get distributed amongst the consumers in a - round-robin fashion if fanout=False. If fanout=True, - every consumer associated with this topic will get a - copy of every message. - :param proxy: The object that will handle all incoming messages. - :param fanout: Whether or not this is a fanout topic. See the - documentation for the topic parameter for some - additional comments on this. - """ - raise NotImplementedError() - - def create_worker(self, topic, proxy, pool_name): - """Create a worker on this connection. - - A worker is like a regular consumer of messages directed to a - topic, except that it is part of a set of such consumers (the - "pool") which may run in parallel. Every pool of workers will - receive a given message, but only one worker in the pool will - be asked to process it. Load is distributed across the members - of the pool in round-robin fashion. - - :param topic: This is a name associated with what to consume from. - Multiple instances of a service may consume from the same - topic. - :param proxy: The object that will handle all incoming messages. - :param pool_name: String containing the name of the pool of workers - """ - raise NotImplementedError() - - def join_consumer_pool(self, callback, pool_name, topic, exchange_name): - """Register as a member of a group of consumers. - - Uses given topic from the specified exchange. - Exactly one member of a given pool will receive each message. - - A message will be delivered to multiple pools, if more than - one is created. - - :param callback: Callable to be invoked for each message. - :type callback: callable accepting one argument - :param pool_name: The name of the consumer pool. - :type pool_name: str - :param topic: The routing topic for desired messages. - :type topic: str - :param exchange_name: The name of the message exchange where - the client should attach. Defaults to - the configured exchange. - :type exchange_name: str - """ - raise NotImplementedError() - - def consume_in_thread(self): - """Spawn a thread to handle incoming messages. - - Spawn a thread that will be responsible for handling all incoming - messages for consumers that were set up on this connection. - - Message dispatching inside of this is expected to be implemented in a - non-blocking manner. An example implementation would be having this - thread pull messages in for all of the consumers, but utilize a thread - pool for dispatching the messages to the proxy objects. - """ - raise NotImplementedError()
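A service would wire these Connection methods together roughly as follows; the topic is invented and proxy stands for whatever object handles the messages, typically an RpcDispatcher (see rpc/dispatcher.py below)::

    conn = rpc.create_connection(new=True)
    # Round-robin queue shared by every instance on this topic ...
    conn.create_consumer('worker', proxy, fanout=False)
    # ... plus a fanout queue so every instance sees broadcasts.
    conn.create_consumer('worker', proxy, fanout=True)
    conn.consume_in_thread()
    # ... and on shutdown:
    conn.close()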
- """ - raise NotImplementedError() - - -def _safe_log(log_func, msg, msg_data): - """Sanitizes the msg_data field before logging.""" - SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass'] - - def _fix_passwords(d): - """Sanitizes the password fields in the dictionary.""" - for k in d.iterkeys(): - if k.lower().find('password') != -1: - d[k] = '' - elif k.lower() in SANITIZE: - d[k] = '' - elif isinstance(d[k], dict): - _fix_passwords(d[k]) - return d - - return log_func(msg, _fix_passwords(copy.deepcopy(msg_data))) - - -def serialize_remote_exception(failure_info, log_failure=True): - """Prepares exception data to be sent over rpc. - - Failure_info should be a sys.exc_info() tuple. - - """ - tb = traceback.format_exception(*failure_info) - failure = failure_info[1] - if log_failure: - LOG.error(_("Returning exception %s to caller"), - six.text_type(failure)) - LOG.error(tb) - - kwargs = {} - if hasattr(failure, 'kwargs'): - kwargs = failure.kwargs - - # NOTE(matiu): With cells, it's possible to re-raise remote, remote - # exceptions. Lets turn it back into the original exception type. - cls_name = str(failure.__class__.__name__) - mod_name = str(failure.__class__.__module__) - if (cls_name.endswith(_REMOTE_POSTFIX) and - mod_name.endswith(_REMOTE_POSTFIX)): - cls_name = cls_name[:-len(_REMOTE_POSTFIX)] - mod_name = mod_name[:-len(_REMOTE_POSTFIX)] - - data = { - 'class': cls_name, - 'module': mod_name, - 'message': six.text_type(failure), - 'tb': tb, - 'args': failure.args, - 'kwargs': kwargs - } - - json_data = jsonutils.dumps(data) - - return json_data - - -def deserialize_remote_exception(conf, data): - failure = jsonutils.loads(str(data)) - - trace = failure.get('tb', []) - message = failure.get('message', "") + "\n" + "\n".join(trace) - name = failure.get('class') - module = failure.get('module') - - # NOTE(ameade): We DO NOT want to allow just any module to be imported, in - # order to prevent arbitrary code execution. - if module not in conf.allowed_rpc_exception_modules: - return RemoteError(name, failure.get('message'), trace) - - try: - mod = importutils.import_module(module) - klass = getattr(mod, name) - if not issubclass(klass, Exception): - raise TypeError("Can only deserialize Exceptions") - - failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) - except (AttributeError, TypeError, ImportError): - return RemoteError(name, failure.get('message'), trace) - - ex_type = type(failure) - str_override = lambda self: message - new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), - {'__str__': str_override, '__unicode__': str_override}) - new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) - try: - # NOTE(ameade): Dynamically create a new exception type and swap it in - # as the new type for the exception. This only works on user defined - # Exceptions and not core python exceptions. This is important because - # we cannot necessarily change an exception message so we must override - # the __str__ method. - failure.__class__ = new_ex_type - except TypeError: - # NOTE(ameade): If a core exception then just add the traceback to the - # first exception argument. 
- failure.args = (message,) + failure.args[1:] - return failure - - -class CommonRpcContext(object): - def __init__(self, **kwargs): - self.values = kwargs - - def __getattr__(self, key): - try: - return self.values[key] - except KeyError: - raise AttributeError(key) - - def to_dict(self): - return copy.deepcopy(self.values) - - @classmethod - def from_dict(cls, values): - return cls(**values) - - def deepcopy(self): - return self.from_dict(self.to_dict()) - - def update_store(self): - local.store.context = self - - def elevated(self, read_deleted=None, overwrite=False): - """Return a version of this context with admin flag set.""" - # TODO(russellb) This method is a bit of a nova-ism. It makes - # some assumptions about the data in the request context sent - # across rpc, while the rest of this class does not. We could get - # rid of this if we changed the nova code that uses this to - # convert the RpcContext back to its native RequestContext doing - # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) - - context = self.deepcopy() - context.values['is_admin'] = True - - context.values.setdefault('roles', []) - - if 'admin' not in context.values['roles']: - context.values['roles'].append('admin') - - if read_deleted is not None: - context.values['read_deleted'] = read_deleted - - return context - - -class ClientException(Exception): - """Encapsulates actual exception expected to be hit by a RPC proxy object. - - Merely instantiating it records the current exception information, which - will be passed back to the RPC client without exceptional logging. - """ - def __init__(self): - self._exc_info = sys.exc_info() - - -def catch_client_exception(exceptions, func, *args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - if type(e) in exceptions: - raise ClientException() - else: - raise - - -def client_exceptions(*exceptions): - """Decorator for manager methods that raise expected exceptions. - - Marking a Manager method with this decorator allows the declaration - of expected exceptions that the RPC layer should not consider fatal, - and not log as if they were generated in a real error scenario. Note - that this will cause listed exceptions to be wrapped in a - ClientException, which is used internally by the RPC layer. - """ - def outer(func): - def inner(*args, **kwargs): - return catch_client_exception(exceptions, func, *args, **kwargs) - return inner - return outer - - -# TODO(sirp): we should deprecate this in favor of -# using `versionutils.is_compatible` directly -def version_is_compatible(imp_version, version): - """Determine whether versions are compatible. - - :param imp_version: The version implemented - :param version: The version requested by an incoming message. - """ - return versionutils.is_compatible(version, imp_version) - - -def serialize_msg(raw_msg): - # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more - # information about this format. - msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, - _MESSAGE_KEY: jsonutils.dumps(raw_msg)} - - return msg - - -def deserialize_msg(msg): - # NOTE(russellb): Hang on to your hats, this road is about to - # get a little bumpy. - # - # Robustness Principle: - # "Be strict in what you send, liberal in what you accept." - # - # At this point we have to do a bit of guessing about what it - # is we just received. Here is the set of possibilities: - # - # 1) We received a dict. This could be 2 things: - # - # a) Inspect it to see if it looks like a standard message envelope. 
- # If so, great! - # - # b) If it doesn't look like a standard message envelope, it could either - # be a notification, or a message from before we added a message - # envelope (referred to as version 1.0). - # Just return the message as-is. - # - # 2) It's any other non-dict type. Just return it and hope for the best. - # This case covers return values from rpc.call() from before message - # envelopes were used. (messages to call a method were always a dict) - - if not isinstance(msg, dict): - # See #2 above. - return msg - - base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) - if not all(map(lambda key: key in msg, base_envelope_keys)): - # See #1.b above. - return msg - - # At this point we think we have the message envelope - # format we were expecting. (#1.a above) - - if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): - raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) - - raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) - - return raw_msg diff --git a/libra/openstack/common/rpc/dispatcher.py b/libra/openstack/common/rpc/dispatcher.py deleted file mode 100644 index add2ed92..00000000 --- a/libra/openstack/common/rpc/dispatcher.py +++ /dev/null @@ -1,178 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Code for rpc message dispatching. - -Messages that come in have a version number associated with them. RPC API -version numbers are in the form: - - Major.Minor - -For a given message with version X.Y, the receiver must be marked as able to -handle messages of version A.B, where: - - A = X - - B >= Y - -The Major version number would be incremented for an almost completely new API. -The Minor version number would be incremented for backwards compatible changes -to an existing API. A backwards compatible change could be something like -adding a new method, adding an argument to an existing method (but not -requiring it), or changing the type for an existing argument (but still -handling the old type as well). - -The conversion over to a versioned API must be done on both the client side and -server side of the API at the same time. However, as the code stands today, -there can be both versioned and unversioned APIs implemented in the same code -base. - -EXAMPLES -======== - -Nova was the first project to use versioned rpc APIs. Consider the compute rpc -API as an example. The client side is in nova/compute/rpcapi.py and the server -side is in nova/compute/manager.py. - - -Example 1) Adding a new method. -------------------------------- - -Adding a new method is a backwards compatible change. It should be added to -nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to -X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should -have a specific version specified to indicate the minimum API version that must -be implemented for the method to be supported. 
For example:: - - def get_host_uptime(self, ctxt, host): - topic = _compute_topic(self.topic, ctxt, host, None) - return self.call(ctxt, self.make_msg('get_host_uptime'), topic, - version='1.1') - -In this case, version '1.1' is the first version that supported the -get_host_uptime() method. - - -Example 2) Adding a new parameter. ----------------------------------- - -Adding a new parameter to an rpc method can be made backwards compatible. The -RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. -The implementation of the method must not expect the parameter to be present.:: - - def some_remote_method(self, arg1, arg2, newarg=None): - # The code needs to deal with newarg=None for cases - # where an older client sends a message without it. - pass - -On the client side, the same changes should be made as in example 1. The -minimum version that supports the new parameter should be specified. -""" - -from libra.openstack.common.rpc import common as rpc_common -from libra.openstack.common.rpc import serializer as rpc_serializer - - -class RpcDispatcher(object): - """Dispatch rpc messages according to the requested API version. - - This class can be used as the top level 'manager' for a service. It - contains a list of underlying managers that have an API_VERSION attribute. - """ - - def __init__(self, callbacks, serializer=None): - """Initialize the rpc dispatcher. - - :param callbacks: List of proxy objects that are an instance - of a class with rpc methods exposed. Each proxy - object should have an RPC_API_VERSION attribute. - :param serializer: The Serializer object that will be used to - deserialize arguments before the method call and - to serialize the result after it returns. - """ - self.callbacks = callbacks - if serializer is None: - serializer = rpc_serializer.NoOpSerializer() - self.serializer = serializer - super(RpcDispatcher, self).__init__() - - def _deserialize_args(self, context, kwargs): - """Helper method called to deserialize args before dispatch. - - This calls our serializer on each argument, returning a new set of - args that have been deserialized. - - :param context: The request context - :param kwargs: The arguments to be deserialized - :returns: A new set of deserialized args - """ - new_kwargs = dict() - for argname, arg in kwargs.iteritems(): - new_kwargs[argname] = self.serializer.deserialize_entity(context, - arg) - return new_kwargs - - def dispatch(self, ctxt, version, method, namespace, **kwargs): - """Dispatch a message based on a requested version. - - :param ctxt: The request context - :param version: The requested API version from the incoming message - :param method: The method requested to be called by the incoming - message. - :param namespace: The namespace for the requested method. If None, - the dispatcher will look for a method on a callback - object with no namespace set. - :param kwargs: A dict of keyword arguments to be passed to the method. - - :returns: Whatever is returned by the underlying method that gets - called. 
- """ - if not version: - version = '1.0' - - had_compatible = False - for proxyobj in self.callbacks: - # Check for namespace compatibility - try: - cb_namespace = proxyobj.RPC_API_NAMESPACE - except AttributeError: - cb_namespace = None - - if namespace != cb_namespace: - continue - - # Check for version compatibility - try: - rpc_api_version = proxyobj.RPC_API_VERSION - except AttributeError: - rpc_api_version = '1.0' - - is_compatible = rpc_common.version_is_compatible(rpc_api_version, - version) - had_compatible = had_compatible or is_compatible - - if not hasattr(proxyobj, method): - continue - if is_compatible: - kwargs = self._deserialize_args(ctxt, kwargs) - result = getattr(proxyobj, method)(ctxt, **kwargs) - return self.serializer.serialize_entity(ctxt, result) - - if had_compatible: - raise AttributeError("No such RPC function '%s'" % method) - else: - raise rpc_common.UnsupportedRpcVersion(version=version) diff --git a/libra/openstack/common/rpc/impl_fake.py b/libra/openstack/common/rpc/impl_fake.py deleted file mode 100644 index d217ad1a..00000000 --- a/libra/openstack/common/rpc/impl_fake.py +++ /dev/null @@ -1,195 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Fake RPC implementation which calls proxy methods directly with no -queues. Casts will block, but this is very useful for tests. -""" - -import inspect -# NOTE(russellb): We specifically want to use json, not our own jsonutils. -# jsonutils has some extra logic to automatically convert objects to primitive -# types so that they can be serialized. We want to catch all cases where -# non-primitive types make it into this code and treat it as an error. 
-import json -import time - -import eventlet - -from libra.openstack.common.rpc import common as rpc_common - -CONSUMERS = {} - - -class RpcContext(rpc_common.CommonRpcContext): - def __init__(self, **kwargs): - super(RpcContext, self).__init__(**kwargs) - self._response = [] - self._done = False - - def deepcopy(self): - values = self.to_dict() - new_inst = self.__class__(**values) - new_inst._response = self._response - new_inst._done = self._done - return new_inst - - def reply(self, reply=None, failure=None, ending=False): - if ending: - self._done = True - if not self._done: - self._response.append((reply, failure)) - - -class Consumer(object): - def __init__(self, topic, proxy): - self.topic = topic - self.proxy = proxy - - def call(self, context, version, method, namespace, args, timeout): - done = eventlet.event.Event() - - def _inner(): - ctxt = RpcContext.from_dict(context.to_dict()) - try: - rval = self.proxy.dispatch(context, version, method, - namespace, **args) - res = [] - # Caller might have called ctxt.reply() manually - for (reply, failure) in ctxt._response: - if failure: - raise failure[0], failure[1], failure[2] - res.append(reply) - # if ending not 'sent'...we might have more data to - # return from the function itself - if not ctxt._done: - if inspect.isgenerator(rval): - for val in rval: - res.append(val) - else: - res.append(rval) - done.send(res) - except rpc_common.ClientException as e: - done.send_exception(e._exc_info[1]) - except Exception as e: - done.send_exception(e) - - thread = eventlet.greenthread.spawn(_inner) - - if timeout: - start_time = time.time() - while not done.ready(): - eventlet.greenthread.sleep(1) - cur_time = time.time() - if (cur_time - start_time) > timeout: - thread.kill() - raise rpc_common.Timeout() - - return done.wait() - - -class Connection(object): - """Connection object.""" - - def __init__(self): - self.consumers = [] - - def create_consumer(self, topic, proxy, fanout=False): - consumer = Consumer(topic, proxy) - self.consumers.append(consumer) - if topic not in CONSUMERS: - CONSUMERS[topic] = [] - CONSUMERS[topic].append(consumer) - - def close(self): - for consumer in self.consumers: - CONSUMERS[consumer.topic].remove(consumer) - self.consumers = [] - - def consume_in_thread(self): - pass - - -def create_connection(conf, new=True): - """Create a connection.""" - return Connection() - - -def check_serialize(msg): - """Make sure a message intended for rpc can be serialized.""" - json.dumps(msg) - - -def multicall(conf, context, topic, msg, timeout=None): - """Make a call that returns multiple times.""" - - check_serialize(msg) - - method = msg.get('method') - if not method: - return - args = msg.get('args', {}) - version = msg.get('version', None) - namespace = msg.get('namespace', None) - - try: - consumer = CONSUMERS[topic][0] - except (KeyError, IndexError): - raise rpc_common.Timeout("No consumers available") - else: - return consumer.call(context, version, method, namespace, args, - timeout) - - -def call(conf, context, topic, msg, timeout=None): - """Sends a message on a topic and wait for a response.""" - rv = multicall(conf, context, topic, msg, timeout) - # NOTE(vish): return the last result from the multicall - rv = list(rv) - if not rv: - return - return rv[-1] - - -def cast(conf, context, topic, msg): - check_serialize(msg) - try: - call(conf, context, topic, msg) - except Exception: - pass - - -def notify(conf, context, topic, msg, envelope): - check_serialize(msg) - - -def cleanup(): - pass - - -def 
fanout_cast(conf, context, topic, msg): - """Cast to all consumers of a topic.""" - check_serialize(msg) - method = msg.get('method') - if not method: - return - args = msg.get('args', {}) - version = msg.get('version', None) - namespace = msg.get('namespace', None) - - for consumer in CONSUMERS.get(topic, []): - try: - consumer.call(context, version, method, namespace, args, None) - except Exception: - pass diff --git a/libra/openstack/common/rpc/impl_kombu.py b/libra/openstack/common/rpc/impl_kombu.py deleted file mode 100644 index 4badbb76..00000000 --- a/libra/openstack/common/rpc/impl_kombu.py +++ /dev/null @@ -1,856 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import itertools -import socket -import ssl -import time -import uuid - -import eventlet -import greenlet -import kombu -import kombu.connection -import kombu.entity -import kombu.messaging -from oslo.config import cfg - -from libra.openstack.common import excutils -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import network_utils -from libra.openstack.common.rpc import amqp as rpc_amqp -from libra.openstack.common.rpc import common as rpc_common -from libra.openstack.common import sslutils - -kombu_opts = [ - cfg.StrOpt('kombu_ssl_version', - default='', - help='SSL version to use (valid only if SSL enabled). ' - 'valid values are TLSv1, SSLv23 and SSLv3. 
SSLv2 may ' - 'be available on some distributions' - ), - cfg.StrOpt('kombu_ssl_keyfile', - default='', - help='SSL key file (valid only if SSL enabled)'), - cfg.StrOpt('kombu_ssl_certfile', - default='', - help='SSL cert file (valid only if SSL enabled)'), - cfg.StrOpt('kombu_ssl_ca_certs', - default='', - help=('SSL certification authority file ' - '(valid only if SSL enabled)')), - cfg.StrOpt('rabbit_host', - default='localhost', - help='The RabbitMQ broker address where a single node is used'), - cfg.IntOpt('rabbit_port', - default=5672, - help='The RabbitMQ broker port where a single node is used'), - cfg.ListOpt('rabbit_hosts', - default=['$rabbit_host:$rabbit_port'], - help='RabbitMQ HA cluster host:port pairs'), - cfg.BoolOpt('rabbit_use_ssl', - default=False, - help='connect over SSL for RabbitMQ'), - cfg.StrOpt('rabbit_userid', - default='guest', - help='the RabbitMQ userid'), - cfg.StrOpt('rabbit_password', - default='guest', - help='the RabbitMQ password', - secret=True), - cfg.StrOpt('rabbit_virtual_host', - default='/', - help='the RabbitMQ virtual host'), - cfg.IntOpt('rabbit_retry_interval', - default=1, - help='how frequently to retry connecting with RabbitMQ'), - cfg.IntOpt('rabbit_retry_backoff', - default=2, - help='how long to backoff for between retries when connecting ' - 'to RabbitMQ'), - cfg.IntOpt('rabbit_max_retries', - default=0, - help='maximum retries with trying to connect to RabbitMQ ' - '(the default of 0 implies an infinite retry count)'), - cfg.BoolOpt('rabbit_ha_queues', - default=False, - help='use H/A queues in RabbitMQ (x-ha-policy: all).' - 'You need to wipe RabbitMQ database when ' - 'changing this option.'), - -] - -cfg.CONF.register_opts(kombu_opts) - -LOG = rpc_common.LOG - - -def _get_queue_arguments(conf): - """Construct the arguments for declaring a queue. - - If the rabbit_ha_queues option is set, we declare a mirrored queue - as described here: - - http://www.rabbitmq.com/ha.html - - Setting x-ha-policy to all means that the queue will be mirrored - to all nodes in the cluster. - """ - return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {} - - -class ConsumerBase(object): - """Consumer base class.""" - - def __init__(self, channel, callback, tag, **kwargs): - """Declare a queue on an amqp channel. - - 'channel' is the amqp channel to use - 'callback' is the callback to call when messages are received - 'tag' is a unique ID for the consumer on the channel - - queue name, exchange name, and other kombu options are - passed in here as a dictionary. - """ - self.callback = callback - self.tag = str(tag) - self.kwargs = kwargs - self.queue = None - self.ack_on_error = kwargs.get('ack_on_error', True) - self.reconnect(channel) - - def reconnect(self, channel): - """Re-declare the queue after a rabbit reconnect.""" - self.channel = channel - self.kwargs['channel'] = channel - self.queue = kombu.entity.Queue(**self.kwargs) - self.queue.declare() - - def _callback_handler(self, message, callback): - """Call callback with deserialized message. - - Messages that are processed without exception are ack'ed. - - If the message processing generates an exception, it will be - ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed. - """ - - try: - msg = rpc_common.deserialize_msg(message.payload) - callback(msg) - except Exception: - if self.ack_on_error: - LOG.exception(_("Failed to process message" - " ... skipping it.")) - message.ack() - else: - LOG.exception(_("Failed to process message" - " ... 
will requeue.")) - message.requeue() - else: - message.ack() - - def consume(self, *args, **kwargs): - """Actually declare the consumer on the amqp channel. This will - start the flow of messages from the queue. Using the - Connection.iterconsume() iterator will process the messages, - calling the appropriate callback. - - If a callback is specified in kwargs, use that. Otherwise, - use the callback passed during __init__() - - If kwargs['nowait'] is True, then this call will block until - a message is read. - - """ - - options = {'consumer_tag': self.tag} - options['nowait'] = kwargs.get('nowait', False) - callback = kwargs.get('callback', self.callback) - if not callback: - raise ValueError("No callback defined") - - def _callback(raw_message): - message = self.channel.message_to_python(raw_message) - self._callback_handler(message, callback) - - self.queue.consume(*args, callback=_callback, **options) - - def cancel(self): - """Cancel the consuming from the queue, if it has started.""" - try: - self.queue.cancel(self.tag) - except KeyError as e: - # NOTE(comstud): Kludge to get around a amqplib bug - if str(e) != "u'%s'" % self.tag: - raise - self.queue = None - - -class DirectConsumer(ConsumerBase): - """Queue/consumer class for 'direct'.""" - - def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): - """Init a 'direct' queue. - - 'channel' is the amqp channel to use - 'msg_id' is the msg_id to listen on - 'callback' is the callback to call when messages are received - 'tag' is a unique ID for the consumer on the channel - - Other kombu options may be passed - """ - # Default options - options = {'durable': False, - 'queue_arguments': _get_queue_arguments(conf), - 'auto_delete': True, - 'exclusive': False} - options.update(kwargs) - exchange = kombu.entity.Exchange(name=msg_id, - type='direct', - durable=options['durable'], - auto_delete=options['auto_delete']) - super(DirectConsumer, self).__init__(channel, - callback, - tag, - name=msg_id, - exchange=exchange, - routing_key=msg_id, - **options) - - -class TopicConsumer(ConsumerBase): - """Consumer class for 'topic'.""" - - def __init__(self, conf, channel, topic, callback, tag, name=None, - exchange_name=None, **kwargs): - """Init a 'topic' queue. - - :param channel: the amqp channel to use - :param topic: the topic to listen on - :paramtype topic: str - :param callback: the callback to call when messages are received - :param tag: a unique ID for the consumer on the channel - :param name: optional queue name, defaults to topic - :paramtype name: str - - Other kombu options may be passed as keyword arguments - """ - # Default options - options = {'durable': conf.amqp_durable_queues, - 'queue_arguments': _get_queue_arguments(conf), - 'auto_delete': conf.amqp_auto_delete, - 'exclusive': False} - options.update(kwargs) - exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) - exchange = kombu.entity.Exchange(name=exchange_name, - type='topic', - durable=options['durable'], - auto_delete=options['auto_delete']) - super(TopicConsumer, self).__init__(channel, - callback, - tag, - name=name or topic, - exchange=exchange, - routing_key=topic, - **options) - - -class FanoutConsumer(ConsumerBase): - """Consumer class for 'fanout'.""" - - def __init__(self, conf, channel, topic, callback, tag, **kwargs): - """Init a 'fanout' queue. 
- - 'channel' is the amqp channel to use - 'topic' is the topic to listen on - 'callback' is the callback to call when messages are received - 'tag' is a unique ID for the consumer on the channel - - Other kombu options may be passed - """ - unique = uuid.uuid4().hex - exchange_name = '%s_fanout' % topic - queue_name = '%s_fanout_%s' % (topic, unique) - - # Default options - options = {'durable': False, - 'queue_arguments': _get_queue_arguments(conf), - 'auto_delete': True, - 'exclusive': False} - options.update(kwargs) - exchange = kombu.entity.Exchange(name=exchange_name, type='fanout', - durable=options['durable'], - auto_delete=options['auto_delete']) - super(FanoutConsumer, self).__init__(channel, callback, tag, - name=queue_name, - exchange=exchange, - routing_key=topic, - **options) - - -class Publisher(object): - """Base Publisher class.""" - - def __init__(self, channel, exchange_name, routing_key, **kwargs): - """Init the Publisher class with the exchange_name, routing_key, - and other options - """ - self.exchange_name = exchange_name - self.routing_key = routing_key - self.kwargs = kwargs - self.reconnect(channel) - - def reconnect(self, channel): - """Re-establish the Producer after a rabbit reconnection.""" - self.exchange = kombu.entity.Exchange(name=self.exchange_name, - **self.kwargs) - self.producer = kombu.messaging.Producer(exchange=self.exchange, - channel=channel, - routing_key=self.routing_key) - - def send(self, msg, timeout=None): - """Send a message.""" - if timeout: - # - # AMQP TTL is in milliseconds when set in the header. - # - self.producer.publish(msg, headers={'ttl': (timeout * 1000)}) - else: - self.producer.publish(msg) - - -class DirectPublisher(Publisher): - """Publisher class for 'direct'.""" - def __init__(self, conf, channel, msg_id, **kwargs): - """init a 'direct' publisher. - - Kombu options may be passed as keyword args to override defaults - """ - - options = {'durable': False, - 'auto_delete': True, - 'exclusive': False} - options.update(kwargs) - super(DirectPublisher, self).__init__(channel, msg_id, msg_id, - type='direct', **options) - - -class TopicPublisher(Publisher): - """Publisher class for 'topic'.""" - def __init__(self, conf, channel, topic, **kwargs): - """init a 'topic' publisher. - - Kombu options may be passed as keyword args to override defaults - """ - options = {'durable': conf.amqp_durable_queues, - 'auto_delete': conf.amqp_auto_delete, - 'exclusive': False} - options.update(kwargs) - exchange_name = rpc_amqp.get_control_exchange(conf) - super(TopicPublisher, self).__init__(channel, - exchange_name, - topic, - type='topic', - **options) - - -class FanoutPublisher(Publisher): - """Publisher class for 'fanout'.""" - def __init__(self, conf, channel, topic, **kwargs): - """init a 'fanout' publisher. 
- - Kombu options may be passed as keyword args to override defaults - """ - options = {'durable': False, - 'auto_delete': True, - 'exclusive': False} - options.update(kwargs) - super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic, - None, type='fanout', **options) - - -class NotifyPublisher(TopicPublisher): - """Publisher class for 'notify'.""" - - def __init__(self, conf, channel, topic, **kwargs): - self.durable = kwargs.pop('durable', conf.amqp_durable_queues) - self.queue_arguments = _get_queue_arguments(conf) - super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) - - def reconnect(self, channel): - super(NotifyPublisher, self).reconnect(channel) - - # NOTE(jerdfelt): Normally the consumer would create the queue, but - # we do this to ensure that messages don't get dropped if the - # consumer is started after we do - queue = kombu.entity.Queue(channel=channel, - exchange=self.exchange, - durable=self.durable, - name=self.routing_key, - routing_key=self.routing_key, - queue_arguments=self.queue_arguments) - queue.declare() - - -class Connection(object): - """Connection object.""" - - pool = None - - def __init__(self, conf, server_params=None): - self.consumers = [] - self.consumer_thread = None - self.proxy_callbacks = [] - self.conf = conf - self.max_retries = self.conf.rabbit_max_retries - # Try forever? - if self.max_retries <= 0: - self.max_retries = None - self.interval_start = self.conf.rabbit_retry_interval - self.interval_stepping = self.conf.rabbit_retry_backoff - # max retry-interval = 30 seconds - self.interval_max = 30 - self.memory_transport = False - - if server_params is None: - server_params = {} - # Keys to translate from server_params to kombu params - server_params_to_kombu_params = {'username': 'userid'} - - ssl_params = self._fetch_ssl_params() - params_list = [] - for adr in self.conf.rabbit_hosts: - hostname, port = network_utils.parse_host_port( - adr, default_port=self.conf.rabbit_port) - - params = { - 'hostname': hostname, - 'port': port, - 'userid': self.conf.rabbit_userid, - 'password': self.conf.rabbit_password, - 'virtual_host': self.conf.rabbit_virtual_host, - } - - for sp_key, value in server_params.iteritems(): - p_key = server_params_to_kombu_params.get(sp_key, sp_key) - params[p_key] = value - - if self.conf.fake_rabbit: - params['transport'] = 'memory' - if self.conf.rabbit_use_ssl: - params['ssl'] = ssl_params - - params_list.append(params) - - self.params_list = params_list - - self.memory_transport = self.conf.fake_rabbit - - self.connection = None - self.reconnect() - - def _fetch_ssl_params(self): - """Handles fetching what ssl params should be used for the connection - (if any). - """ - ssl_params = dict() - - # http://docs.python.org/library/ssl.html - ssl.wrap_socket - if self.conf.kombu_ssl_version: - ssl_params['ssl_version'] = sslutils.validate_ssl_version( - self.conf.kombu_ssl_version) - if self.conf.kombu_ssl_keyfile: - ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile - if self.conf.kombu_ssl_certfile: - ssl_params['certfile'] = self.conf.kombu_ssl_certfile - if self.conf.kombu_ssl_ca_certs: - ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs - # We might want to allow variations in the - # future with this? - ssl_params['cert_reqs'] = ssl.CERT_REQUIRED - - # Return the extended behavior or just have the default behavior - return ssl_params or True - - def _connect(self, params): - """Connect to rabbit. Re-establish any queues that may have - been declared before if we are reconnecting. 
Exceptions should - be handled by the caller. - """ - if self.connection: - LOG.info(_("Reconnecting to AMQP server on " - "%(hostname)s:%(port)d") % params) - try: - self.connection.release() - except self.connection_errors: - pass - # Setting this in case the next statement fails, though - # it shouldn't be doing any network operations, yet. - self.connection = None - self.connection = kombu.connection.BrokerConnection(**params) - self.connection_errors = self.connection.connection_errors - if self.memory_transport: - # Kludge to speed up tests. - self.connection.transport.polling_interval = 0.0 - self.consumer_num = itertools.count(1) - self.connection.connect() - self.channel = self.connection.channel() - # work around 'memory' transport bug in 1.1.3 - if self.memory_transport: - self.channel._new_queue('ae.undeliver') - for consumer in self.consumers: - consumer.reconnect(self.channel) - LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % - params) - - def reconnect(self): - """Handles reconnecting and re-establishing queues. - Will retry up to self.max_retries number of times. - self.max_retries = 0 means to retry forever. - Sleep between tries, starting at self.interval_start - seconds, backing off self.interval_stepping number of seconds - each attempt. - """ - - attempt = 0 - while True: - params = self.params_list[attempt % len(self.params_list)] - attempt += 1 - try: - self._connect(params) - return - except (IOError, self.connection_errors) as e: - pass - except Exception as e: - # NOTE(comstud): Unfortunately it's possible for amqplib - # to return an error not covered by its transport - # connection_errors in the case of a timeout waiting for - # a protocol response. (See paste link in LP888621) - # So, we check all exceptions for 'timeout' in them - # and try to reconnect in this case. - if 'timeout' not in str(e): - raise - - log_info = {} - log_info['err_str'] = str(e) - log_info['max_retries'] = self.max_retries - log_info.update(params) - - if self.max_retries and attempt == self.max_retries: - msg = _('Unable to connect to AMQP server on ' - '%(hostname)s:%(port)d after %(max_retries)d ' - 'tries: %(err_str)s') % log_info - LOG.error(msg) - raise rpc_common.RPCException(msg) - - if attempt == 1: - sleep_time = self.interval_start or 1 - elif attempt > 1: - sleep_time += self.interval_stepping - if self.interval_max: - sleep_time = min(sleep_time, self.interval_max) - - log_info['sleep_time'] = sleep_time - LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' - 'unreachable: %(err_str)s. Trying again in ' - '%(sleep_time)d seconds.') % log_info) - time.sleep(sleep_time) - - def ensure(self, error_callback, method, *args, **kwargs): - while True: - try: - return method(*args, **kwargs) - except (self.connection_errors, socket.timeout, IOError) as e: - if error_callback: - error_callback(e) - except Exception as e: - # NOTE(comstud): Unfortunately it's possible for amqplib - # to return an error not covered by its transport - # connection_errors in the case of a timeout waiting for - # a protocol response. (See paste link in LP888621) - # So, we check all exceptions for 'timeout' in them - # and try to reconnect in this case. 
- if 'timeout' not in str(e): - raise - if error_callback: - error_callback(e) - self.reconnect() - - def get_channel(self): - """Convenience call for bin/clear_rabbit_queues.""" - return self.channel - - def close(self): - """Close/release this connection.""" - self.cancel_consumer_thread() - self.wait_on_proxy_callbacks() - self.connection.release() - self.connection = None - - def reset(self): - """Reset a connection so it can be used again.""" - self.cancel_consumer_thread() - self.wait_on_proxy_callbacks() - self.channel.close() - self.channel = self.connection.channel() - # work around 'memory' transport bug in 1.1.3 - if self.memory_transport: - self.channel._new_queue('ae.undeliver') - self.consumers = [] - - def declare_consumer(self, consumer_cls, topic, callback): - """Create a Consumer using the class that was passed in and - add it to our list of consumers - """ - - def _connect_error(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.error(_("Failed to declare consumer for topic '%(topic)s': " - "%(err_str)s") % log_info) - - def _declare_consumer(): - consumer = consumer_cls(self.conf, self.channel, topic, callback, - self.consumer_num.next()) - self.consumers.append(consumer) - return consumer - - return self.ensure(_connect_error, _declare_consumer) - - def iterconsume(self, limit=None, timeout=None): - """Return an iterator that will consume from all queues/consumers.""" - - info = {'do_consume': True} - - def _error_callback(exc): - if isinstance(exc, socket.timeout): - LOG.debug(_('Timed out waiting for RPC response: %s') % - str(exc)) - raise rpc_common.Timeout() - else: - LOG.exception(_('Failed to consume message from queue: %s') % - str(exc)) - info['do_consume'] = True - - def _consume(): - if info['do_consume']: - queues_head = self.consumers[:-1] # not fanout. - queues_tail = self.consumers[-1] # fanout - for queue in queues_head: - queue.consume(nowait=True) - queues_tail.consume(nowait=False) - info['do_consume'] = False - return self.connection.drain_events(timeout=timeout) - - for iteration in itertools.count(0): - if limit and iteration >= limit: - raise StopIteration - yield self.ensure(_error_callback, _consume) - - def cancel_consumer_thread(self): - """Cancel a consumer thread.""" - if self.consumer_thread is not None: - self.consumer_thread.kill() - try: - self.consumer_thread.wait() - except greenlet.GreenletExit: - pass - self.consumer_thread = None - - def wait_on_proxy_callbacks(self): - """Wait for all proxy callback threads to exit.""" - for proxy_cb in self.proxy_callbacks: - proxy_cb.wait() - - def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): - """Send to a publisher based on the publisher class.""" - - def _error_callback(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.exception(_("Failed to publish message to topic " - "'%(topic)s': %(err_str)s") % log_info) - - def _publish(): - publisher = cls(self.conf, self.channel, topic, **kwargs) - publisher.send(msg, timeout) - - self.ensure(_error_callback, _publish) - - def declare_direct_consumer(self, topic, callback): - """Create a 'direct' queue. 
- In nova's use, this is generally a msg_id queue used for - responses for call/multicall - """ - self.declare_consumer(DirectConsumer, topic, callback) - - def declare_topic_consumer(self, topic, callback=None, queue_name=None, - exchange_name=None, ack_on_error=True): - """Create a 'topic' consumer.""" - self.declare_consumer(functools.partial(TopicConsumer, - name=queue_name, - exchange_name=exchange_name, - ack_on_error=ack_on_error, - ), - topic, callback) - - def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer.""" - self.declare_consumer(FanoutConsumer, topic, callback) - - def direct_send(self, msg_id, msg): - """Send a 'direct' message.""" - self.publisher_send(DirectPublisher, msg_id, msg) - - def topic_send(self, topic, msg, timeout=None): - """Send a 'topic' message.""" - self.publisher_send(TopicPublisher, topic, msg, timeout) - - def fanout_send(self, topic, msg): - """Send a 'fanout' message.""" - self.publisher_send(FanoutPublisher, topic, msg) - - def notify_send(self, topic, msg, **kwargs): - """Send a notify message on a topic.""" - self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) - - def consume(self, limit=None): - """Consume from all queues/consumers.""" - it = self.iterconsume(limit=limit) - while True: - try: - it.next() - except StopIteration: - return - - def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread.""" - @excutils.forever_retry_uncaught_exceptions - def _consumer_thread(): - try: - self.consume() - except greenlet.GreenletExit: - return - if self.consumer_thread is None: - self.consumer_thread = eventlet.spawn(_consumer_thread) - return self.consumer_thread - - def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object.""" - proxy_cb = rpc_amqp.ProxyCallback( - self.conf, proxy, - rpc_amqp.get_connection_pool(self.conf, Connection)) - self.proxy_callbacks.append(proxy_cb) - - if fanout: - self.declare_fanout_consumer(topic, proxy_cb) - else: - self.declare_topic_consumer(topic, proxy_cb) - - def create_worker(self, topic, proxy, pool_name): - """Create a worker that calls a method in a proxy object.""" - proxy_cb = rpc_amqp.ProxyCallback( - self.conf, proxy, - rpc_amqp.get_connection_pool(self.conf, Connection)) - self.proxy_callbacks.append(proxy_cb) - self.declare_topic_consumer(topic, proxy_cb, pool_name) - - def join_consumer_pool(self, callback, pool_name, topic, - exchange_name=None, ack_on_error=True): - """Register as a member of a group of consumers for a given topic from - the specified exchange. - - Exactly one member of a given pool will receive each message. - - A message will be delivered to multiple pools, if more than - one is created. 
- """ - callback_wrapper = rpc_amqp.CallbackWrapper( - conf=self.conf, - callback=callback, - connection_pool=rpc_amqp.get_connection_pool(self.conf, - Connection), - wait_for_consumers=not ack_on_error - ) - self.proxy_callbacks.append(callback_wrapper) - self.declare_topic_consumer( - queue_name=pool_name, - topic=topic, - exchange_name=exchange_name, - callback=callback_wrapper, - ack_on_error=ack_on_error, - ) - - -def create_connection(conf, new=True): - """Create a connection.""" - return rpc_amqp.create_connection( - conf, new, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def multicall(conf, context, topic, msg, timeout=None): - """Make a call that returns multiple times.""" - return rpc_amqp.multicall( - conf, context, topic, msg, timeout, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def call(conf, context, topic, msg, timeout=None): - """Sends a message on a topic and wait for a response.""" - return rpc_amqp.call( - conf, context, topic, msg, timeout, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def cast(conf, context, topic, msg): - """Sends a message on a topic without waiting for a response.""" - return rpc_amqp.cast( - conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def fanout_cast(conf, context, topic, msg): - """Sends a message on a fanout exchange without waiting for a response.""" - return rpc_amqp.fanout_cast( - conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def cast_to_server(conf, context, server_params, topic, msg): - """Sends a message on a topic to a specific server.""" - return rpc_amqp.cast_to_server( - conf, context, server_params, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def fanout_cast_to_server(conf, context, server_params, topic, msg): - """Sends a message on a fanout exchange to a specific server.""" - return rpc_amqp.fanout_cast_to_server( - conf, context, server_params, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def notify(conf, context, topic, msg, envelope): - """Sends a notification event on a topic.""" - return rpc_amqp.notify( - conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection), - envelope) - - -def cleanup(): - return rpc_amqp.cleanup(Connection.pool) diff --git a/libra/openstack/common/rpc/impl_qpid.py b/libra/openstack/common/rpc/impl_qpid.py deleted file mode 100644 index 0c4e2b1a..00000000 --- a/libra/openstack/common/rpc/impl_qpid.py +++ /dev/null @@ -1,833 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation -# Copyright 2011 - 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
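The kombu driver's `reconnect()` above walks `rabbit_hosts` round-robin and sleeps between failed attempts: `rabbit_retry_interval` seconds after the first failure, growing by `rabbit_retry_backoff` each attempt and capped at 30 seconds. A standalone sketch of just that schedule follows; the host names and the five-attempt cap are illustrative, and the real loop retries forever when `rabbit_max_retries` is 0::

    def retry_schedule(hosts, interval_start=1, interval_stepping=2,
                       interval_max=30, max_retries=5):
        # Yields (broker, seconds_to_sleep_after_failure) per attempt,
        # following the arithmetic in Connection.reconnect() above.
        sleep_time = 0
        for attempt in range(1, max_retries + 1):
            broker = hosts[(attempt - 1) % len(hosts)]
            if attempt == 1:
                sleep_time = interval_start or 1
            else:
                sleep_time = min(sleep_time + interval_stepping,
                                 interval_max)
            yield broker, sleep_time

    for broker, pause in retry_schedule(['rabbit1:5672', 'rabbit2:5672']):
        print('try %s, then back off %ds' % (broker, pause))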
- -import functools -import itertools -import time -import uuid - -import eventlet -import greenlet -from oslo.config import cfg - -from libra.openstack.common import excutils -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import jsonutils -from libra.openstack.common import log as logging -from libra.openstack.common.rpc import amqp as rpc_amqp -from libra.openstack.common.rpc import common as rpc_common - -qpid_codec = importutils.try_import("qpid.codec010") -qpid_messaging = importutils.try_import("qpid.messaging") -qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") - -LOG = logging.getLogger(__name__) - -qpid_opts = [ - cfg.StrOpt('qpid_hostname', - default='localhost', - help='Qpid broker hostname'), - cfg.IntOpt('qpid_port', - default=5672, - help='Qpid broker port'), - cfg.ListOpt('qpid_hosts', - default=['$qpid_hostname:$qpid_port'], - help='Qpid HA cluster host:port pairs'), - cfg.StrOpt('qpid_username', - default='', - help='Username for qpid connection'), - cfg.StrOpt('qpid_password', - default='', - help='Password for qpid connection', - secret=True), - cfg.StrOpt('qpid_sasl_mechanisms', - default='', - help='Space separated list of SASL mechanisms to use for auth'), - cfg.IntOpt('qpid_heartbeat', - default=60, - help='Seconds between connection keepalive heartbeats'), - cfg.StrOpt('qpid_protocol', - default='tcp', - help="Transport to use, either 'tcp' or 'ssl'"), - cfg.BoolOpt('qpid_tcp_nodelay', - default=True, - help='Disable Nagle algorithm'), - # NOTE(russellb) If any additional versions are added (beyond 1 and 2), - # this file could probably use some additional refactoring so that the - # differences between each version are split into different classes. - cfg.IntOpt('qpid_topology_version', - default=1, - help="The qpid topology version to use. Version 1 is what " - "was originally used by impl_qpid. Version 2 includes " - "some backwards-incompatible changes that allow broker " - "federation to work. Users should update to version 2 " - "when they are able to take everything down, as it " - "requires a clean break."), -] - -cfg.CONF.register_opts(qpid_opts) - -JSON_CONTENT_TYPE = 'application/json; charset=utf8' - - -def raise_invalid_topology_version(conf): - msg = (_("Invalid value for qpid_topology_version: %d") % - conf.qpid_topology_version) - LOG.error(msg) - raise Exception(msg) - - -class ConsumerBase(object): - """Consumer base class.""" - - def __init__(self, conf, session, callback, node_name, node_opts, - link_name, link_opts): - """Declare a queue on an amqp session. - - 'session' is the amqp session to use - 'callback' is the callback to call when messages are received - 'node_name' is the first part of the Qpid address string, before ';' - 'node_opts' will be applied to the "x-declare" section of "node" - in the address string. - 'link_name' goes into the "name" field of the "link" in the address - string - 'link_opts' will be applied to the "x-declare" section of "link" - in the address string. 
- """ - self.callback = callback - self.receiver = None - self.session = None - - if conf.qpid_topology_version == 1: - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { - "durable": True, - "auto-delete": True, - }, - }, - "link": { - "name": link_name, - "durable": True, - "x-declare": { - "durable": False, - "auto-delete": True, - "exclusive": False, - }, - }, - } - addr_opts["node"]["x-declare"].update(node_opts) - elif conf.qpid_topology_version == 2: - addr_opts = { - "link": { - "x-declare": { - "auto-delete": True, - }, - }, - } - else: - raise_invalid_topology_version() - - addr_opts["link"]["x-declare"].update(link_opts) - - self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) - - self.connect(session) - - def connect(self, session): - """Declare the reciever on connect.""" - self._declare_receiver(session) - - def reconnect(self, session): - """Re-declare the receiver after a qpid reconnect.""" - self._declare_receiver(session) - - def _declare_receiver(self, session): - self.session = session - self.receiver = session.receiver(self.address) - self.receiver.capacity = 1 - - def _unpack_json_msg(self, msg): - """Load the JSON data in msg if msg.content_type indicates that it - is necessary. Put the loaded data back into msg.content and - update msg.content_type appropriately. - - A Qpid Message containing a dict will have a content_type of - 'amqp/map', whereas one containing a string that needs to be converted - back from JSON will have a content_type of JSON_CONTENT_TYPE. - - :param msg: a Qpid Message object - :returns: None - """ - if msg.content_type == JSON_CONTENT_TYPE: - msg.content = jsonutils.loads(msg.content) - msg.content_type = 'amqp/map' - - def consume(self): - """Fetch the message and pass it to the callback object.""" - message = self.receiver.fetch() - try: - self._unpack_json_msg(message) - msg = rpc_common.deserialize_msg(message.content) - self.callback(msg) - except Exception: - LOG.exception(_("Failed to process message... skipping it.")) - finally: - # TODO(sandy): Need support for optional ack_on_error. - self.session.acknowledge(message) - - def get_receiver(self): - return self.receiver - - def get_node_name(self): - return self.address.split(';')[0] - - -class DirectConsumer(ConsumerBase): - """Queue/consumer class for 'direct'.""" - - def __init__(self, conf, session, msg_id, callback): - """Init a 'direct' queue. - - 'session' is the amqp session to use - 'msg_id' is the msg_id to listen on - 'callback' is the callback to call when messages are received - """ - - link_opts = { - "auto-delete": conf.amqp_auto_delete, - "exclusive": True, - "durable": conf.amqp_durable_queues, - } - - if conf.qpid_topology_version == 1: - node_name = "%s/%s" % (msg_id, msg_id) - node_opts = {"type": "direct"} - elif conf.qpid_topology_version == 2: - node_name = "amq.direct/%s" % msg_id - node_opts = {} - else: - raise_invalid_topology_version() - - super(DirectConsumer, self).__init__(conf, session, callback, - node_name, node_opts, msg_id, - link_opts) - - -class TopicConsumer(ConsumerBase): - """Consumer class for 'topic'.""" - - def __init__(self, conf, session, topic, callback, name=None, - exchange_name=None): - """Init a 'topic' queue. 
- - :param session: the amqp session to use - :param topic: is the topic to listen on - :paramtype topic: str - :param callback: the callback to call when messages are received - :param name: optional queue name, defaults to topic - """ - - exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) - link_opts = { - "auto-delete": conf.amqp_auto_delete, - "durable": conf.amqp_durable_queues, - } - - if conf.qpid_topology_version == 1: - node_name = "%s/%s" % (exchange_name, topic) - elif conf.qpid_topology_version == 2: - node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) - else: - raise_invalid_topology_version() - - super(TopicConsumer, self).__init__(conf, session, callback, node_name, - {}, name or topic, link_opts) - - -class FanoutConsumer(ConsumerBase): - """Consumer class for 'fanout'.""" - - def __init__(self, conf, session, topic, callback): - """Init a 'fanout' queue. - - 'session' is the amqp session to use - 'topic' is the topic to listen on - 'callback' is the callback to call when messages are received - """ - self.conf = conf - - link_opts = {"exclusive": True} - - if conf.qpid_topology_version == 1: - node_name = "%s_fanout" % topic - node_opts = {"durable": False, "type": "fanout"} - link_name = "%s_fanout_%s" % (topic, uuid.uuid4().hex) - elif conf.qpid_topology_version == 2: - node_name = "amq.topic/fanout/%s" % topic - node_opts = {} - link_name = "" - else: - raise_invalid_topology_version() - - super(FanoutConsumer, self).__init__(conf, session, callback, - node_name, node_opts, link_name, - link_opts) - - def reconnect(self, session): - topic = self.get_node_name().rpartition('_fanout')[0] - params = { - 'session': session, - 'topic': topic, - 'callback': self.callback, - } - - self.__init__(conf=self.conf, **params) - - super(FanoutConsumer, self).reconnect(session) - - -class Publisher(object): - """Base Publisher class.""" - - def __init__(self, conf, session, node_name, node_opts=None): - """Init the Publisher class with the exchange_name, routing_key, - and other options - """ - self.sender = None - self.session = session - - if conf.qpid_topology_version == 1: - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { - "durable": False, - # auto-delete isn't implemented for exchanges in qpid, - # but put in here anyway - "auto-delete": True, - }, - }, - } - if node_opts: - addr_opts["node"]["x-declare"].update(node_opts) - - self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) - elif conf.qpid_topology_version == 2: - self.address = node_name - else: - raise_invalid_topology_version() - - self.reconnect(session) - - def reconnect(self, session): - """Re-establish the Sender after a reconnection.""" - self.sender = session.sender(self.address) - - def _pack_json_msg(self, msg): - """Qpid cannot serialize dicts containing strings longer than 65535 - characters. This function dumps the message content to a JSON - string, which Qpid is able to handle. - - :param msg: May be either a Qpid Message object or a bare dict. - :returns: A Qpid Message with its content field JSON encoded. - """ - try: - msg.content = jsonutils.dumps(msg.content) - except AttributeError: - # Need to have a Qpid message so we can set the content_type. 
- msg = qpid_messaging.Message(jsonutils.dumps(msg)) - msg.content_type = JSON_CONTENT_TYPE - return msg - - def send(self, msg): - """Send a message.""" - try: - # Check if Qpid can encode the message - check_msg = msg - if not hasattr(check_msg, 'content_type'): - check_msg = qpid_messaging.Message(msg) - content_type = check_msg.content_type - enc, dec = qpid_messaging.message.get_codec(content_type) - enc(check_msg.content) - except qpid_codec.CodecException: - # This means the message couldn't be serialized as a dict. - msg = self._pack_json_msg(msg) - self.sender.send(msg) - - -class DirectPublisher(Publisher): - """Publisher class for 'direct'.""" - def __init__(self, conf, session, msg_id): - """Init a 'direct' publisher.""" - - if conf.qpid_topology_version == 1: - node_name = msg_id - node_opts = {"type": "direct"} - elif conf.qpid_topology_version == 2: - node_name = "amq.direct/%s" % msg_id - node_opts = {} - else: - raise_invalid_topology_version() - - super(DirectPublisher, self).__init__(conf, session, node_name, - node_opts) - - -class TopicPublisher(Publisher): - """Publisher class for 'topic'.""" - def __init__(self, conf, session, topic): - """init a 'topic' publisher. - """ - exchange_name = rpc_amqp.get_control_exchange(conf) - - if conf.qpid_topology_version == 1: - node_name = "%s/%s" % (exchange_name, topic) - elif conf.qpid_topology_version == 2: - node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) - else: - raise_invalid_topology_version() - - super(TopicPublisher, self).__init__(conf, session, node_name) - - -class FanoutPublisher(Publisher): - """Publisher class for 'fanout'.""" - def __init__(self, conf, session, topic): - """init a 'fanout' publisher. - """ - - if conf.qpid_topology_version == 1: - node_name = "%s_fanout" % topic - node_opts = {"type": "fanout"} - elif conf.qpid_topology_version == 2: - node_name = "amq.topic/fanout/%s" % topic - node_opts = {} - else: - raise_invalid_topology_version() - - super(FanoutPublisher, self).__init__(conf, session, node_name, - node_opts) - - -class NotifyPublisher(Publisher): - """Publisher class for notifications.""" - def __init__(self, conf, session, topic): - """init a 'topic' publisher. - """ - exchange_name = rpc_amqp.get_control_exchange(conf) - node_opts = {"durable": True} - - if conf.qpid_topology_version == 1: - node_name = "%s/%s" % (exchange_name, topic) - elif conf.qpid_topology_version == 2: - node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) - else: - raise_invalid_topology_version() - - super(NotifyPublisher, self).__init__(conf, session, node_name, - node_opts) - - -class Connection(object): - """Connection object.""" - - pool = None - - def __init__(self, conf, server_params=None): - if not qpid_messaging: - raise ImportError("Failed to import qpid.messaging") - - self.session = None - self.consumers = {} - self.consumer_thread = None - self.proxy_callbacks = [] - self.conf = conf - - if server_params and 'hostname' in server_params: - # NOTE(russellb) This enables support for cast_to_server. 
- server_params['qpid_hosts'] = [ - '%s:%d' % (server_params['hostname'], - server_params.get('port', 5672)) - ] - - params = { - 'qpid_hosts': self.conf.qpid_hosts, - 'username': self.conf.qpid_username, - 'password': self.conf.qpid_password, - } - params.update(server_params or {}) - - self.brokers = params['qpid_hosts'] - self.username = params['username'] - self.password = params['password'] - self.connection_create(self.brokers[0]) - self.reconnect() - - def connection_create(self, broker): - # Create the connection - this does not open the connection - self.connection = qpid_messaging.Connection(broker) - - # Check if flags are set and if so set them for the connection - # before we call open - self.connection.username = self.username - self.connection.password = self.password - - self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms - # Reconnection is done by self.reconnect() - self.connection.reconnect = False - self.connection.heartbeat = self.conf.qpid_heartbeat - self.connection.transport = self.conf.qpid_protocol - self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay - - def _register_consumer(self, consumer): - self.consumers[str(consumer.get_receiver())] = consumer - - def _lookup_consumer(self, receiver): - return self.consumers[str(receiver)] - - def reconnect(self): - """Handles reconnecting and re-establishing sessions and queues.""" - attempt = 0 - delay = 1 - while True: - # Close the session if necessary - if self.connection.opened(): - try: - self.connection.close() - except qpid_exceptions.ConnectionError: - pass - - broker = self.brokers[attempt % len(self.brokers)] - attempt += 1 - - try: - self.connection_create(broker) - self.connection.open() - except qpid_exceptions.ConnectionError as e: - msg_dict = dict(e=e, delay=delay) - msg = _("Unable to connect to AMQP server: %(e)s. " - "Sleeping %(delay)s seconds") % msg_dict - LOG.error(msg) - time.sleep(delay) - delay = min(2 * delay, 60) - else: - LOG.info(_('Connected to AMQP server on %s'), broker) - break - - self.session = self.connection.session() - - if self.consumers: - consumers = self.consumers - self.consumers = {} - - for consumer in consumers.itervalues(): - consumer.reconnect(self.session) - self._register_consumer(consumer) - - LOG.debug(_("Re-established AMQP queues")) - - def ensure(self, error_callback, method, *args, **kwargs): - while True: - try: - return method(*args, **kwargs) - except (qpid_exceptions.Empty, - qpid_exceptions.ConnectionError) as e: - if error_callback: - error_callback(e) - self.reconnect() - - def close(self): - """Close/release this connection.""" - self.cancel_consumer_thread() - self.wait_on_proxy_callbacks() - try: - self.connection.close() - except Exception: - # NOTE(dripton) Logging exceptions that happen during cleanup just - # causes confusion; there's really nothing useful we can do with - # them. 
- pass - self.connection = None - - def reset(self): - """Reset a connection so it can be used again.""" - self.cancel_consumer_thread() - self.wait_on_proxy_callbacks() - self.session.close() - self.session = self.connection.session() - self.consumers = {} - - def declare_consumer(self, consumer_cls, topic, callback): - """Create a Consumer using the class that was passed in and - add it to our list of consumers - """ - def _connect_error(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.error(_("Failed to declare consumer for topic '%(topic)s': " - "%(err_str)s") % log_info) - - def _declare_consumer(): - consumer = consumer_cls(self.conf, self.session, topic, callback) - self._register_consumer(consumer) - return consumer - - return self.ensure(_connect_error, _declare_consumer) - - def iterconsume(self, limit=None, timeout=None): - """Return an iterator that will consume from all queues/consumers.""" - - def _error_callback(exc): - if isinstance(exc, qpid_exceptions.Empty): - LOG.debug(_('Timed out waiting for RPC response: %s') % - str(exc)) - raise rpc_common.Timeout() - else: - LOG.exception(_('Failed to consume message from queue: %s') % - str(exc)) - - def _consume(): - nxt_receiver = self.session.next_receiver(timeout=timeout) - try: - self._lookup_consumer(nxt_receiver).consume() - except Exception: - LOG.exception(_("Error processing message. Skipping it.")) - - for iteration in itertools.count(0): - if limit and iteration >= limit: - raise StopIteration - yield self.ensure(_error_callback, _consume) - - def cancel_consumer_thread(self): - """Cancel a consumer thread.""" - if self.consumer_thread is not None: - self.consumer_thread.kill() - try: - self.consumer_thread.wait() - except greenlet.GreenletExit: - pass - self.consumer_thread = None - - def wait_on_proxy_callbacks(self): - """Wait for all proxy callback threads to exit.""" - for proxy_cb in self.proxy_callbacks: - proxy_cb.wait() - - def publisher_send(self, cls, topic, msg): - """Send to a publisher based on the publisher class.""" - - def _connect_error(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.exception(_("Failed to publish message to topic " - "'%(topic)s': %(err_str)s") % log_info) - - def _publisher_send(): - publisher = cls(self.conf, self.session, topic) - publisher.send(msg) - - return self.ensure(_connect_error, _publisher_send) - - def declare_direct_consumer(self, topic, callback): - """Create a 'direct' queue. - In nova's use, this is generally a msg_id queue used for - responses for call/multicall - """ - self.declare_consumer(DirectConsumer, topic, callback) - - def declare_topic_consumer(self, topic, callback=None, queue_name=None, - exchange_name=None): - """Create a 'topic' consumer.""" - self.declare_consumer(functools.partial(TopicConsumer, - name=queue_name, - exchange_name=exchange_name, - ), - topic, callback) - - def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer.""" - self.declare_consumer(FanoutConsumer, topic, callback) - - def direct_send(self, msg_id, msg): - """Send a 'direct' message.""" - self.publisher_send(DirectPublisher, msg_id, msg) - - def topic_send(self, topic, msg, timeout=None): - """Send a 'topic' message.""" - # - # We want to create a message with attributes, e.g. a TTL. We - # don't really need to keep 'msg' in its JSON format any longer - # so let's create an actual qpid message here and get some - # value-add on the go. 
- # - # WARNING: Request timeout happens to be in the same units as - # qpid's TTL (seconds). If this changes in the future, then this - # will need to be altered accordingly. - # - qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) - self.publisher_send(TopicPublisher, topic, qpid_message) - - def fanout_send(self, topic, msg): - """Send a 'fanout' message.""" - self.publisher_send(FanoutPublisher, topic, msg) - - def notify_send(self, topic, msg, **kwargs): - """Send a notify message on a topic.""" - self.publisher_send(NotifyPublisher, topic, msg) - - def consume(self, limit=None): - """Consume from all queues/consumers.""" - it = self.iterconsume(limit=limit) - while True: - try: - it.next() - except StopIteration: - return - - def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread.""" - @excutils.forever_retry_uncaught_exceptions - def _consumer_thread(): - try: - self.consume() - except greenlet.GreenletExit: - return - if self.consumer_thread is None: - self.consumer_thread = eventlet.spawn(_consumer_thread) - return self.consumer_thread - - def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object.""" - proxy_cb = rpc_amqp.ProxyCallback( - self.conf, proxy, - rpc_amqp.get_connection_pool(self.conf, Connection)) - self.proxy_callbacks.append(proxy_cb) - - if fanout: - consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) - else: - consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb) - - self._register_consumer(consumer) - - return consumer - - def create_worker(self, topic, proxy, pool_name): - """Create a worker that calls a method in a proxy object.""" - proxy_cb = rpc_amqp.ProxyCallback( - self.conf, proxy, - rpc_amqp.get_connection_pool(self.conf, Connection)) - self.proxy_callbacks.append(proxy_cb) - - consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, - name=pool_name) - - self._register_consumer(consumer) - - return consumer - - def join_consumer_pool(self, callback, pool_name, topic, - exchange_name=None, ack_on_error=True): - """Register as a member of a group of consumers for a given topic from - the specified exchange. - - Exactly one member of a given pool will receive each message. - - A message will be delivered to multiple pools, if more than - one is created. 
- """ - callback_wrapper = rpc_amqp.CallbackWrapper( - conf=self.conf, - callback=callback, - connection_pool=rpc_amqp.get_connection_pool(self.conf, - Connection), - wait_for_consumers=not ack_on_error - ) - self.proxy_callbacks.append(callback_wrapper) - - consumer = TopicConsumer(conf=self.conf, - session=self.session, - topic=topic, - callback=callback_wrapper, - name=pool_name, - exchange_name=exchange_name) - - self._register_consumer(consumer) - return consumer - - -def create_connection(conf, new=True): - """Create a connection.""" - return rpc_amqp.create_connection( - conf, new, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def multicall(conf, context, topic, msg, timeout=None): - """Make a call that returns multiple times.""" - return rpc_amqp.multicall( - conf, context, topic, msg, timeout, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def call(conf, context, topic, msg, timeout=None): - """Sends a message on a topic and wait for a response.""" - return rpc_amqp.call( - conf, context, topic, msg, timeout, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def cast(conf, context, topic, msg): - """Sends a message on a topic without waiting for a response.""" - return rpc_amqp.cast( - conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def fanout_cast(conf, context, topic, msg): - """Sends a message on a fanout exchange without waiting for a response.""" - return rpc_amqp.fanout_cast( - conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def cast_to_server(conf, context, server_params, topic, msg): - """Sends a message on a topic to a specific server.""" - return rpc_amqp.cast_to_server( - conf, context, server_params, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def fanout_cast_to_server(conf, context, server_params, topic, msg): - """Sends a message on a fanout exchange to a specific server.""" - return rpc_amqp.fanout_cast_to_server( - conf, context, server_params, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) - - -def notify(conf, context, topic, msg, envelope): - """Sends a notification event on a topic.""" - return rpc_amqp.notify(conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection), - envelope) - - -def cleanup(): - return rpc_amqp.cleanup(Connection.pool) diff --git a/libra/openstack/common/rpc/impl_zmq.py b/libra/openstack/common/rpc/impl_zmq.py deleted file mode 100644 index aea96a90..00000000 --- a/libra/openstack/common/rpc/impl_zmq.py +++ /dev/null @@ -1,818 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
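`ConsumerBase` in the qpid driver above packs everything the broker needs into one address string: the node name, a semicolon, and a JSON blob of declare options. A rough sketch of that assembly for both topology versions, using the stdlib `json` module in place of `jsonutils` and condensing the topic-consumer defaults shown above::

    import json

    def consumer_address(node_name, link_name, topology_version):
        if topology_version == 1:
            addr_opts = {
                'create': 'always',
                'node': {'type': 'topic',
                         'x-declare': {'durable': True,
                                       'auto-delete': True}},
                'link': {'name': link_name, 'durable': True,
                         'x-declare': {'durable': False,
                                       'auto-delete': True,
                                       'exclusive': False}},
            }
        elif topology_version == 2:
            # Version 2 leans on the standard amq.* exchanges, so only
            # the link declaration rides along in the options blob.
            addr_opts = {'link': {'x-declare': {'auto-delete': True}}}
        else:
            raise Exception('Invalid value for qpid_topology_version')
        return '%s ; %s' % (node_name, json.dumps(addr_opts))

    print(consumer_address('openstack/scheduler', 'scheduler', 1))
    print(consumer_address('amq.topic/topic/openstack/scheduler', '', 2))

The split matters for broker federation: version 2 addresses resolve against exchanges every broker already has, which is why the option help text above warns that switching versions requires a clean break.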
- -import os -import pprint -import re -import socket -import sys -import types -import uuid - -import eventlet -import greenlet -from oslo.config import cfg - -from libra.openstack.common import excutils -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import jsonutils -from libra.openstack.common.rpc import common as rpc_common - -zmq = importutils.try_import('eventlet.green.zmq') - -# for convenience, are not modified. -pformat = pprint.pformat -Timeout = eventlet.timeout.Timeout -LOG = rpc_common.LOG -RemoteError = rpc_common.RemoteError -RPCException = rpc_common.RPCException - -zmq_opts = [ - cfg.StrOpt('rpc_zmq_bind_address', default='*', - help='ZeroMQ bind address. Should be a wildcard (*), ' - 'an ethernet interface, or IP. ' - 'The "host" option should point or resolve to this ' - 'address.'), - - # The module.Class to use for matchmaking. - cfg.StrOpt( - 'rpc_zmq_matchmaker', - default=('libra.openstack.common.rpc.' - 'matchmaker.MatchMakerLocalhost'), - help='MatchMaker driver', - ), - - # The following port is unassigned by IANA as of 2012-05-21 - cfg.IntOpt('rpc_zmq_port', default=9501, - help='ZeroMQ receiver listening port'), - - cfg.IntOpt('rpc_zmq_contexts', default=1, - help='Number of ZeroMQ contexts, defaults to 1'), - - cfg.IntOpt('rpc_zmq_topic_backlog', default=None, - help='Maximum number of ingress messages to locally buffer ' - 'per topic. Default is unlimited.'), - - cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', - help='Directory for holding IPC sockets'), - - cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), - help='Name of this node. Must be a valid hostname, FQDN, or ' - 'IP address. Must match "host" option, if running Nova.') -] - - -CONF = cfg.CONF -CONF.register_opts(zmq_opts) - -ZMQ_CTX = None # ZeroMQ Context, must be global. -matchmaker = None # memoized matchmaker object - - -def _serialize(data): - """Serialization wrapper. - - We prefer using JSON, but it cannot encode all types. - Error if a developer passes us bad data. - """ - try: - return jsonutils.dumps(data, ensure_ascii=True) - except TypeError: - with excutils.save_and_reraise_exception(): - LOG.error(_("JSON serialization failed.")) - - -def _deserialize(data): - """Deserialization wrapper.""" - LOG.debug(_("Deserializing: %s"), data) - return jsonutils.loads(data) - - -class ZmqSocket(object): - """A tiny wrapper around ZeroMQ. - - Simplifies the send/recv protocol and connection management. - Can be used as a Context (supports the 'with' statement). - """ - - def __init__(self, addr, zmq_type, bind=True, subscribe=None): - self.sock = _get_ctxt().socket(zmq_type) - self.addr = addr - self.type = zmq_type - self.subscriptions = [] - - # Support failures on sending/receiving on wrong socket type. 
- self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) - self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) - self.can_sub = zmq_type in (zmq.SUB, ) - - # Support list, str, & None for subscribe arg (cast to list) - do_sub = { - list: subscribe, - str: [subscribe], - type(None): [] - }[type(subscribe)] - - for f in do_sub: - self.subscribe(f) - - str_data = {'addr': addr, 'type': self.socket_s(), - 'subscribe': subscribe, 'bind': bind} - - LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) - LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) - LOG.debug(_("-> bind: %(bind)s"), str_data) - - try: - if bind: - self.sock.bind(addr) - else: - self.sock.connect(addr) - except Exception: - raise RPCException(_("Could not open socket.")) - - def socket_s(self): - """Get socket type as string.""" - t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', - 'DEALER') - return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] - - def subscribe(self, msg_filter): - """Subscribe.""" - if not self.can_sub: - raise RPCException("Cannot subscribe on this socket.") - LOG.debug(_("Subscribing to %s"), msg_filter) - - try: - self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) - except Exception: - return - - self.subscriptions.append(msg_filter) - - def unsubscribe(self, msg_filter): - """Unsubscribe.""" - if msg_filter not in self.subscriptions: - return - self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) - self.subscriptions.remove(msg_filter) - - def close(self): - if self.sock is None or self.sock.closed: - return - - # We must unsubscribe, or we'll leak descriptors. - if self.subscriptions: - for f in self.subscriptions: - try: - self.sock.setsockopt(zmq.UNSUBSCRIBE, f) - except Exception: - pass - self.subscriptions = [] - - try: - # Default is to linger - self.sock.close() - except Exception: - # While this is a bad thing to happen, - # it would be much worse if some of the code calling this - # were to fail. For now, lets log, and later evaluate - # if we can safely raise here. 
- LOG.error(_("ZeroMQ socket could not be closed.")) - self.sock = None - - def recv(self, **kwargs): - if not self.can_recv: - raise RPCException(_("You cannot recv on this socket.")) - return self.sock.recv_multipart(**kwargs) - - def send(self, data, **kwargs): - if not self.can_send: - raise RPCException(_("You cannot send on this socket.")) - self.sock.send_multipart(data, **kwargs) - - -class ZmqClient(object): - """Client for ZMQ sockets.""" - - def __init__(self, addr): - self.outq = ZmqSocket(addr, zmq.PUSH, bind=False) - - def cast(self, msg_id, topic, data, envelope): - msg_id = msg_id or 0 - - if not envelope: - self.outq.send(map(bytes, - (msg_id, topic, 'cast', _serialize(data)))) - return - - rpc_envelope = rpc_common.serialize_msg(data[1], envelope) - zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) - self.outq.send(map(bytes, - (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) - - def close(self): - self.outq.close() - - -class RpcContext(rpc_common.CommonRpcContext): - """Context that supports replying to a rpc.call.""" - def __init__(self, **kwargs): - self.replies = [] - super(RpcContext, self).__init__(**kwargs) - - def deepcopy(self): - values = self.to_dict() - values['replies'] = self.replies - return self.__class__(**values) - - def reply(self, reply=None, failure=None, ending=False): - if ending: - return - self.replies.append(reply) - - @classmethod - def marshal(self, ctx): - ctx_data = ctx.to_dict() - return _serialize(ctx_data) - - @classmethod - def unmarshal(self, data): - return RpcContext.from_dict(_deserialize(data)) - - -class InternalContext(object): - """Used by ConsumerBase as a private context for - methods.""" - - def __init__(self, proxy): - self.proxy = proxy - self.msg_waiter = None - - def _get_response(self, ctx, proxy, topic, data): - """Process a curried message and cast the result to topic.""" - LOG.debug(_("Running func with context: %s"), ctx.to_dict()) - data.setdefault('version', None) - data.setdefault('args', {}) - - try: - result = proxy.dispatch( - ctx, data['version'], data['method'], - data.get('namespace'), **data['args']) - return ConsumerBase.normalize_reply(result, ctx.replies) - except greenlet.GreenletExit: - # ignore these since they are just from shutdowns - pass - except rpc_common.ClientException as e: - LOG.debug(_("Expected exception during message handling (%s)") % - e._exc_info[1]) - return {'exc': - rpc_common.serialize_remote_exception(e._exc_info, - log_failure=False)} - except Exception: - LOG.error(_("Exception during message handling")) - return {'exc': - rpc_common.serialize_remote_exception(sys.exc_info())} - - def reply(self, ctx, proxy, - msg_id=None, context=None, topic=None, msg=None): - """Reply to a casted call.""" - # NOTE(ewindisch): context kwarg exists for Grizzly compat. - # this may be able to be removed earlier than - # 'I' if ConsumerBase.process were refactored. - if type(msg) is list: - payload = msg[-1] - else: - payload = msg - - response = ConsumerBase.normalize_reply( - self._get_response(ctx, proxy, topic, payload), - ctx.replies) - - LOG.debug(_("Sending reply")) - _multi_send(_cast, ctx, topic, { - 'method': '-process_reply', - 'args': { - 'msg_id': msg_id, # Include for Folsom compat. - 'response': response - } - }, _msg_id=msg_id) - - -class ConsumerBase(object): - """Base Consumer.""" - - def __init__(self): - self.private_ctx = InternalContext(None) - - @classmethod - def normalize_reply(self, result, replies): - #TODO(ewindisch): re-evaluate and document this method. 
- if isinstance(result, types.GeneratorType): - return list(result) - elif replies: - return replies - else: - return [result] - - def process(self, proxy, ctx, data): - data.setdefault('version', None) - data.setdefault('args', {}) - - # Method starting with - are - # processed internally. (non-valid method name) - method = data.get('method') - if not method: - LOG.error(_("RPC message did not include method.")) - return - - # Internal method - # uses internal context for safety. - if method == '-reply': - self.private_ctx.reply(ctx, proxy, **data['args']) - return - - proxy.dispatch(ctx, data['version'], - data['method'], data.get('namespace'), **data['args']) - - -class ZmqBaseReactor(ConsumerBase): - """A consumer class implementing a centralized casting broker (PULL-PUSH). - - Used for RoundRobin requests. - """ - - def __init__(self, conf): - super(ZmqBaseReactor, self).__init__() - - self.proxies = {} - self.threads = [] - self.sockets = [] - self.subscribe = {} - - self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) - - def register(self, proxy, in_addr, zmq_type_in, - in_bind=True, subscribe=None): - - LOG.info(_("Registering reactor")) - - if zmq_type_in not in (zmq.PULL, zmq.SUB): - raise RPCException("Bad input socktype") - - # Items push in. - inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, - subscribe=subscribe) - - self.proxies[inq] = proxy - self.sockets.append(inq) - - LOG.info(_("In reactor registered")) - - def consume_in_thread(self): - @excutils.forever_retry_uncaught_exceptions - def _consume(sock): - LOG.info(_("Consuming socket")) - while True: - self.consume(sock) - - for k in self.proxies.keys(): - self.threads.append( - self.pool.spawn(_consume, k) - ) - - def wait(self): - for t in self.threads: - t.wait() - - def close(self): - for s in self.sockets: - s.close() - - for t in self.threads: - t.kill() - - -class ZmqProxy(ZmqBaseReactor): - """A consumer class implementing a topic-based proxy. - - Forwards to IPC sockets. - """ - - def __init__(self, conf): - super(ZmqProxy, self).__init__(conf) - pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\')) - self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep))) - - self.topic_proxy = {} - - def consume(self, sock): - ipc_dir = CONF.rpc_zmq_ipc_dir - - data = sock.recv(copy=False) - topic = data[1].bytes - - if topic.startswith('fanout~'): - sock_type = zmq.PUB - topic = topic.split('.', 1)[0] - elif topic.startswith('zmq_replies'): - sock_type = zmq.PUB - else: - sock_type = zmq.PUSH - - if topic not in self.topic_proxy: - def publisher(waiter): - LOG.info(_("Creating proxy for topic: %s"), topic) - - try: - # The topic is received over the network, - # don't trust this input. - if self.badchars.search(topic) is not None: - emsg = _("Topic contained dangerous characters.") - LOG.warn(emsg) - raise RPCException(emsg) - - out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % - (ipc_dir, topic), - sock_type, bind=True) - except RPCException: - waiter.send_exception(*sys.exc_info()) - return - - self.topic_proxy[topic] = eventlet.queue.LightQueue( - CONF.rpc_zmq_topic_backlog) - self.sockets.append(out_sock) - - # It takes some time for a pub socket to open, - # before we can have any faith in doing a send() to it. 
- if sock_type == zmq.PUB: - eventlet.sleep(.5) - - waiter.send(True) - - while(True): - data = self.topic_proxy[topic].get() - out_sock.send(data, copy=False) - - wait_sock_creation = eventlet.event.Event() - eventlet.spawn(publisher, wait_sock_creation) - - try: - wait_sock_creation.wait() - except RPCException: - LOG.error(_("Topic socket file creation failed.")) - return - - try: - self.topic_proxy[topic].put_nowait(data) - except eventlet.queue.Full: - LOG.error(_("Local per-topic backlog buffer full for topic " - "%(topic)s. Dropping message.") % {'topic': topic}) - - def consume_in_thread(self): - """Runs the ZmqProxy service.""" - ipc_dir = CONF.rpc_zmq_ipc_dir - consume_in = "tcp://%s:%s" % \ - (CONF.rpc_zmq_bind_address, - CONF.rpc_zmq_port) - consumption_proxy = InternalContext(None) - - try: - os.makedirs(ipc_dir) - except os.error: - if not os.path.isdir(ipc_dir): - with excutils.save_and_reraise_exception(): - LOG.error(_("Required IPC directory does not exist at" - " %s") % (ipc_dir, )) - try: - self.register(consumption_proxy, - consume_in, - zmq.PULL) - except zmq.ZMQError: - if os.access(ipc_dir, os.X_OK): - with excutils.save_and_reraise_exception(): - LOG.error(_("Permission denied to IPC directory at" - " %s") % (ipc_dir, )) - with excutils.save_and_reraise_exception(): - LOG.error(_("Could not create ZeroMQ receiver daemon. " - "Socket may already be in use.")) - - super(ZmqProxy, self).consume_in_thread() - - -def unflatten_envelope(packenv): - """Unflattens the RPC envelope. - - Takes a list and returns a dictionary. - i.e. [1,2,3,4] => {1: 2, 3: 4} - """ - i = iter(packenv) - h = {} - try: - while True: - k = i.next() - h[k] = i.next() - except StopIteration: - return h - - -class ZmqReactor(ZmqBaseReactor): - """A consumer class implementing a consumer for messages. - - Can also be used as a 1:1 proxy - """ - - def __init__(self, conf): - super(ZmqReactor, self).__init__(conf) - - def consume(self, sock): - #TODO(ewindisch): use zero-copy (i.e. references, not copying) - data = sock.recv() - LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) - - proxy = self.proxies[sock] - - if data[2] == 'cast': # Legacy protocol - packenv = data[3] - - ctx, msg = _deserialize(packenv) - request = rpc_common.deserialize_msg(msg) - ctx = RpcContext.unmarshal(ctx) - elif data[2] == 'impl_zmq_v2': - packenv = data[4:] - - msg = unflatten_envelope(packenv) - request = rpc_common.deserialize_msg(msg) - - # Unmarshal only after verifying the message. - ctx = RpcContext.unmarshal(data[3]) - else: - LOG.error(_("ZMQ Envelope version unsupported or unknown.")) - return - - self.pool.spawn_n(self.process, proxy, ctx, request) - - -class Connection(rpc_common.Connection): - """Manages connections and threads.""" - - def __init__(self, conf): - self.topics = [] - self.reactor = ZmqReactor(conf) - - def create_consumer(self, topic, proxy, fanout=False): - # Register with matchmaker. - _get_matchmaker().register(topic, CONF.rpc_zmq_host) - - # Subscription scenarios - if fanout: - sock_type = zmq.SUB - subscribe = ('', fanout)[type(fanout) == str] - topic = 'fanout~' + topic.split('.', 1)[0] - else: - sock_type = zmq.PULL - subscribe = None - topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) - - if topic in self.topics: - LOG.info(_("Skipping topic registration. 
Already registered.")) - return - - # Receive messages from (local) proxy - inaddr = "ipc://%s/zmq_topic_%s" % \ - (CONF.rpc_zmq_ipc_dir, topic) - - LOG.debug(_("Consumer is a zmq.%s"), - ['PULL', 'SUB'][sock_type == zmq.SUB]) - - self.reactor.register(proxy, inaddr, sock_type, - subscribe=subscribe, in_bind=False) - self.topics.append(topic) - - def close(self): - _get_matchmaker().stop_heartbeat() - for topic in self.topics: - _get_matchmaker().unregister(topic, CONF.rpc_zmq_host) - - self.reactor.close() - self.topics = [] - - def wait(self): - self.reactor.wait() - - def consume_in_thread(self): - _get_matchmaker().start_heartbeat() - self.reactor.consume_in_thread() - - -def _cast(addr, context, topic, msg, timeout=None, envelope=False, - _msg_id=None): - timeout_cast = timeout or CONF.rpc_cast_timeout - payload = [RpcContext.marshal(context), msg] - - with Timeout(timeout_cast, exception=rpc_common.Timeout): - try: - conn = ZmqClient(addr) - - # assumes cast can't return an exception - conn.cast(_msg_id, topic, payload, envelope) - except zmq.ZMQError: - raise RPCException("Cast failed. ZMQ Socket Exception") - finally: - if 'conn' in vars(): - conn.close() - - -def _call(addr, context, topic, msg, timeout=None, - envelope=False): - # timeout_response is how long we wait for a response - timeout = timeout or CONF.rpc_response_timeout - - # The msg_id is used to track replies. - msg_id = uuid.uuid4().hex - - # Replies always come into the reply service. - reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host - - LOG.debug(_("Creating payload")) - # Curry the original request into a reply method. - mcontext = RpcContext.marshal(context) - payload = { - 'method': '-reply', - 'args': { - 'msg_id': msg_id, - 'topic': reply_topic, - # TODO(ewindisch): safe to remove mcontext in I. - 'msg': [mcontext, msg] - } - } - - LOG.debug(_("Creating queue socket for reply waiter")) - - # Messages arriving async. - # TODO(ewindisch): have reply consumer with dynamic subscription mgmt - with Timeout(timeout, exception=rpc_common.Timeout): - try: - msg_waiter = ZmqSocket( - "ipc://%s/zmq_topic_zmq_replies.%s" % - (CONF.rpc_zmq_ipc_dir, - CONF.rpc_zmq_host), - zmq.SUB, subscribe=msg_id, bind=False - ) - - LOG.debug(_("Sending cast")) - _cast(addr, context, topic, payload, envelope) - - LOG.debug(_("Cast sent; Waiting reply")) - # Blocks until receives reply - msg = msg_waiter.recv() - LOG.debug(_("Received message: %s"), msg) - LOG.debug(_("Unpacking response")) - - if msg[2] == 'cast': # Legacy version - raw_msg = _deserialize(msg[-1])[-1] - elif msg[2] == 'impl_zmq_v2': - rpc_envelope = unflatten_envelope(msg[4:]) - raw_msg = rpc_common.deserialize_msg(rpc_envelope) - else: - raise rpc_common.UnsupportedRpcEnvelopeVersion( - _("Unsupported or unknown ZMQ envelope returned.")) - - responses = raw_msg['args']['response'] - # ZMQError trumps the Timeout error. - except zmq.ZMQError: - raise RPCException("ZMQ Socket Error") - except (IndexError, KeyError): - raise RPCException(_("RPC Message Invalid.")) - finally: - if 'msg_waiter' in vars(): - msg_waiter.close() - - # It seems we don't need to do all of the following, - # but perhaps it would be useful for multicall? - # One effect of this is that we're checking all - # responses for Exceptions. 
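# A reply dict carrying an 'exc' key is a serialized remote exception;
# deserialize and re-raise it locally rather than returning it.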
- for resp in responses: - if isinstance(resp, types.DictType) and 'exc' in resp: - raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) - - return responses[-1] - - -def _multi_send(method, context, topic, msg, timeout=None, - envelope=False, _msg_id=None): - """Wraps the sending of messages. - - Dispatches to the matchmaker and sends message to all relevant hosts. - """ - conf = CONF - LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) - - queues = _get_matchmaker().queues(topic) - LOG.debug(_("Sending message(s) to: %s"), queues) - - # Don't stack if we have no matchmaker results - if not queues: - LOG.warn(_("No matchmaker results. Not casting.")) - # While not strictly a timeout, callers know how to handle - # this exception and a timeout isn't too big a lie. - raise rpc_common.Timeout(_("No match from matchmaker.")) - - # This supports brokerless fanout (addresses > 1) - for queue in queues: - (_topic, ip_addr) = queue - _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) - - if method.__name__ == '_cast': - eventlet.spawn_n(method, _addr, context, - _topic, msg, timeout, envelope, - _msg_id) - return - return method(_addr, context, _topic, msg, timeout, - envelope) - - -def create_connection(conf, new=True): - return Connection(conf) - - -def multicall(conf, *args, **kwargs): - """Multiple calls.""" - return _multi_send(_call, *args, **kwargs) - - -def call(conf, *args, **kwargs): - """Send a message, expect a response.""" - data = _multi_send(_call, *args, **kwargs) - return data[-1] - - -def cast(conf, *args, **kwargs): - """Send a message expecting no reply.""" - _multi_send(_cast, *args, **kwargs) - - -def fanout_cast(conf, context, topic, msg, **kwargs): - """Send a message to all listening and expect no reply.""" - # NOTE(ewindisch): fanout~ is used because it avoid splitting on . - # and acts as a non-subtle hint to the matchmaker and ZmqProxy. - _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) - - -def notify(conf, context, topic, msg, envelope): - """Send notification event. - - Notifications are sent to topic-priority. - This differs from the AMQP drivers which send to topic.priority. - """ - # NOTE(ewindisch): dot-priority in rpc notifier does not - # work with our assumptions. 
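# e.g. 'monitor.info' is published as 'monitor-info', so the priority
# suffix cannot be mistaken for a host component when this driver
# splits topics on '.'.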
- topic = topic.replace('.', '-') - cast(conf, context, topic, msg, envelope=envelope) - - -def cleanup(): - """Clean up resources in use by implementation.""" - global ZMQ_CTX - if ZMQ_CTX: - ZMQ_CTX.term() - ZMQ_CTX = None - - global matchmaker - matchmaker = None - - -def _get_ctxt(): - if not zmq: - raise ImportError("Failed to import eventlet.green.zmq") - - global ZMQ_CTX - if not ZMQ_CTX: - ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts) - return ZMQ_CTX - - -def _get_matchmaker(*args, **kwargs): - global matchmaker - if not matchmaker: - mm = CONF.rpc_zmq_matchmaker - if mm.endswith('matchmaker.MatchMakerRing'): - mm.replace('matchmaker', 'matchmaker_ring') - LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use' - ' %(new)s instead') % dict( - orig=CONF.rpc_zmq_matchmaker, new=mm)) - matchmaker = importutils.import_object(mm, *args, **kwargs) - return matchmaker diff --git a/libra/openstack/common/rpc/matchmaker.py b/libra/openstack/common/rpc/matchmaker.py deleted file mode 100644 index cda9e01f..00000000 --- a/libra/openstack/common/rpc/matchmaker.py +++ /dev/null @@ -1,324 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -The MatchMaker classes should except a Topic or Fanout exchange key and -return keys for direct exchanges, per (approximate) AMQP parlance. -""" - -import contextlib - -import eventlet -from oslo.config import cfg - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import log as logging - - -matchmaker_opts = [ - cfg.IntOpt('matchmaker_heartbeat_freq', - default=300, - help='Heartbeat frequency'), - cfg.IntOpt('matchmaker_heartbeat_ttl', - default=600, - help='Heartbeat time-to-live.'), -] - -CONF = cfg.CONF -CONF.register_opts(matchmaker_opts) -LOG = logging.getLogger(__name__) -contextmanager = contextlib.contextmanager - - -class MatchMakerException(Exception): - """Signified a match could not be found.""" - message = _("Match not found by MatchMaker.") - - -class Exchange(object): - """Implements lookups. - - Subclass this to support hashtables, dns, etc. - """ - def __init__(self): - pass - - def run(self, key): - raise NotImplementedError() - - -class Binding(object): - """A binding on which to perform a lookup.""" - def __init__(self): - pass - - def test(self, key): - raise NotImplementedError() - - -class MatchMakerBase(object): - """Match Maker Base Class. - - Build off HeartbeatMatchMakerBase if building a heartbeat-capable - MatchMaker. - """ - def __init__(self): - # Array of tuples. Index [2] toggles negation, [3] is last-if-true - self.bindings = [] - - self.no_heartbeat_msg = _('Matchmaker does not implement ' - 'registration or heartbeat.') - - def register(self, key, host): - """Register a host on a backend. - - Heartbeats, if applicable, may keepalive registration. - """ - pass - - def ack_alive(self, key, host): - """Acknowledge that a key.host is alive. 
- - Used internally for updating heartbeats, but may also be used - publically to acknowledge a system is alive (i.e. rpc message - successfully sent to host) - """ - pass - - def is_alive(self, topic, host): - """Checks if a host is alive.""" - pass - - def expire(self, topic, host): - """Explicitly expire a host's registration.""" - pass - - def send_heartbeats(self): - """Send all heartbeats. - - Use start_heartbeat to spawn a heartbeat greenthread, - which loops this method. - """ - pass - - def unregister(self, key, host): - """Unregister a topic.""" - pass - - def start_heartbeat(self): - """Spawn heartbeat greenthread.""" - pass - - def stop_heartbeat(self): - """Destroys the heartbeat greenthread.""" - pass - - def add_binding(self, binding, rule, last=True): - self.bindings.append((binding, rule, False, last)) - - #NOTE(ewindisch): kept the following method in case we implement the - # underlying support. - #def add_negate_binding(self, binding, rule, last=True): - # self.bindings.append((binding, rule, True, last)) - - def queues(self, key): - workers = [] - - # bit is for negate bindings - if we choose to implement it. - # last stops processing rules if this matches. - for (binding, exchange, bit, last) in self.bindings: - if binding.test(key): - workers.extend(exchange.run(key)) - - # Support last. - if last: - return workers - return workers - - -class HeartbeatMatchMakerBase(MatchMakerBase): - """Base for a heart-beat capable MatchMaker. - - Provides common methods for registering, unregistering, and maintaining - heartbeats. - """ - def __init__(self): - self.hosts = set() - self._heart = None - self.host_topic = {} - - super(HeartbeatMatchMakerBase, self).__init__() - - def send_heartbeats(self): - """Send all heartbeats. - - Use start_heartbeat to spawn a heartbeat greenthread, - which loops this method. - """ - for key, host in self.host_topic: - self.ack_alive(key, host) - - def ack_alive(self, key, host): - """Acknowledge that a host.topic is alive. - - Used internally for updating heartbeats, but may also be used - publically to acknowledge a system is alive (i.e. rpc message - successfully sent to host) - """ - raise NotImplementedError("Must implement ack_alive") - - def backend_register(self, key, host): - """Implements registration logic. - - Called by register(self,key,host) - """ - raise NotImplementedError("Must implement backend_register") - - def backend_unregister(self, key, key_host): - """Implements de-registration logic. - - Called by unregister(self,key,host) - """ - raise NotImplementedError("Must implement backend_unregister") - - def register(self, key, host): - """Register a host on a backend. - - Heartbeats, if applicable, may keepalive registration. - """ - self.hosts.add(host) - self.host_topic[(key, host)] = host - key_host = '.'.join((key, host)) - - self.backend_register(key, key_host) - - self.ack_alive(key, host) - - def unregister(self, key, host): - """Unregister a topic.""" - if (key, host) in self.host_topic: - del self.host_topic[(key, host)] - - self.hosts.discard(host) - self.backend_unregister(key, '.'.join((key, host))) - - LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"), - {'key': key, 'host': host}) - - def start_heartbeat(self): - """Implementation of MatchMakerBase.start_heartbeat. - - Launches greenthread looping send_heartbeats(), - yielding for CONF.matchmaker_heartbeat_freq seconds - between iterations. 
- """ - if not self.hosts: - raise MatchMakerException( - _("Register before starting heartbeat.")) - - def do_heartbeat(): - while True: - self.send_heartbeats() - eventlet.sleep(CONF.matchmaker_heartbeat_freq) - - self._heart = eventlet.spawn(do_heartbeat) - - def stop_heartbeat(self): - """Destroys the heartbeat greenthread.""" - if self._heart: - self._heart.kill() - - -class DirectBinding(Binding): - """Specifies a host in the key via a '.' character. - - Although dots are used in the key, the behavior here is - that it maps directly to a host, thus direct. - """ - def test(self, key): - return '.' in key - - -class TopicBinding(Binding): - """Where a 'bare' key without dots. - - AMQP generally considers topic exchanges to be those *with* dots, - but we deviate here in terminology as the behavior here matches - that of a topic exchange (whereas where there are dots, behavior - matches that of a direct exchange. - """ - def test(self, key): - return '.' not in key - - -class FanoutBinding(Binding): - """Match on fanout keys, where key starts with 'fanout.' string.""" - def test(self, key): - return key.startswith('fanout~') - - -class StubExchange(Exchange): - """Exchange that does nothing.""" - def run(self, key): - return [(key, None)] - - -class LocalhostExchange(Exchange): - """Exchange where all direct topics are local.""" - def __init__(self, host='localhost'): - self.host = host - super(Exchange, self).__init__() - - def run(self, key): - return [('.'.join((key.split('.')[0], self.host)), self.host)] - - -class DirectExchange(Exchange): - """Exchange where all topic keys are split, sending to second half. - - i.e. "compute.host" sends a message to "compute.host" running on "host" - """ - def __init__(self): - super(Exchange, self).__init__() - - def run(self, key): - e = key.split('.', 1)[1] - return [(key, e)] - - -class MatchMakerLocalhost(MatchMakerBase): - """Match Maker where all bare topics resolve to localhost. - - Useful for testing. - """ - def __init__(self, host='localhost'): - super(MatchMakerLocalhost, self).__init__() - self.add_binding(FanoutBinding(), LocalhostExchange(host)) - self.add_binding(DirectBinding(), DirectExchange()) - self.add_binding(TopicBinding(), LocalhostExchange(host)) - - -class MatchMakerStub(MatchMakerBase): - """Match Maker where topics are untouched. - - Useful for testing, or for AMQP/brokered queues. - Will not work where knowledge of hosts is known (i.e. zeromq) - """ - def __init__(self): - super(MatchMakerStub, self).__init__() - - self.add_binding(FanoutBinding(), StubExchange()) - self.add_binding(DirectBinding(), StubExchange()) - self.add_binding(TopicBinding(), StubExchange()) diff --git a/libra/openstack/common/rpc/matchmaker_redis.py b/libra/openstack/common/rpc/matchmaker_redis.py deleted file mode 100644 index 7e9e096f..00000000 --- a/libra/openstack/common/rpc/matchmaker_redis.py +++ /dev/null @@ -1,145 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -The MatchMaker classes should accept a Topic or Fanout exchange key and -return keys for direct exchanges, per (approximate) AMQP parlance. -""" - -from oslo.config import cfg - -from libra.openstack.common import importutils -from libra.openstack.common import log as logging -from libra.openstack.common.rpc import matchmaker as mm_common - -redis = importutils.try_import('redis') - - -matchmaker_redis_opts = [ - cfg.StrOpt('host', - default='127.0.0.1', - help='Host to locate redis'), - cfg.IntOpt('port', - default=6379, - help='Use this port to connect to redis host.'), - cfg.StrOpt('password', - default=None, - help='Password for Redis server. (optional)'), -] - -CONF = cfg.CONF -opt_group = cfg.OptGroup(name='matchmaker_redis', - title='Options for Redis-based MatchMaker') -CONF.register_group(opt_group) -CONF.register_opts(matchmaker_redis_opts, opt_group) -LOG = logging.getLogger(__name__) - - -class RedisExchange(mm_common.Exchange): - def __init__(self, matchmaker): - self.matchmaker = matchmaker - self.redis = matchmaker.redis - super(RedisExchange, self).__init__() - - -class RedisTopicExchange(RedisExchange): - """Exchange where all topic keys are split, sending to second half. - - i.e. "compute.host" sends a message to "compute" running on "host" - """ - def run(self, topic): - while True: - member_name = self.redis.srandmember(topic) - - if not member_name: - # If this happens, there are no - # longer any members. - break - - if not self.matchmaker.is_alive(topic, member_name): - continue - - host = member_name.split('.', 1)[1] - return [(member_name, host)] - return [] - - -class RedisFanoutExchange(RedisExchange): - """Return a list of all hosts.""" - def run(self, topic): - topic = topic.split('~', 1)[1] - hosts = self.redis.smembers(topic) - good_hosts = filter( - lambda host: self.matchmaker.is_alive(topic, host), hosts) - - return [(x, x.split('.', 1)[1]) for x in good_hosts] - - -class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): - """MatchMaker registering and looking-up hosts with a Redis server.""" - def __init__(self): - super(MatchMakerRedis, self).__init__() - - if not redis: - raise ImportError("Failed to import module redis.") - - self.redis = redis.StrictRedis( - host=CONF.matchmaker_redis.host, - port=CONF.matchmaker_redis.port, - password=CONF.matchmaker_redis.password) - - self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self)) - self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange()) - self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self)) - - def ack_alive(self, key, host): - topic = "%s.%s" % (key, host) - if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl): - # If we could not update the expiration, the key - # might have been pruned. Re-register, creating a new - # key in Redis. - self.register(self.topic_host[host], host) - - def is_alive(self, topic, host): - if self.redis.ttl(host) == -1: - self.expire(topic, host) - return False - return True - - def expire(self, topic, host): - with self.redis.pipeline() as pipe: - pipe.multi() - pipe.delete(host) - pipe.srem(topic, host) - pipe.execute() - - def backend_register(self, key, key_host): - with self.redis.pipeline() as pipe: - pipe.multi() - pipe.sadd(key, key_host) - - # No value is needed, we just - # care if it exists. Sets aren't viable - # because only keys can expire. 
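# ack_alive() later refreshes the heartbeat TTL on this string key via
# redis.expire(); the registration lapses when the TTL does.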
- pipe.set(key_host, '') - - pipe.execute() - - def backend_unregister(self, key, key_host): - with self.redis.pipeline() as pipe: - pipe.multi() - pipe.srem(key, key_host) - pipe.delete(key_host) - pipe.execute() diff --git a/libra/openstack/common/rpc/matchmaker_ring.py b/libra/openstack/common/rpc/matchmaker_ring.py deleted file mode 100644 index b9e0ca4c..00000000 --- a/libra/openstack/common/rpc/matchmaker_ring.py +++ /dev/null @@ -1,108 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011-2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -The MatchMaker classes should except a Topic or Fanout exchange key and -return keys for direct exchanges, per (approximate) AMQP parlance. -""" - -import itertools -import json - -from oslo.config import cfg - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import log as logging -from libra.openstack.common.rpc import matchmaker as mm - - -matchmaker_opts = [ - # Matchmaker ring file - cfg.StrOpt('ringfile', - deprecated_name='matchmaker_ringfile', - deprecated_group='DEFAULT', - default='/etc/oslo/matchmaker_ring.json', - help='Matchmaker ring file (JSON)'), -] - -CONF = cfg.CONF -CONF.register_opts(matchmaker_opts, 'matchmaker_ring') -LOG = logging.getLogger(__name__) - - -class RingExchange(mm.Exchange): - """Match Maker where hosts are loaded from a static JSON formatted file. - - __init__ takes optional ring dictionary argument, otherwise - loads the ringfile from CONF.mathcmaker_ringfile. - """ - def __init__(self, ring=None): - super(RingExchange, self).__init__() - - if ring: - self.ring = ring - else: - fh = open(CONF.matchmaker_ring.ringfile, 'r') - self.ring = json.load(fh) - fh.close() - - self.ring0 = {} - for k in self.ring.keys(): - self.ring0[k] = itertools.cycle(self.ring[k]) - - def _ring_has(self, key): - return key in self.ring0 - - -class RoundRobinRingExchange(RingExchange): - """A Topic Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(RoundRobinRingExchange, self).__init__(ring) - - def run(self, key): - if not self._ring_has(key): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (key, ) - ) - return [] - host = next(self.ring0[key]) - return [(key + '.' + host, host)] - - -class FanoutRingExchange(RingExchange): - """Fanout Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(FanoutRingExchange, self).__init__(ring) - - def run(self, key): - # Assume starts with "fanout~", strip it for lookup. - nkey = key.split('fanout~')[1:][0] - if not self._ring_has(nkey): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (nkey, ) - ) - return [] - return map(lambda x: (key + '.' 
+ x, x), self.ring[nkey]) - - -class MatchMakerRing(mm.MatchMakerBase): - """Match Maker where hosts are loaded from a static hashmap.""" - def __init__(self, ring=None): - super(MatchMakerRing, self).__init__() - self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring)) - self.add_binding(mm.DirectBinding(), mm.DirectExchange()) - self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring)) diff --git a/libra/openstack/common/rpc/proxy.py b/libra/openstack/common/rpc/proxy.py deleted file mode 100644 index 8df6e166..00000000 --- a/libra/openstack/common/rpc/proxy.py +++ /dev/null @@ -1,226 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012-2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A helper class for proxy objects to remote APIs. - -For more information about rpc API version numbers, see: - rpc/dispatcher.py -""" - - -from libra.openstack.common import rpc -from libra.openstack.common.rpc import common as rpc_common -from libra.openstack.common.rpc import serializer as rpc_serializer - - -class RpcProxy(object): - """A helper class for rpc clients. - - This class is a wrapper around the RPC client API. It allows you to - specify the topic and API version in a single place. This is intended to - be used as a base class for a class that implements the client side of an - rpc API. - """ - - # The default namespace, which can be overriden in a subclass. - RPC_API_NAMESPACE = None - - def __init__(self, topic, default_version, version_cap=None, - serializer=None): - """Initialize an RpcProxy. - - :param topic: The topic to use for all messages. - :param default_version: The default API version to request in all - outgoing messages. This can be overridden on a per-message - basis. - :param version_cap: Optionally cap the maximum version used for sent - messages. - :param serializer: Optionaly (de-)serialize entities with a - provided helper. - """ - self.topic = topic - self.default_version = default_version - self.version_cap = version_cap - if serializer is None: - serializer = rpc_serializer.NoOpSerializer() - self.serializer = serializer - super(RpcProxy, self).__init__() - - def _set_version(self, msg, vers): - """Helper method to set the version in a message. - - :param msg: The message having a version added to it. - :param vers: The version number to add to the message. 
- """ - v = vers if vers else self.default_version - if (self.version_cap and not - rpc_common.version_is_compatible(self.version_cap, v)): - raise rpc_common.RpcVersionCapError(version_cap=self.version_cap) - msg['version'] = v - - def _get_topic(self, topic): - """Return the topic to use for a message.""" - return topic if topic else self.topic - - def can_send_version(self, version): - """Check to see if a version is compatible with the version cap.""" - return (not self.version_cap or - rpc_common.version_is_compatible(self.version_cap, version)) - - @staticmethod - def make_namespaced_msg(method, namespace, **kwargs): - return {'method': method, 'namespace': namespace, 'args': kwargs} - - def make_msg(self, method, **kwargs): - return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE, - **kwargs) - - def _serialize_msg_args(self, context, kwargs): - """Helper method called to serialize message arguments. - - This calls our serializer on each argument, returning a new - set of args that have been serialized. - - :param context: The request context - :param kwargs: The arguments to serialize - :returns: A new set of serialized arguments - """ - new_kwargs = dict() - for argname, arg in kwargs.iteritems(): - new_kwargs[argname] = self.serializer.serialize_entity(context, - arg) - return new_kwargs - - def call(self, context, msg, topic=None, version=None, timeout=None): - """rpc.call() a remote method. - - :param context: The request context - :param msg: The message to send, including the method and args. - :param topic: Override the topic for this message. - :param version: (Optional) Override the requested API version in this - message. - :param timeout: (Optional) A timeout to use when waiting for the - response. If no timeout is specified, a default timeout will be - used that is usually sufficient. - - :returns: The return value from the remote method. - """ - self._set_version(msg, version) - msg['args'] = self._serialize_msg_args(context, msg['args']) - real_topic = self._get_topic(topic) - try: - result = rpc.call(context, real_topic, msg, timeout) - return self.serializer.deserialize_entity(context, result) - except rpc.common.Timeout as exc: - raise rpc.common.Timeout( - exc.info, real_topic, msg.get('method')) - - def multicall(self, context, msg, topic=None, version=None, timeout=None): - """rpc.multicall() a remote method. - - :param context: The request context - :param msg: The message to send, including the method and args. - :param topic: Override the topic for this message. - :param version: (Optional) Override the requested API version in this - message. - :param timeout: (Optional) A timeout to use when waiting for the - response. If no timeout is specified, a default timeout will be - used that is usually sufficient. - - :returns: An iterator that lets you process each of the returned values - from the remote method as they arrive. - """ - self._set_version(msg, version) - msg['args'] = self._serialize_msg_args(context, msg['args']) - real_topic = self._get_topic(topic) - try: - result = rpc.multicall(context, real_topic, msg, timeout) - return self.serializer.deserialize_entity(context, result) - except rpc.common.Timeout as exc: - raise rpc.common.Timeout( - exc.info, real_topic, msg.get('method')) - - def cast(self, context, msg, topic=None, version=None): - """rpc.cast() a remote method. - - :param context: The request context - :param msg: The message to send, including the method and args. - :param topic: Override the topic for this message. 
- :param version: (Optional) Override the requested API version in this - message. - - :returns: None. rpc.cast() does not wait on any return value from the - remote method. - """ - self._set_version(msg, version) - msg['args'] = self._serialize_msg_args(context, msg['args']) - rpc.cast(context, self._get_topic(topic), msg) - - def fanout_cast(self, context, msg, topic=None, version=None): - """rpc.fanout_cast() a remote method. - - :param context: The request context - :param msg: The message to send, including the method and args. - :param topic: Override the topic for this message. - :param version: (Optional) Override the requested API version in this - message. - - :returns: None. rpc.fanout_cast() does not wait on any return value - from the remote method. - """ - self._set_version(msg, version) - msg['args'] = self._serialize_msg_args(context, msg['args']) - rpc.fanout_cast(context, self._get_topic(topic), msg) - - def cast_to_server(self, context, server_params, msg, topic=None, - version=None): - """rpc.cast_to_server() a remote method. - - :param context: The request context - :param server_params: Server parameters. See rpc.cast_to_server() for - details. - :param msg: The message to send, including the method and args. - :param topic: Override the topic for this message. - :param version: (Optional) Override the requested API version in this - message. - - :returns: None. rpc.cast_to_server() does not wait on any - return values. - """ - self._set_version(msg, version) - msg['args'] = self._serialize_msg_args(context, msg['args']) - rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) - - def fanout_cast_to_server(self, context, server_params, msg, topic=None, - version=None): - """rpc.fanout_cast_to_server() a remote method. - - :param context: The request context - :param server_params: Server parameters. See rpc.cast_to_server() for - details. - :param msg: The message to send, including the method and args. - :param topic: Override the topic for this message. - :param version: (Optional) Override the requested API version in this - message. - - :returns: None. rpc.fanout_cast_to_server() does not wait on any - return values. - """ - self._set_version(msg, version) - msg['args'] = self._serialize_msg_args(context, msg['args']) - rpc.fanout_cast_to_server(context, server_params, - self._get_topic(topic), msg) diff --git a/libra/openstack/common/rpc/securemessage.py b/libra/openstack/common/rpc/securemessage.py deleted file mode 100644 index 92c8963b..00000000 --- a/libra/openstack/common/rpc/securemessage.py +++ /dev/null @@ -1,521 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
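securemessage.py, removed below, wraps each RPC envelope in a (metadata, message, signature) triple, where the signature covers version + metadata + message in that order. A standalone sketch of that check, with stdlib hmac/sha256 standing in for the module's SymmetricCrypto primitive and a hypothetical pre-shared key in place of the KDS ticket machinery:

    import hashlib
    import hmac

    def sign(key, version, metadata, message):
        # Same concatenation order as SecureMessage.encode()/decode().
        data = (version + metadata + message).encode('utf-8')
        return hmac.new(key, data, hashlib.sha256).hexdigest()

    def verify(key, version, metadata, message, signature):
        return hmac.compare_digest(
            sign(key, version, metadata, message), signature)

    key = b'shared-secret'  # hypothetical; real keys come from KDS tickets
    sig = sign(key, '2.0', '{"source": "api.host1"}', '{"method": "ping"}')
    assert verify(key, '2.0', '{"source": "api.host1"}',
                  '{"method": "ping"}', sig)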
- -import base64 -import collections -import os -import struct -import time - -import requests - -from oslo.config import cfg - -from libra.openstack.common.crypto import utils as cryptoutils -from libra.openstack.common import jsonutils -from libra.openstack.common import log as logging - -secure_message_opts = [ - cfg.BoolOpt('enabled', default=True, - help='Whether Secure Messaging (Signing) is enabled,' - ' defaults to enabled'), - cfg.BoolOpt('enforced', default=False, - help='Whether Secure Messaging (Signing) is enforced,' - ' defaults to not enforced'), - cfg.BoolOpt('encrypt', default=False, - help='Whether Secure Messaging (Encryption) is enabled,' - ' defaults to not enabled'), - cfg.StrOpt('secret_keys_file', - help='Path to the file containing the keys, takes precedence' - ' over secret_key'), - cfg.MultiStrOpt('secret_key', - help='A list of keys: (ex: name:),' - ' ignored if secret_keys_file is set'), - cfg.StrOpt('kds_endpoint', - help='KDS endpoint (ex: http://kds.example.com:35357/v3)'), -] -secure_message_group = cfg.OptGroup('secure_messages', - title='Secure Messaging options') - -LOG = logging.getLogger(__name__) - - -class SecureMessageException(Exception): - """Generic Exception for Secure Messages.""" - - msg = "An unknown Secure Message related exception occurred." - - def __init__(self, msg=None): - if msg is None: - msg = self.msg - super(SecureMessageException, self).__init__(msg) - - -class SharedKeyNotFound(SecureMessageException): - """No shared key was found and no other external authentication mechanism - is available. - """ - - msg = "Shared Key for [%s] Not Found. (%s)" - - def __init__(self, name, errmsg): - super(SharedKeyNotFound, self).__init__(self.msg % (name, errmsg)) - - -class InvalidMetadata(SecureMessageException): - """The metadata is invalid.""" - - msg = "Invalid metadata: %s" - - def __init__(self, err): - super(InvalidMetadata, self).__init__(self.msg % err) - - -class InvalidSignature(SecureMessageException): - """Signature validation failed.""" - - msg = "Failed to validate signature (source=%s, destination=%s)" - - def __init__(self, src, dst): - super(InvalidSignature, self).__init__(self.msg % (src, dst)) - - -class UnknownDestinationName(SecureMessageException): - """The Destination name is unknown to us.""" - - msg = "Invalid destination name (%s)" - - def __init__(self, name): - super(UnknownDestinationName, self).__init__(self.msg % name) - - -class InvalidEncryptedTicket(SecureMessageException): - """The Encrypted Ticket could not be successfully handled.""" - - msg = "Invalid Ticket (source=%s, destination=%s)" - - def __init__(self, src, dst): - super(InvalidEncryptedTicket, self).__init__(self.msg % (src, dst)) - - -class InvalidExpiredTicket(SecureMessageException): - """The ticket received is already expired.""" - - msg = "Expired ticket (source=%s, destination=%s)" - - def __init__(self, src, dst): - super(InvalidExpiredTicket, self).__init__(self.msg % (src, dst)) - - -class CommunicationError(SecureMessageException): - """The Communication with the KDS failed.""" - - msg = "Communication Error (target=%s): %s" - - def __init__(self, target, errmsg): - super(CommunicationError, self).__init__(self.msg % (target, errmsg)) - - -class InvalidArgument(SecureMessageException): - """Bad initialization argument.""" - - msg = "Invalid argument: %s" - - def __init__(self, errmsg): - super(InvalidArgument, self).__init__(self.msg % errmsg) - - -Ticket = collections.namedtuple('Ticket', ['skey', 'ekey', 'esek']) - - -class 
KeyStore(object): - """A storage class for Signing and Encryption Keys. - - This class creates an object that holds Generic Keys like Signing - Keys, Encryption Keys, Encrypted SEK Tickets ... - """ - - def __init__(self): - self._kvps = dict() - - def _get_key_name(self, source, target, ktype): - return (source, target, ktype) - - def _put(self, src, dst, ktype, expiration, data): - name = self._get_key_name(src, dst, ktype) - self._kvps[name] = (expiration, data) - - def _get(self, src, dst, ktype): - name = self._get_key_name(src, dst, ktype) - if name in self._kvps: - expiration, data = self._kvps[name] - if expiration > time.time(): - return data - else: - del self._kvps[name] - - return None - - def clear(self): - """Wipes the store clear of all data.""" - self._kvps.clear() - - def put_ticket(self, source, target, skey, ekey, esek, expiration): - """Puts a sek pair in the cache. - - :param source: Client name - :param target: Target name - :param skey: The Signing Key - :param ekey: The Encription Key - :param esek: The token encrypted with the target key - :param expiration: Expiration time in seconds since Epoch - """ - keys = Ticket(skey, ekey, esek) - self._put(source, target, 'ticket', expiration, keys) - - def get_ticket(self, source, target): - """Returns a Ticket (skey, ekey, esek) namedtuple for the - source/target pair. - """ - return self._get(source, target, 'ticket') - - -_KEY_STORE = KeyStore() - - -class _KDSClient(object): - - USER_AGENT = 'oslo-incubator/rpc' - - def __init__(self, endpoint=None, timeout=None): - """A KDS Client class.""" - - self._endpoint = endpoint - if timeout is not None: - self.timeout = float(timeout) - else: - self.timeout = None - - def _do_get(self, url, request): - req_kwargs = dict() - req_kwargs['headers'] = dict() - req_kwargs['headers']['User-Agent'] = self.USER_AGENT - req_kwargs['headers']['Content-Type'] = 'application/json' - req_kwargs['data'] = jsonutils.dumps({'request': request}) - if self.timeout is not None: - req_kwargs['timeout'] = self.timeout - - try: - resp = requests.get(url, **req_kwargs) - except requests.ConnectionError as e: - err = "Unable to establish connection. %s" % e - raise CommunicationError(url, err) - - return resp - - def _get_reply(self, url, resp): - if resp.text: - try: - body = jsonutils.loads(resp.text) - reply = body['reply'] - except (KeyError, TypeError, ValueError): - msg = "Failed to decode reply: %s" % resp.text - raise CommunicationError(url, msg) - else: - msg = "No reply data was returned." - raise CommunicationError(url, msg) - - return reply - - def _get_ticket(self, request, url=None, redirects=10): - """Send an HTTP request. - - Wraps around 'requests' to handle redirects and common errors. - """ - if url is None: - if not self._endpoint: - raise CommunicationError(url, 'Endpoint not configured') - url = self._endpoint + '/kds/ticket' - - while redirects: - resp = self._do_get(url, request) - if resp.status_code in (301, 302, 305): - # Redirected. Reissue the request to the new location. 
- url = resp.headers['location'] - redirects -= 1 - continue - elif resp.status_code != 200: - msg = "Request returned failure status: %s (%s)" - err = msg % (resp.status_code, resp.text) - raise CommunicationError(url, err) - - return self._get_reply(url, resp) - - raise CommunicationError(url, "Too many redirections, giving up!") - - def get_ticket(self, source, target, crypto, key): - - # prepare metadata - md = {'requestor': source, - 'target': target, - 'timestamp': time.time(), - 'nonce': struct.unpack('Q', os.urandom(8))[0]} - metadata = base64.b64encode(jsonutils.dumps(md)) - - # sign metadata - signature = crypto.sign(key, metadata) - - # HTTP request - reply = self._get_ticket({'metadata': metadata, - 'signature': signature}) - - # verify reply - signature = crypto.sign(key, (reply['metadata'] + reply['ticket'])) - if signature != reply['signature']: - raise InvalidEncryptedTicket(md['source'], md['destination']) - md = jsonutils.loads(base64.b64decode(reply['metadata'])) - if ((md['source'] != source or - md['destination'] != target or - md['expiration'] < time.time())): - raise InvalidEncryptedTicket(md['source'], md['destination']) - - # return ticket data - tkt = jsonutils.loads(crypto.decrypt(key, reply['ticket'])) - - return tkt, md['expiration'] - - -# we need to keep a global nonce, as this value should never repeat non -# matter how many SecureMessage objects we create -_NONCE = None - - -def _get_nonce(): - """We keep a single counter per instance, as it is so huge we can't - possibly cycle through within 1/100 of a second anyway. - """ - - global _NONCE - # Lazy initialize, for now get a random value, multiply by 2^32 and - # use it as the nonce base. The counter itself will rotate after - # 2^32 increments. - if _NONCE is None: - _NONCE = [struct.unpack('I', os.urandom(4))[0], 0] - - # Increment counter and wrap at 2^32 - _NONCE[1] += 1 - if _NONCE[1] > 0xffffffff: - _NONCE[1] = 0 - - # Return base + counter - return long((_NONCE[0] * 0xffffffff)) + _NONCE[1] - - -class SecureMessage(object): - """A Secure Message object. - - This class creates a signing/encryption facility for RPC messages. - It encapsulates all the necessary crypto primitives to insulate - regular code from the intricacies of message authentication, validation - and optionally encryption. - - :param topic: The topic name of the queue - :param host: The server name, together with the topic it forms a unique - name that is used to source signing keys, and verify - incoming messages. - :param conf: a ConfigOpts object - :param key: (optional) explicitly pass in endpoint private key. 
- If not provided it will be sourced from the service config - :param key_store: (optional) Storage class for local caching - :param encrypt: (defaults to False) Whether to encrypt messages - :param enctype: (defaults to AES) Cipher to use - :param hashtype: (defaults to SHA256) Hash function to use for signatures - """ - - def __init__(self, topic, host, conf, key=None, key_store=None, - encrypt=None, enctype='AES', hashtype='SHA256'): - - conf.register_group(secure_message_group) - conf.register_opts(secure_message_opts, group='secure_messages') - - self._name = '%s.%s' % (topic, host) - self._key = key - self._conf = conf.secure_messages - self._encrypt = self._conf.encrypt if (encrypt is None) else encrypt - self._crypto = cryptoutils.SymmetricCrypto(enctype, hashtype) - self._hkdf = cryptoutils.HKDF(hashtype) - self._kds = _KDSClient(self._conf.kds_endpoint) - - if self._key is None: - self._key = self._init_key(topic, self._name) - if self._key is None: - err = "Secret Key (or key file) is missing or malformed" - raise SharedKeyNotFound(self._name, err) - - self._key_store = key_store or _KEY_STORE - - def _init_key(self, topic, name): - keys = None - if self._conf.secret_keys_file: - with open(self._conf.secret_keys_file, 'r') as f: - keys = f.readlines() - elif self._conf.secret_key: - keys = self._conf.secret_key - - if keys is None: - return None - - for k in keys: - if k[0] == '#': - continue - if ':' not in k: - break - svc, key = k.split(':', 1) - if svc == topic or svc == name: - return base64.b64decode(key) - - return None - - def _split_key(self, key, size): - sig_key = key[:size] - enc_key = key[size:] - return sig_key, enc_key - - def _decode_esek(self, key, source, target, timestamp, esek): - """This function decrypts the esek buffer passed in and returns a - KeyStore to be used to check and decrypt the received message. - - :param key: The key to use to decrypt the ticket (esek) - :param source: The name of the source service - :param traget: The name of the target service - :param timestamp: The incoming message timestamp - :param esek: a base64 encoded encrypted block containing a JSON string - """ - rkey = None - - try: - s = self._crypto.decrypt(key, esek) - j = jsonutils.loads(s) - - rkey = base64.b64decode(j['key']) - expiration = j['timestamp'] + j['ttl'] - if j['timestamp'] > timestamp or timestamp > expiration: - raise InvalidExpiredTicket(source, target) - - except Exception: - raise InvalidEncryptedTicket(source, target) - - info = '%s,%s,%s' % (source, target, str(j['timestamp'])) - - sek = self._hkdf.expand(rkey, info, len(key) * 2) - - return self._split_key(sek, len(key)) - - def _get_ticket(self, target): - """This function will check if we already have a SEK for the specified - target in the cache, or will go and try to fetch a new SEK from the key - server. - - :param target: The name of the target service - """ - ticket = self._key_store.get_ticket(self._name, target) - - if ticket is not None: - return ticket - - tkt, expiration = self._kds.get_ticket(self._name, target, - self._crypto, self._key) - - self._key_store.put_ticket(self._name, target, - base64.b64decode(tkt['skey']), - base64.b64decode(tkt['ekey']), - tkt['esek'], expiration) - return self._key_store.get_ticket(self._name, target) - - def encode(self, version, target, json_msg): - """This is the main encoding function. 
- - It takes a target and a message and returns a tuple consisting of a - JSON serialized metadata object, a JSON serialized (and optionally - encrypted) message, and a signature. - - :param version: the current envelope version - :param target: The name of the target service (usually with hostname) - :param json_msg: a serialized json message object - """ - ticket = self._get_ticket(target) - - metadata = jsonutils.dumps({'source': self._name, - 'destination': target, - 'timestamp': time.time(), - 'nonce': _get_nonce(), - 'esek': ticket.esek, - 'encryption': self._encrypt}) - - message = json_msg - if self._encrypt: - message = self._crypto.encrypt(ticket.ekey, message) - - signature = self._crypto.sign(ticket.skey, - version + metadata + message) - - return (metadata, message, signature) - - def decode(self, version, metadata, message, signature): - """This is the main decoding function. - - It takes a version, metadata, message and signature strings and - returns a tuple with a (decrypted) message and metadata or raises - an exception in case of error. - - :param version: the current envelope version - :param metadata: a JSON serialized object with metadata for validation - :param message: a JSON serialized (base64 encoded encrypted) message - :param signature: a base64 encoded signature - """ - md = jsonutils.loads(metadata) - - check_args = ('source', 'destination', 'timestamp', - 'nonce', 'esek', 'encryption') - for arg in check_args: - if arg not in md: - raise InvalidMetadata('Missing metadata "%s"' % arg) - - if md['destination'] != self._name: - # TODO(simo) handle group keys by checking target - raise UnknownDestinationName(md['destination']) - - try: - skey, ekey = self._decode_esek(self._key, - md['source'], md['destination'], - md['timestamp'], md['esek']) - except InvalidExpiredTicket: - raise - except Exception: - raise InvalidMetadata('Failed to decode ESEK for %s/%s' % ( - md['source'], md['destination'])) - - sig = self._crypto.sign(skey, version + metadata + message) - - if sig != signature: - raise InvalidSignature(md['source'], md['destination']) - - if md['encryption'] is True: - msg = self._crypto.decrypt(ekey, message) - else: - msg = message - - return (md, msg) diff --git a/libra/openstack/common/rpc/serializer.py b/libra/openstack/common/rpc/serializer.py deleted file mode 100644 index 5fd346d6..00000000 --- a/libra/openstack/common/rpc/serializer.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides the definition of an RPC serialization handler""" - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class Serializer(object): - """Generic (de-)serialization definition base class.""" - - @abc.abstractmethod - def serialize_entity(self, context, entity): - """Serialize something to primitive form. 
- - :param context: Security context - :param entity: Entity to be serialized - :returns: Serialized form of entity - """ - pass - - @abc.abstractmethod - def deserialize_entity(self, context, entity): - """Deserialize something from primitive form. - - :param context: Security context - :param entity: Primitive to be deserialized - :returns: Deserialized form of entity - """ - pass - - -class NoOpSerializer(Serializer): - """A serializer that does nothing.""" - - def serialize_entity(self, context, entity): - return entity - - def deserialize_entity(self, context, entity): - return entity diff --git a/libra/openstack/common/rpc/service.py b/libra/openstack/common/rpc/service.py deleted file mode 100644 index 6bbd616b..00000000 --- a/libra/openstack/common/rpc/service.py +++ /dev/null @@ -1,78 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import log as logging -from libra.openstack.common import rpc -from libra.openstack.common.rpc import dispatcher as rpc_dispatcher -from libra.openstack.common import service - - -LOG = logging.getLogger(__name__) - - -class Service(service.Service): - """Service object for binaries running on hosts. - - A service enables rpc by listening to queues based on topic and host. - """ - def __init__(self, host, topic, manager=None, serializer=None): - super(Service, self).__init__() - self.host = host - self.topic = topic - self.serializer = serializer - if manager is None: - self.manager = self - else: - self.manager = manager - - def start(self): - super(Service, self).start() - - self.conn = rpc.create_connection(new=True) - LOG.debug(_("Creating Consumer connection for Service %s") % - self.topic) - - dispatcher = rpc_dispatcher.RpcDispatcher([self.manager], - self.serializer) - - # Share this same connection for these Consumers - self.conn.create_consumer(self.topic, dispatcher, fanout=False) - - node_topic = '%s.%s' % (self.topic, self.host) - self.conn.create_consumer(node_topic, dispatcher, fanout=False) - - self.conn.create_consumer(self.topic, dispatcher, fanout=True) - - # Hook to allow the manager to do other initializations after - # the rpc connection is created. - if callable(getattr(self.manager, 'initialize_service_hook', None)): - self.manager.initialize_service_hook(self) - - # Consume from all consumers in a thread - self.conn.consume_in_thread() - - def stop(self): - # Try to shut the connection down, but if we get any sort of - # errors, go ahead and ignore them.. 
as we're shutting down anyway - try: - self.conn.close() - except Exception: - pass - super(Service, self).stop() diff --git a/libra/openstack/common/rpc/zmq_receiver.py b/libra/openstack/common/rpc/zmq_receiver.py deleted file mode 100644 index 5d009c40..00000000 --- a/libra/openstack/common/rpc/zmq_receiver.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -eventlet.monkey_patch() - -import contextlib -import sys - -from oslo.config import cfg - -from libra.openstack.common import log as logging -from libra.openstack.common import rpc -from libra.openstack.common.rpc import impl_zmq - -CONF = cfg.CONF -CONF.register_opts(rpc.rpc_opts) -CONF.register_opts(impl_zmq.zmq_opts) - - -def main(): - CONF(sys.argv[1:], project='oslo') - logging.setup("oslo") - - with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: - reactor.consume_in_thread() - reactor.wait() diff --git a/libra/openstack/common/service.py b/libra/openstack/common/service.py deleted file mode 100644 index 3038bf67..00000000 --- a/libra/openstack/common/service.py +++ /dev/null @@ -1,462 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
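The receiver's main() above follows a pattern worth noting: parse the CLI/config, set up logging, then drive the reactor under contextlib.closing() so close() runs even if consume_in_thread() or wait() raises. A self-contained sketch of just that shutdown pattern, with a stand-in reactor (FakeReactor is hypothetical; the real ZmqProxy API is not reproduced here):

    import contextlib


    class FakeReactor(object):
        """Stand-in object with the same lifecycle as the zmq proxy."""

        def consume_in_thread(self):
            print('consumers started')

        def wait(self):
            print('blocking until consumers finish')

        def close(self):
            print('closed, even if wait() had raised')


    with contextlib.closing(FakeReactor()) as reactor:
        reactor.consume_in_thread()
        reactor.wait()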
- -"""Generic Node base class for all workers that run on hosts.""" - -import errno -import os -import random -import signal -import sys -import time - -import eventlet -from eventlet import event -import logging as std_logging -from oslo.config import cfg - -from libra.openstack.common import eventlet_backdoor -from libra.openstack.common.gettextutils import _ # noqa -from libra.openstack.common import importutils -from libra.openstack.common import log as logging -from libra.openstack.common import threadgroup - - -rpc = importutils.try_import('libra.openstack.common.rpc') -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _sighup_supported(): - return hasattr(signal, 'SIGHUP') - - -def _is_sighup(signo): - return _sighup_supported() and signo == signal.SIGHUP - - -def _signo_to_signame(signo): - signals = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'} - if _sighup_supported(): - signals[signal.SIGHUP] = 'SIGHUP' - return signals[signo] - - -def _set_signals_handler(handler): - signal.signal(signal.SIGTERM, handler) - signal.signal(signal.SIGINT, handler) - if _sighup_supported(): - signal.signal(signal.SIGHUP, handler) - - -class Launcher(object): - """Launch one or more services and wait for them to complete.""" - - def __init__(self): - """Initialize the service launcher. - - :returns: None - - """ - self.services = Services() - self.backdoor_port = eventlet_backdoor.initialize_if_enabled() - g_ - - def launch_service(self, service): - """Load and start the given service. - - :param service: The service you would like to start. - :returns: None - - """ - service.backdoor_port = self.backdoor_port - self.services.add(service) - - def stop(self): - """Stop all services which are currently running. - - :returns: None - - """ - self.services.stop() - - def wait(self): - """Waits until all services have been stopped, and then returns. - - :returns: None - - """ - self.services.wait() - - def restart(self): - """Reload config files and restart service. - - :returns: None - - """ - cfg.CONF.reload_config_files() - self.services.restart() - - -class SignalExit(SystemExit): - def __init__(self, signo, exccode=1): - super(SignalExit, self).__init__(exccode) - self.signo = signo - - -class ServiceLauncher(Launcher): - def _handle_signal(self, signo, frame): - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - raise SignalExit(signo) - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _wait_for_exit_or_signal(self, ready_callback=None): - status = None - signo = 0 - - LOG.debug(_('Full set of CONF:')) - CONF.log_opt_values(LOG, std_logging.DEBUG) - - try: - if ready_callback: - ready_callback() - super(ServiceLauncher, self).wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - finally: - self.stop() - if rpc: - try: - rpc.cleanup() - except Exception: - # We're shutting down, so it doesn't matter at this point. 
- LOG.exception(_('Exception during rpc cleanup.')) - - return status, signo - - def wait(self, ready_callback=None): - while True: - self.handle_signal() - status, signo = self._wait_for_exit_or_signal(ready_callback) - if not _is_sighup(signo): - return status - self.restart() - - -class ServiceWrapper(object): - def __init__(self, service, workers): - self.service = service - self.workers = workers - self.children = set() - self.forktimes = [] - - -class ProcessLauncher(object): - def __init__(self): - self.children = {} - self.sigcaught = None - self.running = True - rfd, self.writepipe = os.pipe() - self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') - self.handle_signal() - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _handle_signal(self, signo, frame): - self.sigcaught = signo - self.running = False - - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - - def _pipe_watcher(self): - # This will block until the write end is closed when the parent - # dies unexpectedly - self.readpipe.read() - - LOG.info(_('Parent process has died unexpectedly, exiting')) - - sys.exit(1) - - def _child_process_handle_signal(self): - # Setup child signal handlers differently - def _sigterm(*args): - signal.signal(signal.SIGTERM, signal.SIG_DFL) - raise SignalExit(signal.SIGTERM) - - def _sighup(*args): - signal.signal(signal.SIGHUP, signal.SIG_DFL) - raise SignalExit(signal.SIGHUP) - - signal.signal(signal.SIGTERM, _sigterm) - if _sighup_supported(): - signal.signal(signal.SIGHUP, _sighup) - # Block SIGINT and let the parent send us a SIGTERM - signal.signal(signal.SIGINT, signal.SIG_IGN) - - def _child_wait_for_exit_or_signal(self, launcher): - status = None - signo = 0 - - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. - try: - launcher.wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_('Unhandled exception')) - status = 2 - finally: - launcher.stop() - - return status, signo - - def _child_process(self, service): - self._child_process_handle_signal() - - # Reopen the eventlet hub to make sure we don't share an epoll - # fd with parent and/or siblings, which would be bad - eventlet.hubs.use_hub() - - # Close write to ensure only parent has it open - os.close(self.writepipe) - # Create greenthread to watch for parent to close pipe - eventlet.spawn_n(self._pipe_watcher) - - # Reseed random number generator - random.seed() - - launcher = Launcher() - launcher.launch_service(service) - return launcher - - def _start_child(self, wrap): - if len(wrap.forktimes) > wrap.workers: - # Limit ourselves to one process a second (over the period of - # number of workers * 1 second). This will allow workers to - # start up quickly but ensure we don't fork off children that - # die instantly too quickly. 
- if time.time() - wrap.forktimes[0] < wrap.workers: - LOG.info(_('Forking too fast, sleeping')) - time.sleep(1) - - wrap.forktimes.pop(0) - - wrap.forktimes.append(time.time()) - - pid = os.fork() - if pid == 0: - launcher = self._child_process(wrap.service) - while True: - self._child_process_handle_signal() - status, signo = self._child_wait_for_exit_or_signal(launcher) - if not _is_sighup(signo): - break - launcher.restart() - - os._exit(status) - - LOG.info(_('Started child %d'), pid) - - wrap.children.add(pid) - self.children[pid] = wrap - - return pid - - def launch_service(self, service, workers=1): - wrap = ServiceWrapper(service, workers) - - LOG.info(_('Starting %d workers'), wrap.workers) - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def _wait_child(self): - try: - # Don't block if no child processes have exited - pid, status = os.waitpid(0, os.WNOHANG) - if not pid: - return None - except OSError as exc: - if exc.errno not in (errno.EINTR, errno.ECHILD): - raise - return None - - if os.WIFSIGNALED(status): - sig = os.WTERMSIG(status) - LOG.info(_('Child %(pid)d killed by signal %(sig)d'), - dict(pid=pid, sig=sig)) - else: - code = os.WEXITSTATUS(status) - LOG.info(_('Child %(pid)s exited with status %(code)d'), - dict(pid=pid, code=code)) - - if pid not in self.children: - LOG.warning(_('pid %d not in child list'), pid) - return None - - wrap = self.children.pop(pid) - wrap.children.remove(pid) - return wrap - - def _respawn_children(self): - while self.running: - wrap = self._wait_child() - if not wrap: - # Yield to other threads if no children have exited - # Sleep for a short time to avoid excessive CPU usage - # (see bug #1095346) - eventlet.greenthread.sleep(.01) - continue - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def wait(self): - """Loop waiting on children to die and respawning as necessary.""" - - LOG.debug(_('Full set of CONF:')) - CONF.log_opt_values(LOG, std_logging.DEBUG) - - while True: - self.handle_signal() - self._respawn_children() - if self.sigcaught: - signame = _signo_to_signame(self.sigcaught) - LOG.info(_('Caught %s, stopping children'), signame) - if not _is_sighup(self.sigcaught): - break - - for pid in self.children: - os.kill(pid, signal.SIGHUP) - self.running = True - self.sigcaught = None - - for pid in self.children: - try: - os.kill(pid, signal.SIGTERM) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - - # Wait for children to die - if self.children: - LOG.info(_('Waiting on %d children to exit'), len(self.children)) - while self.children: - self._wait_child() - - -class Service(object): - """Service object for binaries running on hosts.""" - - def __init__(self, threads=1000): - self.tg = threadgroup.ThreadGroup(threads) - - # signal that the service is done shutting itself down: - self._done = event.Event() - - def reset(self): - # NOTE(Fengqian): docs for Event.reset() recommend against using it - self._done = event.Event() - - def start(self): - pass - - def stop(self): - self.tg.stop() - self.tg.wait() - # Signal that service cleanup is done: - if not self._done.ready(): - self._done.send() - - def wait(self): - self._done.wait() - - -class Services(object): - - def __init__(self): - self.services = [] - self.tg = threadgroup.ThreadGroup() - self.done = event.Event() - - def add(self, service): - self.services.append(service) - self.tg.add_thread(self.run_service, service, self.done) - - def stop(self): - # wait for graceful shutdown 
of services: - for service in self.services: - service.stop() - service.wait() - - # Each service has performed cleanup, now signal that the run_service - # wrapper threads can now die: - if not self.done.ready(): - self.done.send() - - # reap threads: - self.tg.stop() - - def wait(self): - self.tg.wait() - - def restart(self): - self.stop() - self.done = event.Event() - for restart_service in self.services: - restart_service.reset() - self.tg.add_thread(self.run_service, restart_service, self.done) - - @staticmethod - def run_service(service, done): - """Service start wrapper. - - :param service: service to run - :param done: event to wait on until a shutdown is triggered - :returns: None - - """ - service.start() - done.wait() - - -def launch(service, workers=None): - if workers: - launcher = ProcessLauncher() - launcher.launch_service(service, workers=workers) - else: - launcher = ServiceLauncher() - launcher.launch_service(service) - return launcher diff --git a/libra/openstack/common/sslutils.py b/libra/openstack/common/sslutils.py deleted file mode 100644 index ae035925..00000000 --- a/libra/openstack/common/sslutils.py +++ /dev/null @@ -1,100 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
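The launch() helper at the end of service.py above is the single entry point callers use: passing workers selects the forking ProcessLauncher, omitting it selects the in-process ServiceLauncher. A hedged usage sketch under that reading (HeartbeatService and its 30-second interval are invented for illustration; tg.add_timer comes from the ThreadGroup class later in this patch):

    from libra.openstack.common import service


    class HeartbeatService(service.Service):
        def start(self):
            super(HeartbeatService, self).start()
            # Run _beat every 30 seconds on the service's thread group.
            self.tg.add_timer(30, self._beat)

        def _beat(self):
            pass  # emit a heartbeat here


    # Fork two workers; wait() reaps and respawns children until a signal.
    launcher = service.launch(HeartbeatService(), workers=2)
    launcher.wait()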
- -import os -import ssl - -from oslo.config import cfg - -from libra.openstack.common.gettextutils import _ # noqa - - -ssl_opts = [ - cfg.StrOpt('ca_file', - default=None, - help="CA certificate file to use to verify " - "connecting clients"), - cfg.StrOpt('cert_file', - default=None, - help="Certificate file to use when starting " - "the server securely"), - cfg.StrOpt('key_file', - default=None, - help="Private key file to use when starting " - "the server securely"), -] - - -CONF = cfg.CONF -CONF.register_opts(ssl_opts, "ssl") - - -def is_enabled(): - cert_file = CONF.ssl.cert_file - key_file = CONF.ssl.key_file - ca_file = CONF.ssl.ca_file - use_ssl = cert_file or key_file - - if cert_file and not os.path.exists(cert_file): - raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) - - if ca_file and not os.path.exists(ca_file): - raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) - - if key_file and not os.path.exists(key_file): - raise RuntimeError(_("Unable to find key_file : %s") % key_file) - - if use_ssl and (not cert_file or not key_file): - raise RuntimeError(_("When running server in SSL mode, you must " - "specify both a cert_file and key_file " - "option value in your configuration file")) - - return use_ssl - - -def wrap(sock): - ssl_kwargs = { - 'server_side': True, - 'certfile': CONF.ssl.cert_file, - 'keyfile': CONF.ssl.key_file, - 'cert_reqs': ssl.CERT_NONE, - } - - if CONF.ssl.ca_file: - ssl_kwargs['ca_certs'] = CONF.ssl.ca_file - ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED - - return ssl.wrap_socket(sock, **ssl_kwargs) - - -_SSL_PROTOCOLS = { - "tlsv1": ssl.PROTOCOL_TLSv1, - "sslv23": ssl.PROTOCOL_SSLv23, - "sslv3": ssl.PROTOCOL_SSLv3 -} - -try: - _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 -except AttributeError: - pass - - -def validate_ssl_version(version): - key = version.lower() - try: - return _SSL_PROTOCOLS[key] - except KeyError: - raise RuntimeError(_("Invalid SSL version : %s") % version) diff --git a/libra/openstack/common/test.py b/libra/openstack/common/test.py deleted file mode 100644 index 84e73bb4..00000000 --- a/libra/openstack/common/test.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Common utilities used in testing""" - -import os - -import fixtures -import testtools - - -class BaseTestCase(testtools.TestCase): - - def setUp(self): - super(BaseTestCase, self).setUp() - self._set_timeout() - self._fake_output() - self.useFixture(fixtures.FakeLogger('libra.openstack.common')) - self.useFixture(fixtures.NestedTempfile()) - - def _set_timeout(self): - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. 
-            test_timeout = 0
-        if test_timeout > 0:
-            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
-
-    def _fake_output(self):
-        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
-                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
-            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
-        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
-                os.environ.get('OS_STDERR_CAPTURE') == '1'):
-            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
diff --git a/libra/openstack/common/threadgroup.py b/libra/openstack/common/threadgroup.py
deleted file mode 100644
index d2b7c240..00000000
--- a/libra/openstack/common/threadgroup.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import eventlet
-from eventlet import greenpool
-from eventlet import greenthread
-
-from libra.openstack.common import log as logging
-from libra.openstack.common import loopingcall
-
-
-LOG = logging.getLogger(__name__)
-
-
-def _thread_done(gt, *args, **kwargs):
-    """Callback function to be passed to GreenThread.link() when we spawn().
-    Calls the :class:`ThreadGroup` to notify it.
-
-    """
-    kwargs['group'].thread_done(kwargs['thread'])
-
-
-class Thread(object):
-    """Wrapper around a greenthread, that holds a reference to the
-    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup`
-    when it has finished so it can be removed from the threads list.
-    """
-    def __init__(self, thread, group):
-        self.thread = thread
-        self.thread.link(_thread_done, group=group, thread=self)
-
-    def stop(self):
-        self.thread.kill()
-
-    def wait(self):
-        return self.thread.wait()
-
-
-class ThreadGroup(object):
-    """The point of the ThreadGroup class is to:
-
-    * keep track of timers and greenthreads (making it easier to stop them
-      when need be).
-    * provide an easy API to add timers.
- """ - def __init__(self, thread_pool_size=10): - self.pool = greenpool.GreenPool(thread_pool_size) - self.threads = [] - self.timers = [] - - def add_dynamic_timer(self, callback, initial_delay=None, - periodic_interval_max=None, *args, **kwargs): - timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) - timer.start(initial_delay=initial_delay, - periodic_interval_max=periodic_interval_max) - self.timers.append(timer) - - def add_timer(self, interval, callback, initial_delay=None, - *args, **kwargs): - pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) - pulse.start(interval=interval, - initial_delay=initial_delay) - self.timers.append(pulse) - - def add_thread(self, callback, *args, **kwargs): - gt = self.pool.spawn(callback, *args, **kwargs) - th = Thread(gt, self) - self.threads.append(th) - - def thread_done(self, thread): - self.threads.remove(thread) - - def stop(self): - current = greenthread.getcurrent() - for x in self.threads: - if x is current: - # don't kill the current thread. - continue - try: - x.stop() - except Exception as ex: - LOG.exception(ex) - - for x in self.timers: - try: - x.stop() - except Exception as ex: - LOG.exception(ex) - self.timers = [] - - def wait(self): - for x in self.timers: - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) - current = greenthread.getcurrent() - for x in self.threads: - if x is current: - continue - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) diff --git a/libra/openstack/common/timeutils.py b/libra/openstack/common/timeutils.py deleted file mode 100644 index 98d877d5..00000000 --- a/libra/openstack/common/timeutils.py +++ /dev/null @@ -1,197 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Time related utilities and helper functions. 
-""" - -import calendar -import datetime -import time - -import iso8601 -import six - - -# ISO 8601 extended time format with microseconds -_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' -PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND - - -def isotime(at=None, subsecond=False): - """Stringify time in ISO 8601 format.""" - if not at: - at = utcnow() - st = at.strftime(_ISO8601_TIME_FORMAT - if not subsecond - else _ISO8601_TIME_FORMAT_SUBSECOND) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' else tz) - return st - - -def parse_isotime(timestr): - """Parse time from ISO 8601 format.""" - try: - return iso8601.parse_date(timestr) - except iso8601.ParseError as e: - raise ValueError(unicode(e)) - except TypeError as e: - raise ValueError(unicode(e)) - - -def strtime(at=None, fmt=PERFECT_TIME_FORMAT): - """Returns formatted utcnow.""" - if not at: - at = utcnow() - return at.strftime(fmt) - - -def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): - """Turn a formatted time back into a datetime.""" - return datetime.datetime.strptime(timestr, fmt) - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC naive object.""" - offset = timestamp.utcoffset() - if offset is None: - return timestamp - return timestamp.replace(tzinfo=None) - offset - - -def is_older_than(before, seconds): - """Return True if before is older than seconds.""" - if isinstance(before, six.string_types): - before = parse_strtime(before).replace(tzinfo=None) - return utcnow() - before > datetime.timedelta(seconds=seconds) - - -def is_newer_than(after, seconds): - """Return True if after is newer than seconds.""" - if isinstance(after, six.string_types): - after = parse_strtime(after).replace(tzinfo=None) - return after - utcnow() > datetime.timedelta(seconds=seconds) - - -def utcnow_ts(): - """Timestamp version of our utcnow function.""" - if utcnow.override_time is None: - # NOTE(kgriffs): This is several times faster - # than going through calendar.timegm(...) - return int(time.time()) - - return calendar.timegm(utcnow().timetuple()) - - -def utcnow(): - """Overridable version of utils.utcnow.""" - if utcnow.override_time: - try: - return utcnow.override_time.pop(0) - except AttributeError: - return utcnow.override_time - return datetime.datetime.utcnow() - - -def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formated date from timestamp.""" - return isotime(datetime.datetime.utcfromtimestamp(timestamp)) - - -utcnow.override_time = None - - -def set_time_override(override_time=None): - """Overrides utils.utcnow. - - Make it return a constant time or a list thereof, one at a time. - - :param override_time: datetime instance or list thereof. If not - given, defaults to the current UTC time. - """ - utcnow.override_time = override_time or datetime.datetime.utcnow() - - -def advance_time_delta(timedelta): - """Advance overridden time using a datetime.timedelta.""" - assert(not utcnow.override_time is None) - try: - for dt in utcnow.override_time: - dt += timedelta - except TypeError: - utcnow.override_time += timedelta - - -def advance_time_seconds(seconds): - """Advance overridden time by seconds.""" - advance_time_delta(datetime.timedelta(0, seconds)) - - -def clear_time_override(): - """Remove the overridden time.""" - utcnow.override_time = None - - -def marshall_now(now=None): - """Make an rpc-safe datetime with microseconds. - - Note: tzinfo is stripped, but not required for relative times. 
- """ - if not now: - now = utcnow() - return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, - minute=now.minute, second=now.second, - microsecond=now.microsecond) - - -def unmarshall_time(tyme): - """Unmarshall a datetime dict.""" - return datetime.datetime(day=tyme['day'], - month=tyme['month'], - year=tyme['year'], - hour=tyme['hour'], - minute=tyme['minute'], - second=tyme['second'], - microsecond=tyme['microsecond']) - - -def delta_seconds(before, after): - """Return the difference between two timing objects. - - Compute the difference in seconds between two date, time, or - datetime objects (as a float, to microsecond resolution). - """ - delta = after - before - try: - return delta.total_seconds() - except AttributeError: - return ((delta.days * 24 * 3600) + delta.seconds + - float(delta.microseconds) / (10 ** 6)) - - -def is_soon(dt, window): - """Determines if time is going to happen in the next window seconds. - - :params dt: the time - :params window: minimum seconds to remain to consider the time not soon - - :return: True if expiration is within the given duration - """ - soon = (utcnow() + datetime.timedelta(seconds=window)) - return normalize_time(dt) <= soon diff --git a/libra/openstack/common/uuidutils.py b/libra/openstack/common/uuidutils.py deleted file mode 100644 index 7608acb9..00000000 --- a/libra/openstack/common/uuidutils.py +++ /dev/null @@ -1,39 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -UUID related utilities and helper functions. -""" - -import uuid - - -def generate_uuid(): - return str(uuid.uuid4()) - - -def is_uuid_like(val): - """Returns validation of a value as a UUID. - - For our purposes, a UUID is a canonical form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False diff --git a/libra/openstack/common/versionutils.py b/libra/openstack/common/versionutils.py deleted file mode 100644 index f7b1f8a8..00000000 --- a/libra/openstack/common/versionutils.py +++ /dev/null @@ -1,45 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helpers for comparing version strings. 
-""" - -import pkg_resources - - -def is_compatible(requested_version, current_version, same_major=True): - """Determine whether `requested_version` is satisfied by - `current_version`; in other words, `current_version` is >= - `requested_version`. - - :param requested_version: version to check for compatibility - :param current_version: version to check against - :param same_major: if True, the major version must be identical between - `requested_version` and `current_version`. This is used when a - major-version difference indicates incompatibility between the two - versions. Since this is the common-case in practice, the default is - True. - :returns: True if compatible, False if not - """ - requested_parts = pkg_resources.parse_version(requested_version) - current_parts = pkg_resources.parse_version(current_version) - - if same_major and (requested_parts[0] != current_parts[0]): - return False - - return current_parts >= requested_parts diff --git a/libra/openstack/common/xmlutils.py b/libra/openstack/common/xmlutils.py deleted file mode 100644 index b131d3e2..00000000 --- a/libra/openstack/common/xmlutils.py +++ /dev/null @@ -1,74 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
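For illustration, how is_compatible() above behaves on a few sample inputs (values chosen here, not taken from the original tests):

    from libra.openstack.common import versionutils

    versionutils.is_compatible('2.0', '2.3')  # True: same major, 2.3 >= 2.0
    versionutils.is_compatible('2.3', '2.0')  # False: 2.0 is older than 2.3
    versionutils.is_compatible('2.0', '3.1')  # False: major versions differ
    versionutils.is_compatible('2.0', '3.1', same_major=False)  # True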
- -from xml.dom import minidom -from xml.parsers import expat -from xml import sax -from xml.sax import expatreader - - -class ProtectedExpatParser(expatreader.ExpatParser): - """An expat parser which disables DTD's and entities by default.""" - - def __init__(self, forbid_dtd=True, forbid_entities=True, - *args, **kwargs): - # Python 2.x old style class - expatreader.ExpatParser.__init__(self, *args, **kwargs) - self.forbid_dtd = forbid_dtd - self.forbid_entities = forbid_entities - - def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): - raise ValueError("Inline DTD forbidden") - - def entity_decl(self, entityName, is_parameter_entity, value, base, - systemId, publicId, notationName): - raise ValueError(" entity declaration forbidden") - - def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): - # expat 1.2 - raise ValueError(" unparsed entity forbidden") - - def external_entity_ref(self, context, base, systemId, publicId): - raise ValueError(" external entity forbidden") - - def notation_decl(self, name, base, sysid, pubid): - raise ValueError(" notation forbidden") - - def reset(self): - expatreader.ExpatParser.reset(self) - if self.forbid_dtd: - self._parser.StartDoctypeDeclHandler = self.start_doctype_decl - self._parser.EndDoctypeDeclHandler = None - if self.forbid_entities: - self._parser.EntityDeclHandler = self.entity_decl - self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl - self._parser.ExternalEntityRefHandler = self.external_entity_ref - self._parser.NotationDeclHandler = self.notation_decl - try: - self._parser.SkippedEntityHandler = None - except AttributeError: - # some pyexpat versions do not support SkippedEntity - pass - - -def safe_minidom_parse_string(xml_string): - """Parse an XML string using minidom safely. - - """ - try: - return minidom.parseString(xml_string, parser=ProtectedExpatParser()) - except sax.SAXParseException: - raise expat.ExpatError() diff --git a/libra/tests/__init__.py b/libra/tests/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/tests/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/tests/admin_api/__init__.py b/libra/tests/admin_api/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/tests/admin_api/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/tests/api/__init__.py b/libra/tests/api/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/tests/api/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/tests/api/v1_1/__init__.py b/libra/tests/api/v1_1/__init__.py deleted file mode 100644 index 9cd1ad67..00000000 --- a/libra/tests/api/v1_1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from libra.tests import api_base - - -""" -Base TestCase for V1.1 API tests. -""" - - -class TestCase(api_base.TestCase): - def setUp(self): - root_dir = self.path_get() - - config = { - 'app': { - 'root': 'libra.api.controllers.root.RootController', - 'modules': ['libra.api'], - 'static_root': '%s/public' % root_dir, - 'template_path': '%s/libra/api/templates' % root_dir, - 'enable_acl': False, - }, - 'wsme': { - 'debug': True, - } - } - self.app = self._make_app(config) diff --git a/libra/tests/api_base.py b/libra/tests/api_base.py deleted file mode 100644 index b98b2c59..00000000 --- a/libra/tests/api_base.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from libra.tests import base -import pecan -import pecan.testing - -from oslo.config import cfg - - -class TestCase(base.TestCase): - """Used for functional tests of Pecan controllers where you need to - test your literal application and its integration with the - framework. 
- """ - def _make_app(self, config=None, enable_acl=False): - # Determine where we are so we can set up paths in the config - root_dir = self.path_get() - self.config = config or self.config - return pecan.testing.load_test_app(self.config) - - def tearDown(self): - super(FunctionalTest, self).tearDown() - self.app = None - pecan.set_config({}, overwrite=True) diff --git a/libra/tests/base.py b/libra/tests/base.py deleted file mode 100644 index bb7d5b33..00000000 --- a/libra/tests/base.py +++ /dev/null @@ -1,179 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copied partially from ceilometer - -"""Base classes for our unit tests. - -Allows overriding of config for use of fakes, and some black magic for -inline callbacks. - -""" - -import eventlet -eventlet.monkey_patch(os=False) - -import copy -import os -import shutil -import tempfile -import sys - -import fixtures -import testtools - -#from libra.db import migration -from libra.common import options -from libra.openstack.common import log -from libra.openstack.common import test -from libra.openstack.common.fixture import config -from libra.openstack.common.fixture import moxstubout -from libra.openstack.common.notifier import test_notifier - - -options.CONF.set_override('use_stderr', False) - -# NOTE: Tests fail due to diverse options being required. -options.CONF.import_group('api', 'libra.api') -options.CONF.import_group('mgm', 'libra.mgm') - -# Used for notification testing. -options.CONF.import_opt('rpc_backend', 'libra.openstack.common.rpc') -options.CONF.import_opt( - 'notification_driver', - 'libra.openstack.common.notifier.api') - -log.setup('libra') - -_DB_CACHE = None - - -class NotifierFixture(fixtures.Fixture): - def tearDown(self): - self.clear() - - def get(self): - return test_notifier.NOTIFICATIONS - - def clear(self): - test_notifier.NOTIFICATIONS = [] - - -class Database(fixtures.Fixture): - """ - Fixture for Databases. Handles syncing, tearing down etc. 
- """ - def __init__(self, db_session, db_migrate, sql_connection, - sqlite_db, sqlite_clean_db): - self.sql_connection = sql_connection - self.sqlite_db = sqlite_db - self.sqlite_clean_db = sqlite_clean_db - - self.engine = db_session.get_engine() - self.engine.dispose() - conn = self.engine.connect() - if sql_connection == "sqlite://": - if db_migrate.db_version() > db_migrate.INIT_VERSION: - return - else: - testdb = os.path.join(CONF.state_path, sqlite_db) - if os.path.exists(testdb): - return - db_migrate.db_sync() -# self.post_migrations() - if sql_connection == "sqlite://": - conn = self.engine.connect() - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - else: - cleandb = os.path.join(CONF.state_path, sqlite_clean_db) - shutil.copyfile(testdb, cleandb) - - def setUp(self): - super(Database, self).setUp() - - if self.sql_connection == "sqlite://": - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - else: - shutil.copyfile( - os.path.join(CONF.state_path, self.sqlite_clean_db), - os.path.join(CONF.state_path, self.sqlite_db)) - - -class TestCase(test.BaseTestCase): - """ - Base test case that holds any "extras" that we use like assertX functions. - """ - - def config(self, **kwargs): - group = kwargs.pop('group', None) - - for k, v in kwargs.iteritems(): - cfg.CONF.set_override(k, v, group) - - def path_get(self, project_file=None): - root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..', - ) - ) - if project_file: - return os.path.join(root, project_file) - else: - return root - - -class ServiceTestCase(test.BaseTestCase): - """Base test case for Libra tests.""" - def setUp(self): - super(ServiceTestCase, self).setUp() - options.add_common_opts() - self.CONF = self.useFixture(config.Config(options.CONF)).conf - - self.CONF.set_override( - 'notification_driver', - ['libra.openstack.common.notifier.test_notifier'] - ) - - self.CONF.set_override( - 'rpc_backend', 'libra.openstack.common.rpc.impl_fake' - ) - - - # NOTE: Provide some fun defaults for testing - self.CONF.set_override('az', 'default', group='mgm') - self.CONF.set_override('nova_secgroup', 'default', group='mgm') - self.CONF.set_override('nova_image', 'image', group='mgm') - self.CONF.set_override('nova_image_size', 'm1.small', group='mgm') - self.CONF.set_override('nova_keyname', 'key', group='mgm') - self.CONF.set_override('nova_user', 'user', group='mgm') - self.CONF.set_override('nova_pass', 'secret', group='mgm') - self.CONF.set_override('nova_auth_url', 'http://localhost:35357/2.0', - group='mgm') - self.CONF.set_override('nova_region', 'region', group='mgm') - - self.CONF.set_override('db_sections', 'test', group='api') - self.CONF.set_override('swift_endpoint', 'test', group='api') - self.CONF.set_override('swift_basepath', 'test', group='api') - - self.notifications = NotifierFixture() - self.useFixture(self.notifications) - - self.CONF([], project='libra') diff --git a/libra/tests/fake_body.json b/libra/tests/fake_body.json deleted file mode 100644 index eeb092bd..00000000 --- a/libra/tests/fake_body.json +++ /dev/null @@ -1,2 +0,0 @@ -{"server": {"status": "ACTIVE", "updated": "2012-10-10T11:55:55Z", "hostId": "", "user_id": "18290556240782", "name": "lbass_0", "links": [{"href": "https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v1.1/58012755801586/servers/417773", "rel": "self"}, {"href": "https://az-1.region-a.geo-1.compute.hpcloudsvc.com/58012755801586/servers/417773", "rel": 
"bookmark"}], "created": "2012-10-10T11:55:55Z", "tenant_id": "58012755801586", "image": {"id": "8419", "links": [{"href": "https://az-1.region-a.geo-1.compute.hpcloudsvc.com/58012755801586/images/8419", "rel": "bookmark"}]}, "adminPass": "u2LKPA73msRTxDMC", "uuid": "14984389-8cc5-4780-be64-2d31ace662ad", "accessIPv4": "", "metadata": {}, "accessIPv6": "", "key_name": "default", "flavor": {"id": "100", "links": [{"href": "https://az-1.region-a.geo-1.compute.hpcloudsvc.com/58012755801586/flavors/100", "rel": "bookmark"}]}, "config_drive": "", "id": 417773, "security_groups": [{"name": "default", "links": [{"href": "https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v1.1/58012755801586/os-security-groups/4008", "rel": "bookmark"}], "id": 4008}], "addresses": {}}} - diff --git a/libra/tests/mgm/___init__.py b/libra/tests/mgm/___init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/tests/mgm/___init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/tests/mock_objects.py b/libra/tests/mock_objects.py deleted file mode 100644 index eeb92566..00000000 --- a/libra/tests/mock_objects.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import logging - -from libra.worker.drivers.haproxy.services_base import ServicesBase - - -class FakeJob(object): - def __init__(self, data): - """ - data: JSON object to convert to a string - """ - self.data = json.dumps(data) - - -class FakeOSServices(ServicesBase): - def service_stop(self): - pass - - def service_start(self): - pass - - def service_reload(self): - pass - - def write_config(self, config_str): - pass - - def remove_configs(self): - pass - - def sudo_rm(self, file): - pass - - def syslog_restart(self): - pass - - -class FakeFaultingOSServices(ServicesBase): - def service_stop(self): - raise Exception("fault") - - def service_start(self): - raise Exception("fault") - - def service_reload(self): - raise Exception("fault") - - def service_restart(self): - raise Exception("fault") - - def write_config(self): - raise Exception("fault") - - def remove_configs(self): - raise Exception("fault") - - -class MockLoggingHandler(logging.Handler): - """Mock logging handler to check for expected logs.""" - - def __init__(self, *args, **kwargs): - self.reset() - logging.Handler.__init__(self, *args, **kwargs) - - def emit(self, record): - self.messages[record.levelname.lower()].append(record.getMessage()) - - def reset(self): - self.messages = { - 'debug': [], - 'info': [], - 'warning': [], - 'error': [], - 'critical': [], - } diff --git a/libra/tests/worker/__init__.py b/libra/tests/worker/__init__.py deleted file mode 100644 index 92bd912f..00000000 --- a/libra/tests/worker/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/tests/worker/test_controller.py b/libra/tests/worker/test_controller.py deleted file mode 100644 index 44be5878..00000000 --- a/libra/tests/worker/test_controller.py +++ /dev/null @@ -1,484 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
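MockLoggingHandler above buckets every emitted record by lower-cased level name, which lets a test assert on log output directly instead of scraping a stream. A small usage sketch (the logger name is chosen here for illustration):

    import logging

    from libra.tests.mock_objects import MockLoggingHandler

    handler = MockLoggingHandler()
    logger = logging.getLogger('libra.tests.demo')
    logger.addHandler(handler)

    logger.warning('node 1234 is DOWN')
    assert handler.messages['warning'] == ['node 1234 is DOWN']

    handler.reset()
    assert handler.messages['warning'] == []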
- -import logging -from libra.tests.base import TestCase -import libra.tests.mock_objects -from libra import __version__ as libra_version -from libra import __release__ as libra_release -from libra.openstack.common import log -from libra.worker.controller import LBaaSController as c -from libra.worker.drivers.base import LoadBalancerDriver -from libra.worker.drivers.haproxy.driver import HAProxyDriver - -LOG = log.getLogger(__name__) - - -class TestWorkerController(TestCase): - def setUp(self): - super(TestWorkerController, self).setUp() - self.driver = HAProxyDriver('libra.tests.mock_objects.FakeOSServices', - None, None, None) - - def testBadAction(self): - msg = { - c.ACTION_FIELD: 'BOGUS' - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_FAILURE) - - def testCaseSensitive(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - 'LoAdBaLaNcErS': [{'protocol': 'http'}] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - - def testUpdate(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ] - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - - def testSuspend(self): - msg = { - c.ACTION_FIELD: 'SUSPEND' - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - - def testEnable(self): - msg = { - c.ACTION_FIELD: 'ENABLE' - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - - def testDelete(self): - msg = { - c.ACTION_FIELD: 'DELETE' - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - - def testUpdateMissingNodeID(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'address': '10.0.0.1', - 'port': 80 - } - ] - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing node 'id'") - - def testUpdateEmptyNodeID(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': '', - 'address': '10.0.0.1', - 'port': 80 - } - ] - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing node 'id'") - - def testUpdateMissingLBs(self): - msg = { - c.ACTION_FIELD: 'UPDATE' - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing '%s' element" % c.LBLIST_FIELD) - - def testUpdateMissingNodes(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [{'protocol': 'http'}] - } - controller = c(self.driver, msg) - response = controller.run() - 
self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing 'nodes' element") - - def testUpdateMissingProto(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ] - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing required 'protocol' value.") - - def testUpdateGoodMonitor(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ], - 'monitor': { - 'type': 'CONNECT', - 'delay': 60, - 'timeout': 30, - 'attempts': 1, - 'path': '/healthcheck' - } - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertNotIn('badRequest', response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - - def testUpdateMonitorMissingType(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ], - 'monitor': { - 'delay': 60, - 'timeout': 30, - 'attempts': 1, - 'path': '/healthcheck' - } - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing monitor value 'type'") - - def testUpdateMonitorMissingDelay(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ], - 'monitor': { - 'type': 'CONNECT', - 'timeout': 30, - 'attempts': 1, - 'path': '/healthcheck' - } - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing monitor value 'delay'") - - def testUpdateMonitorMissingTimeout(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ], - 'monitor': { - 'type': 'CONNECT', - 'delay': 60, - 'attempts': 1, - 'path': '/healthcheck' - } - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing monitor value 'timeout'") - - def testUpdateMonitorMissingAttempts(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ], - 'monitor': { - 'type': 'CONNECT', - 'delay': 60, - 'timeout': 30, - 'path': '/healthcheck' - } - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing monitor value 'attempts'") - - def testUpdateMonitorMissingPath(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ], - 'monitor': { - 'type': 'CONNECT', - 'delay': 60, - 'timeout': 30, - 'attempts': 1 - } - } - ] - } - controller 
= c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - - def testBadAlgorithm(self): - msg = { - c.ACTION_FIELD: 'UPDATE', - c.LBLIST_FIELD: [ - { - 'protocol': 'http', - 'algorithm': 'BOGUS', - 'nodes': [ - { - 'id': 1234, - 'address': '10.0.0.1', - 'port': 80 - } - ] - } - ] - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn(c.RESPONSE_FIELD, response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_FAILURE) - - def testDiscover(self): - msg = { - c.ACTION_FIELD: 'DISCOVER' - } - controller = c(self.driver, msg) - response = controller.run() - self.assertIn('version', response) - self.assertIn('release', response) - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_SUCCESS) - self.assertEqual(response['version'], libra_version) - self.assertEqual(response['release'], libra_release) - - def testArchiveMissingMethod(self): - msg = { - c.ACTION_FIELD: 'ARCHIVE' - } - null_driver = LoadBalancerDriver() - controller = c(null_driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, "Missing '%s' element" % c.OBJ_STORE_TYPE_FIELD) - - def testArchiveInvalidMethod(self): - msg = { - c.ACTION_FIELD: 'ARCHIVE', - c.OBJ_STORE_TYPE_FIELD: 'bad' - } - null_driver = LoadBalancerDriver() - controller = c(null_driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - - def testArchiveSwiftRequiredParams(self): - null_driver = LoadBalancerDriver() - - # Missing basepath field - msg = { - c.ACTION_FIELD: 'ARCHIVE', - c.OBJ_STORE_TYPE_FIELD: 'Swift', - c.OBJ_STORE_ENDPOINT_FIELD: "https://example.com", - c.OBJ_STORE_TOKEN_FIELD: "XXXX", - c.LBLIST_FIELD: [{'protocol': 'http', 'id': '123'}] - } - controller = c(null_driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, - "Missing '%s' element" % c.OBJ_STORE_BASEPATH_FIELD) - - # Missing endpoint field - msg = { - c.ACTION_FIELD: 'ARCHIVE', - c.OBJ_STORE_TYPE_FIELD: 'Swift', - c.OBJ_STORE_BASEPATH_FIELD: "/lbaaslogs", - c.OBJ_STORE_TOKEN_FIELD: "XXXX", - c.LBLIST_FIELD: [{'protocol': 'http', 'id': '123'}] - } - controller = c(null_driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, - "Missing '%s' element" % c.OBJ_STORE_ENDPOINT_FIELD) - - # Missing token field - msg = { - c.ACTION_FIELD: 'ARCHIVE', - c.OBJ_STORE_TYPE_FIELD: 'Swift', - c.OBJ_STORE_BASEPATH_FIELD: "/lbaaslogs", - c.OBJ_STORE_ENDPOINT_FIELD: "https://example.com", - c.LBLIST_FIELD: [{'protocol': 'http', 'id': '123'}] - } - controller = c(null_driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - self.assertEqual(msg, - "Missing '%s' element" % c.OBJ_STORE_TOKEN_FIELD) - - # Missing load balancer field - msg = { - c.ACTION_FIELD: 'ARCHIVE', - c.OBJ_STORE_TYPE_FIELD: 'Swift', - c.OBJ_STORE_BASEPATH_FIELD: "/lbaaslogs", - c.OBJ_STORE_ENDPOINT_FIELD: "https://example.com", - c.OBJ_STORE_TOKEN_FIELD: "XXXX" - } - controller = c(null_driver, msg) - response = controller.run() - self.assertIn('badRequest', response) - msg = response['badRequest']['validationErrors']['message'] - 
self.assertEqual(msg, "Missing '%s' element" % c.LBLIST_FIELD) - - def testArchiveNotImplemented(self): - msg = { - c.ACTION_FIELD: 'ARCHIVE', - c.OBJ_STORE_TYPE_FIELD: 'Swift', - c.OBJ_STORE_BASEPATH_FIELD: "/lbaaslogs", - c.OBJ_STORE_ENDPOINT_FIELD: "https://example.com", - c.OBJ_STORE_TOKEN_FIELD: "XXXX", - c.LBLIST_FIELD: [{'protocol': 'http', 'id': '123'}] - } - null_driver = LoadBalancerDriver() - controller = c(null_driver, msg) - response = controller.run() - self.assertEqual(response[c.RESPONSE_FIELD], c.RESPONSE_FAILURE) - self.assertIn(c.ERROR_FIELD, response) - self.assertEqual(response[c.ERROR_FIELD], - "Selected driver does not support ARCHIVE action.") diff --git a/libra/tests/worker/test_driver_haproxy.py b/libra/tests/worker/test_driver_haproxy.py deleted file mode 100644 index b3a2eb76..00000000 --- a/libra/tests/worker/test_driver_haproxy.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from libra.tests.base import TestCase -from libra.worker.drivers.haproxy.driver import HAProxyDriver - - -class TestHAProxyDriver(TestCase): - def setUp(self): - super(TestHAProxyDriver, self).setUp() - self.driver = HAProxyDriver('libra.tests.mock_objects.FakeOSServices', - None, None) - - def testInit(self): - """ Test the HAProxy init() method """ - self.driver.init() - self.assertIsInstance(self.driver._config, dict) - - def testAddProtocol(self): - """ Test the HAProxy set_protocol() method """ - proto = 'http' - self.driver.add_protocol(proto, None) - self.assertIn(proto, self.driver._config) - self.assertEqual(self.driver._config[proto]['bind_address'], '0.0.0.0') - self.assertEqual(self.driver._config[proto]['bind_port'], 80) - - proto = 'tcp' - self.driver.add_protocol(proto, 443) - self.assertIn(proto, self.driver._config) - self.assertEqual(self.driver._config[proto]['bind_address'], '0.0.0.0') - self.assertEqual(self.driver._config[proto]['bind_port'], 443) - - proto = 'galera' - self.driver.add_protocol(proto, 3306) - self.assertIn(proto, self.driver._config) - self.assertEqual(self.driver._config[proto]['bind_address'], '0.0.0.0') - self.assertEqual(self.driver._config[proto]['bind_port'], 3306) - - proto = 'tnetennba' - e = self.assertRaises(Exception, self.driver.add_protocol, proto, 99) - self.assertEqual("Unsupported protocol: %s" % proto, e.message) - - def testAddGaleraRequiresPort(self): - e = self.assertRaises( - Exception, self.driver.add_protocol, 'galera', None) - self.assertEqual("Port is required for this protocol.", e.message) - - def testAddTCPRequiresPort(self): - e = self.assertRaises(Exception, self.driver.add_protocol, 'tcp', None) - self.assertEqual("Port is required for this protocol.", e.message) - - def testAddServer(self): - """ Test the HAProxy add_server() method """ - proto = 'http' - self.driver.add_protocol(proto, None) - self.driver.add_server(proto, 100, '1.2.3.4', 7777) - self.driver.add_server(proto, 101, '5.6.7.8', 8888, 1, True) - 
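
(Editor's note: the HAProxy driver under test is two-phase — add_protocol(), add_server() and set_algorithm() only queue configuration in a dict, and nothing is written until create(). A sketch of one full pass, runnable only against the tree this patch removes; the explicit set_timeouts() call stands in for the defaults the worker controller normally applies:

    from libra.worker.drivers.haproxy.driver import HAProxyDriver

    # Queue configuration first; FakeOSServices stubs the real OS calls.
    driver = HAProxyDriver('libra.tests.mock_objects.FakeOSServices',
                           None, None)
    driver.init()
    driver.add_protocol('http', None)          # http defaults to 0.0.0.0:80
    driver.add_server('http', 100, '1.2.3.4', 7777)
    driver.set_algorithm('http', HAProxyDriver.ROUNDROBIN)
    driver.set_timeouts('http', 30000, 30000, 30000, 3)
    driver.create()                            # write config, reload service
)
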
self.driver.add_server(proto, 102, '2.3.4.5', 9999, - weight=2, backup=True) - self.assertIn(proto, self.driver._config) - self.assertIn('servers', self.driver._config[proto]) - servers = self.driver._config[proto]['servers'] - self.assertEqual(len(servers), 3) - self.assertEqual(servers[0], (100, '1.2.3.4', 7777, 1, False)) - self.assertEqual(servers[1], (101, '5.6.7.8', 8888, 1, True)) - self.assertEqual(servers[2], (102, '2.3.4.5', 9999, 2, True)) - - def testAddServerMultipleGaleraPrimaries(self): - proto = 'galera' - self.driver.add_protocol(proto, 33306) - self.driver.add_server(proto, 100, '1.2.3.4', 3306, backup=False) - self.driver.add_server(proto, 101, '1.2.3.5', 3306, backup=True) - e = self.assertRaises(Exception, self.driver.add_server, - proto, 101, '1.2.3.6', 3306, backup=False) - self.assertEqual( - "Galera protocol does not accept more than one non-backup node", - e.message) - - def testSetAlgorithm(self): - """ Test the HAProxy set_algorithm() method """ - proto = 'http' - self.driver.add_protocol(proto, None) - self.driver.set_algorithm(proto, self.driver.ROUNDROBIN) - self.assertIn(proto, self.driver._config) - self.assertIn('algorithm', self.driver._config[proto]) - self.assertEqual(self.driver._config[proto]['algorithm'], 'roundrobin') - self.driver.set_algorithm(proto, self.driver.LEASTCONN) - self.assertEqual(self.driver._config[proto]['algorithm'], 'leastconn') - e = self.assertRaises(Exception, self.driver.set_algorithm, proto, 99) - self.assertEqual("Invalid algorithm: http", e.message) - - def testServerWeightInt(self): - """ Test setting integer server weights """ - proto = 'http' - self.driver.add_protocol(proto, None) - self.driver.add_server(proto, 100, '1.2.3.4', 7777, 10) - servers = self.driver._config[proto]['servers'] - self.assertEqual(len(servers), 1) - self.assertEqual(servers[0], (100, '1.2.3.4', 7777, 10, False)) - - def testServerWeightStr(self): - """ Test setting string server weights """ - proto = 'http' - self.driver.add_protocol(proto, None) - self.driver.add_server(proto, 100, '1.2.3.4', 7777, "20") - servers = self.driver._config[proto]['servers'] - self.assertEqual(len(servers), 1) - self.assertEqual(servers[0], (100, '1.2.3.4', 7777, 20, False)) - - def testServerWeightInvalid(self): - """ Test setting string server weights """ - proto = 'http' - self.driver.add_protocol(proto, None) - e = self.assertRaises(Exception, self.driver.add_server, - proto, 100, '1.2.3.4', 7777, 257) - self.assertEqual("Server 'weight' 257 exceeds max of 256", e.message) - - e = self.assertRaises(Exception, self.driver.add_server, - proto, 100, '1.2.3.4', 7777, "abc") - self.assertEqual("Non-integer 'weight' value: 'abc'", e.message) - - def testArchive(self): - """ Test the HAProxy archive() method """ - - # Test an invalid archive method - method = 'invalid' - e = self.assertRaises(Exception, self.driver.archive, method, None) - self.assertEqual( - "Driver does not support archive method '%s'" % method, - e.message) diff --git a/libra/tests/worker/test_stats.py b/libra/tests/worker/test_stats.py deleted file mode 100644 index e3413527..00000000 --- a/libra/tests/worker/test_stats.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import os.path -import tempfile - -from libra.tests.base import TestCase -from libra.worker.drivers.haproxy.stats import StatisticsManager - - -class TestStatisticsManager(TestCase): - - def setUp(self): - super(TestStatisticsManager, self).setUp() - self.tmpfile = tempfile.gettempdir() + "/tstLibraTestStatsMgr.tmp" - self.mgr = StatisticsManager(self.tmpfile) - - def tearDown(self): - if os.path.exists(self.tmpfile): - os.remove(self.tmpfile) - super(TestStatisticsManager, self).tearDown() - - def testReadNoStatsFile(self): - self.assertEqual(self.mgr.get_start(), None) - self.assertEqual(self.mgr.get_end(), None) - self.assertEqual(self.mgr.get_last_tcp_bytes(), 0) - self.assertEqual(self.mgr.get_last_http_bytes(), 0) - self.assertEqual(self.mgr.get_unreported_tcp_bytes(), 0) - self.assertEqual(self.mgr.get_unreported_http_bytes(), 0) - - def testSave(self): - start_ts = datetime.datetime(2013, 1, 31, 12, 10, 30, 123456) - end_ts = start_ts + datetime.timedelta(minutes=5) - tcp_bytes = 1024 - http_bytes = 2048 - unreported_tcp_bytes = 3000 - unreported_http_bytes = 4000 - - self.mgr.save(start_ts, end_ts, - tcp_bytes=tcp_bytes, http_bytes=http_bytes) - self.mgr.read() - - self.assertEqual(self.mgr.get_start(), start_ts) - self.assertEqual(self.mgr.get_end(), end_ts) - self.assertEqual(self.mgr.get_last_tcp_bytes(), tcp_bytes) - self.assertEqual(self.mgr.get_last_http_bytes(), http_bytes) - self.assertEqual(self.mgr.get_unreported_tcp_bytes(), 0) - self.assertEqual(self.mgr.get_unreported_http_bytes(), 0) - - self.mgr.save(start_ts, end_ts, - unreported_tcp_bytes=unreported_tcp_bytes, - unreported_http_bytes=unreported_http_bytes) - self.mgr.read() - - self.assertEqual(self.mgr.get_start(), start_ts) - self.assertEqual(self.mgr.get_end(), end_ts) - self.assertEqual(self.mgr.get_last_tcp_bytes(), 0) - self.assertEqual(self.mgr.get_last_http_bytes(), 0) - self.assertEqual(self.mgr.get_unreported_tcp_bytes(), - unreported_tcp_bytes) - self.assertEqual(self.mgr.get_unreported_http_bytes(), - unreported_http_bytes) - - self.mgr.save(start_ts, end_ts, - tcp_bytes=tcp_bytes, - http_bytes=http_bytes, - unreported_tcp_bytes=unreported_tcp_bytes, - unreported_http_bytes=unreported_http_bytes) - self.mgr.read() - - self.assertEqual(self.mgr.get_start(), start_ts) - self.assertEqual(self.mgr.get_end(), end_ts) - self.assertEqual(self.mgr.get_last_tcp_bytes(), tcp_bytes) - self.assertEqual(self.mgr.get_last_http_bytes(), http_bytes) - self.assertEqual(self.mgr.get_unreported_tcp_bytes(), - unreported_tcp_bytes) - self.assertEqual(self.mgr.get_unreported_http_bytes(), - unreported_http_bytes) diff --git a/libra/worker/__init__.py b/libra/worker/__init__.py deleted file mode 100644 index d0c4f7a7..00000000 --- a/libra/worker/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg -from libra.worker.drivers.base import known_drivers - - -worker_group = cfg.OptGroup('worker', 'Libra Worker options') - -cfg.CONF.register_group(worker_group) - -cfg.CONF.register_opts( - [ - cfg.StrOpt('driver', - default='haproxy', - choices=known_drivers.keys(), - help='Type of device to use'), - cfg.StrOpt('pid', - default='/var/run/libra/libra_worker.pid', - help='PID file'), - ], - group=worker_group -) diff --git a/libra/worker/controller.py b/libra/worker/controller.py deleted file mode 100644 index bc6477f5..00000000 --- a/libra/worker/controller.py +++ /dev/null @@ -1,566 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket - -from oslo.config import cfg - -from libra import __version__ as libra_version -from libra import __release__ as libra_release -from libra.common.exc import DeletedStateError -from libra.common.faults import BadRequest -from libra.openstack.common import log -from libra.worker.drivers import base - -LOG = log.getLogger(__name__) - - -class LBaaSController(object): - - NODE_OK = "ENABLED" - NODE_ERR = "DISABLED" - RESPONSE_FAILURE = "FAIL" - RESPONSE_SUCCESS = "PASS" - ACTION_FIELD = 'hpcs_action' - RESPONSE_FIELD = 'hpcs_response' - ERROR_FIELD = 'hpcs_error' - LBLIST_FIELD = 'loadBalancers' - OBJ_STORE_TYPE_FIELD = 'hpcs_object_store_type' - OBJ_STORE_BASEPATH_FIELD = 'hpcs_object_store_basepath' - OBJ_STORE_ENDPOINT_FIELD = 'hpcs_object_store_endpoint' - OBJ_STORE_TOKEN_FIELD = 'hpcs_object_store_token' - - def __init__(self, driver, json_msg): - self.driver = driver - self.msg = json_msg - - def run(self): - """ - Process the JSON message and return a JSON response. 
- """ - - if self.ACTION_FIELD not in self.msg: - LOG.error("Missing `%s` value" % self.ACTION_FIELD) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - action = self.msg[self.ACTION_FIELD].upper() - - try: - if action == 'UPDATE': - return self._action_update() - elif action == 'SUSPEND': - return self._action_suspend() - elif action == 'ENABLE': - return self._action_enable() - elif action == 'DELETE': - return self._action_delete() - elif action == 'DISCOVER': - return self._action_discover() - elif action == 'ARCHIVE': - return self._action_archive() - elif action == 'METRICS': - return self._action_metrics() - elif action == 'STATS': - return self._action_stats() - elif action == 'DIAGNOSTICS': - return self._action_diagnostic() - else: - LOG.error("Invalid `%s` value: %s", self.ACTION_FIELD, action) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - except Exception as e: - LOG.error("Controller exception: %s, %s", e.__class__, e) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - def _action_diagnostic(self): - """ - Returns the results of a diagnostic run - - This message is used to see if the worker that was built will actually - function as a load balancer - """ - # Gearman test - self.msg['gearman'] = [] - for host_port in cfg.CONF['gearman']['servers']: - host, port = host_port.split(':') - try: - self._check_host(host, int(port)) - except: - self.msg['gearman'].append( - {'host': host, 'status': self.RESPONSE_FAILURE} - ) - else: - self.msg['gearman'].append( - {'host': host, 'status': self.RESPONSE_SUCCESS} - ) - # Outgoing network test - try: - # TODO: make this configurable - self._check_host('google.com', 80) - except: - self.msg['network'] = self.RESPONSE_FAILURE - else: - self.msg['network'] = self.RESPONSE_SUCCESS - - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def _check_host(self, ip, port): - # TCP connect check to see if floating IP was assigned correctly - sock = socket.socket() - sock.settimeout(5) - try: - sock.connect((ip, port)) - sock.close() - return True - except socket.error: - LOG.error( - "TCP connect error to gearman server {0}" - .format(ip) - ) - raise - - def _set_lb_options(self, protocol, options): - """ - Parse load balancer options. - - options - Dictionary of load balancer options. - - Returns: True on success, False otherwise - """ - - # Default timeout values in milliseconds - client_val = 30000 - server_val = 30000 - connect_val = 30000 - retries_val = 3 - - if 'client_timeout' in options: - client_val = options['client_timeout'] - if 'server_timeout' in options: - server_val = options['server_timeout'] - if 'connect_timeout' in options: - connect_val = options['connect_timeout'] - if 'connect_retries' in options: - retries_val = options['connect_retries'] - - try: - self.driver.set_timeouts(protocol, client_val, server_val, - connect_val, retries_val) - except NotImplementedError: - pass - except Exception as e: - error = "Failed to set timeout values: %s" % e - LOG.error(error) - self.msg[self.ERROR_FIELD] = error - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return False - - return True - - def _action_discover(self): - """ - Return service discovery information. - - This message type is currently used to report the Libra version, - which can be used to determine which messages are supported. 
- """ - self.msg['version'] = libra_version - self.msg['release'] = libra_release - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def _action_update(self): - """ - Create/Update a Load Balancer. - - This is the only method (so far) that actually parses the contents - of the JSON message (other than the ACTION_FIELD field). Modifying - the JSON message structure likely means this method will need to - be modified, unless the change involves fields that are ignored. - """ - - try: - self.driver.init() - except NotImplementedError: - pass - except Exception as e: - LOG.error("Selected driver failed initialization.") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - if self.LBLIST_FIELD not in self.msg: - return BadRequest( - "Missing '%s' element" % self.LBLIST_FIELD - ).to_json() - - lb_list = self.msg[self.LBLIST_FIELD] - - for current_lb in lb_list: - if 'nodes' not in current_lb: - return BadRequest("Missing 'nodes' element").to_json() - - if 'protocol' not in current_lb: - return BadRequest( - "Missing required 'protocol' value." - ).to_json() - else: - port = None - if 'port' in current_lb: - port = current_lb['port'] - try: - self.driver.add_protocol(current_lb['protocol'], port) - except NotImplementedError: - LOG.error( - "Selected driver does not support setting protocol." - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - except Exception as e: - LOG.error( - "Failure trying to set protocol: %s, %s" % - (e.__class__, e) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - if 'algorithm' in current_lb: - algo = current_lb['algorithm'].upper() - if algo == 'ROUND_ROBIN': - algo = base.LoadBalancerDriver.ROUNDROBIN - elif algo == 'LEAST_CONNECTIONS': - algo = base.LoadBalancerDriver.LEASTCONN - else: - LOG.error("Invalid algorithm: %s" % algo) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - else: - algo = base.LoadBalancerDriver.ROUNDROBIN - - try: - self.driver.set_algorithm(current_lb['protocol'], algo) - except NotImplementedError: - LOG.error( - "Selected driver does not support setting algorithm." - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - except Exception as e: - LOG.error( - "Selected driver failed setting algorithm." - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - if 'monitor' in current_lb: - monitor = current_lb['monitor'] - for opt in ['type', 'delay', 'timeout', 'attempts']: - if opt not in monitor: - return BadRequest("Missing monitor value '%s'" % - opt).to_json() - if 'path' not in monitor: - monitor['path'] = '/' - - try: - self.driver.add_monitor(current_lb['protocol'], - monitor['type'], - monitor['delay'], - monitor['timeout'], - monitor['attempts'], - monitor['path']) - except NotImplementedError: - LOG.error( - "Selected driver does not support adding healthchecks." 
- ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - except Exception as e: - LOG.error( - "Selected driver failed adding healthchecks: %s, %s" % - (e.__class__, e) - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - - if 'options' in current_lb: - lb_options = current_lb['options'] - else: - lb_options = {} - - # Always call _set_lb_options() since it sets sensible defaults - if not self._set_lb_options(current_lb['protocol'], lb_options): - return self.msg - - for lb_node in current_lb['nodes']: - port = None - address = None - node_id = None - weight = None - backup = False - - if 'port' in lb_node: - port = lb_node['port'] - else: - return BadRequest("Missing node 'port'").to_json() - - if 'address' in lb_node: - address = lb_node['address'] - else: - return BadRequest("Missing node 'address'").to_json() - - if 'id' in lb_node and lb_node['id'] != '': - node_id = lb_node['id'] - else: - return BadRequest("Missing node 'id'").to_json() - - if 'weight' in lb_node: - weight = lb_node['weight'] - - if 'backup' in lb_node and lb_node['backup'].lower() == 'true': - backup = True - - try: - self.driver.add_server(current_lb['protocol'], - node_id, - address, - port, - weight, - backup) - except NotImplementedError: - lb_node['condition'] = self.NODE_ERR - error = "Selected driver does not support adding a server" - LOG.error(error) - self.msg[self.ERROR_FIELD] = error - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - except Exception as e: - lb_node['condition'] = self.NODE_ERR - error = "Failure adding server %s: %s" % (node_id, e) - LOG.error(error) - self.msg[self.ERROR_FIELD] = error - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - return self.msg - else: - LOG.debug("Added server: %s:%s" % (address, port)) - lb_node['condition'] = self.NODE_OK - - try: - self.driver.create() - except NotImplementedError: - LOG.error( - "Selected driver does not support CREATE action." - ) - for current_lb in lb_list: - for lb_node in current_lb['nodes']: - lb_node['condition'] = self.NODE_ERR - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - except Exception as e: - LOG.error("CREATE failed: %s, %s" % (e.__class__, e)) - for lb_node in current_lb['nodes']: - lb_node['condition'] = self.NODE_ERR - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - else: - LOG.info("Activated load balancer changes") - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - - return self.msg - - def _action_suspend(self): - """ Suspend a Load Balancer. """ - try: - self.driver.suspend() - except NotImplementedError: - LOG.error( - "Selected driver does not support SUSPEND action." - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - except Exception as e: - LOG.error("SUSPEND failed: %s, %s" % (e.__class__, e)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - else: - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def _action_enable(self): - """ Enable a suspended Load Balancer. """ - try: - self.driver.enable() - except NotImplementedError: - LOG.error( - "Selected driver does not support ENABLE action." - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - except Exception as e: - LOG.error("ENABLE failed: %s, %s" % (e.__class__, e)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - else: - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def _action_delete(self): - """ Delete a Load Balancer. 
""" - try: - self.driver.delete() - except NotImplementedError: - LOG.error( - "Selected driver does not support DELETE action." - ) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - except Exception as e: - LOG.error("DELETE failed: %s, %s" % (e.__class__, e)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - else: - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def _action_archive(self): - """ Archive LB log files. """ - - valid_methods = ['swift'] - method = None - params = {} - - if self.OBJ_STORE_TYPE_FIELD not in self.msg: - return BadRequest( - "Missing '%s' element" % self.OBJ_STORE_TYPE_FIELD - ).to_json() - else: - method = self.msg[self.OBJ_STORE_TYPE_FIELD].lower() - - # Validate method type - if method not in valid_methods: - return BadRequest( - "'%s' is not a valid store type" % method - ).to_json() - - # Get parameters for Swift storage - if method == 'swift': - if self.OBJ_STORE_BASEPATH_FIELD not in self.msg: - return BadRequest( - "Missing '%s' element" % self.OBJ_STORE_BASEPATH_FIELD - ).to_json() - if self.OBJ_STORE_ENDPOINT_FIELD not in self.msg: - return BadRequest( - "Missing '%s' element" % self.OBJ_STORE_ENDPOINT_FIELD - ).to_json() - if self.OBJ_STORE_TOKEN_FIELD not in self.msg: - return BadRequest( - "Missing '%s' element" % self.OBJ_STORE_TOKEN_FIELD - ).to_json() - if self.LBLIST_FIELD not in self.msg: - return BadRequest( - "Missing '%s' element" % self.LBLIST_FIELD - ).to_json() - - lb_list = self.msg[self.LBLIST_FIELD] - params['proto'] = lb_list[0]['protocol'] - params['lbid'] = lb_list[0]['id'] - params['basepath'] = self.msg[self.OBJ_STORE_BASEPATH_FIELD] - params['endpoint'] = self.msg[self.OBJ_STORE_ENDPOINT_FIELD] - params['token'] = self.msg[self.OBJ_STORE_TOKEN_FIELD] - - try: - self.driver.archive(method, params) - except NotImplementedError: - error = "Selected driver does not support ARCHIVE action." - LOG.error(error) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = error - except Exception as e: - LOG.error("ARCHIVE failed: %s, %s" % (e.__class__, e)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = str(e) - else: - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg - - def _action_stats(self): - """ - Get load balancer and node status. - - We push responsibility for knowing what state a load balancer - current is in to the driver. Trying to get status for a LB that - has been deleted is an error. - """ - - try: - nodes = self.driver.get_status() - except NotImplementedError: - error = "Selected driver does not support STATS action." - LOG.error(error) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = error - except DeletedStateError: - error = "Invalid operation STATS on a deleted LB." - LOG.error(error) - self.msg['status'] = 'DELETED' - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = error - except Exception as e: - LOG.error("STATS failed: %s, %s" % (e.__class__, e)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = str(e) - else: - self.msg['nodes'] = [] - for node, status in nodes: - self.msg['nodes'].append({'id': node, 'status': status}) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - - return self.msg - - def _action_metrics(self): - """ - Get load balancer metrics - - This type of request gets the number of bytes out for each load - balancer defined on the device. 
If both a TCP and HTTP load - balancer exist, we report on each in a single response. - """ - - try: - start, end, statistics = self.driver.get_statistics() - except NotImplementedError: - error = "Selected driver does not support METRICS action." - LOG.error(error) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = error - return self.msg - except DeletedStateError: - error = "Invalid operation METRICS on a deleted LB." - LOG.error(error) - self.msg['status'] = 'DELETED' - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = error - return self.msg - except Exception as e: - LOG.error("METRICS failed: %s, %s" % (e.__class__, e)) - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE - self.msg[self.ERROR_FIELD] = str(e) - return self.msg - - self.msg['utc_start'] = start - self.msg['utc_end'] = end - self.msg['loadBalancers'] = [] - - # We should have a list of tuples pairing the number of bytes - # out with the protocol/LB. - for proto, bytes_out in statistics: - self.msg['loadBalancers'].append({'protocol': proto, - 'bytes_out': bytes_out}) - - self.msg[self.RESPONSE_FIELD] = self.RESPONSE_SUCCESS - return self.msg diff --git a/libra/worker/drivers/__init__.py b/libra/worker/drivers/__init__.py deleted file mode 100644 index 582348cb..00000000 --- a/libra/worker/drivers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/libra/worker/drivers/base.py b/libra/worker/drivers/base.py deleted file mode 100644 index 67c17ae1..00000000 --- a/libra/worker/drivers/base.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations - - -# Mapping of --driver options to a class -known_drivers = { - 'haproxy': 'libra.worker.drivers.haproxy.driver.HAProxyDriver' -} - - -class LoadBalancerDriver(object): - """ - Load balancer device driver base class. - - This defines the API for interacting with various load balancing - appliances. Drivers for these appliances should inherit from this - class and implement the relevant API methods that it can support. - - Generally, an appliance driver should queue up any changes made - via these API calls until the create() method is called. - - This design allows for a single load balancer to support multiple - protocols simultaneously. 
Each protocol added via the add_protocol() - method is assumed to be unique, and one protocol per port. This same - protocol is then supplied to other methods (e.g., add_server() and - set_algorithm()) to make changes for that specific protocol. - """ - - # Load balancer algorithms - ROUNDROBIN = 1 - LEASTCONN = 2 - - def init(self): - """ Allows the driver to do any initialization for a new config. """ - raise NotImplementedError() - - def add_protocol(self, protocol, port): - """ Add a supported protocol and listening port for the instance. """ - raise NotImplementedError() - - def add_server(self, protocol, host, port, weight, backup): - """ Add a server for the protocol for which we will proxy. """ - raise NotImplementedError() - - def set_algorithm(self, protocol, algo): - """ Set the algorithm used by the load balancer for this protocol. """ - raise NotImplementedError() - - def add_monitor(self, protocol, mtype, delay, timeout, attempts, path): - """ - Add a health check monitor for this protocol. - - protocol - Protocol of the load balancer (HTTP, TCP) - mtype - Monitor type (CONNECT, HTTP) - delay - Minimum time in seconds between regular calls to a monitor. - timeout - Maximum number of seconds for a monitor to wait for a connection - to be established to the node before it times out. The value must - be less than the delay value. - attempts - Number of permissible monitor failures before removing a node from - rotation. - path - The HTTP path used in the HTTP request by the monitor. This must - be a string beginning with a / (forward slash). The monitor - expects a response from the node with an HTTP status code of 200. - """ - raise NotImplementedError() - - def create(self): - """ Create the load balancer. """ - raise NotImplementedError() - - def suspend(self): - """ Suspend the load balancer. """ - raise NotImplementedError() - - def enable(self): - """ Enable a suspended load balancer. """ - raise NotImplementedError() - - def delete(self): - """ Delete a load balancer. """ - raise NotImplementedError() - - def get_status(self, protocol): - """ - Get load balancer status for specified protocol. - - Returns a list of tuples containing (in this order): - - node ID - - node status - """ - raise NotImplementedError() - - def get_statistics(self): - """ - Get load balancer statistics for all LBs on the device. - - Returns a tuple containing (in this order): - - start timestamp for the reporting period as a string - - end timestamp for the reporting period as a string - - list of tuples containing (in this order): - - protocol for the LB ('tcp' or 'http') as a string - - bytes out for this LB for this reporting period as an int - """ - raise NotImplementedError() - - def archive(self, method, params): - """ Archive the load balancer logs using the specified method. """ - raise NotImplementedError() - - def set_timeouts(self, protocol, client_timeout, server_timeout, - connect_timeout, connect_retries): - """ - Set the various timeout values for the specified protocol. - """ - raise NotImplementedError() diff --git a/libra/worker/drivers/haproxy/__init__.py b/libra/worker/drivers/haproxy/__init__.py deleted file mode 100644 index b81ee247..00000000 --- a/libra/worker/drivers/haproxy/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg -from libra.worker.drivers.haproxy.services_base import haproxy_services - -haproxy_group = cfg.OptGroup('worker:haproxy', 'Worker HAProxy options') - -cfg.CONF.register_group(haproxy_group) - -cfg.CONF.register_opts( - [ - cfg.StrOpt('service', - choices=haproxy_services.keys(), - default='ubuntu', - help='OS services to use with HAProxy driver'), - cfg.StrOpt('logfile', - default='/var/log/haproxy.log', - help='Location of HAProxy logfile'), - cfg.StrOpt('statsfile', - default='/var/log/haproxy.stats', - help='Location of the HAProxy statistics cache file'), - ], - group=haproxy_group -) diff --git a/libra/worker/drivers/haproxy/driver.py b/libra/worker/drivers/haproxy/driver.py deleted file mode 100644 index e6d03f36..00000000 --- a/libra/worker/drivers/haproxy/driver.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import gzip -import hashlib -import os -import re -from datetime import datetime -from swiftclient import client as sc - -from libra.openstack.common import importutils -from libra.worker.drivers.base import LoadBalancerDriver -from libra.worker.drivers.haproxy.services_base import ServicesBase - - -class HAProxyDriver(LoadBalancerDriver): - - def __init__(self, ossvc, user, group, haproxy_logfile=None): - self.haproxy_log = haproxy_logfile - self.user = user - self.group = group - ossvc_driver = importutils.import_class(ossvc) - self.ossvc = ossvc_driver() - if not isinstance(self.ossvc, ServicesBase): - raise Exception('Class is not derived from ServicesBase: %s' % - ossvc.__class__) - self._init_config() - - def _init_config(self): - self._config = dict() - - def _bind(self, protocol, address, port): - self._config[protocol]['bind_address'] = address - self._config[protocol]['bind_port'] = port - - def _config_to_string(self): - """ - Use whatever configuration parameters have been set to generate - output suitable for a HAProxy configuration file. 
- """ - stats_socket = "/var/run/haproxy-stats.socket" - output = [] - output.append('global') - output.append(' daemon') - output.append(' log /dev/log local0') - output.append(' maxconn 4096') - output.append(' user haproxy') - output.append(' group haproxy') - - # group can be None, but user cannot - if self.group is None: - output.append( - ' stats socket %s user %s mode operator' % - (stats_socket, self.user) - ) - else: - output.append( - ' stats socket %s user %s group %s mode operator' % - (stats_socket, self.user, self.group) - ) - - output.append('defaults') - output.append(' log global') - output.append(' option dontlognull') - output.append(' option redispatch') - output.append(' maxconn 50000') - - for proto in self._config: - protocfg = self._config[proto] - real_proto = proto - if proto == 'galera': - real_proto = 'tcp' - - # ------------------------ - # Frontend configuration - # ------------------------ - output.append('frontend %s-in' % real_proto) - output.append(' mode %s' % real_proto) - output.append(' bind %s:%s' % (protocfg['bind_address'], - protocfg['bind_port'])) - output.append(' timeout client %sms' % - protocfg['timeouts']['timeout_client']) - output.append(' default_backend %s-servers' % real_proto) - - # HTTP specific options for the frontend - if real_proto == 'http': - output.append(' option httplog') - # TCP specific options for the frontend - elif real_proto == 'tcp': - output.append(' option tcplog') - - # ------------------------ - # Backend configuration - # ------------------------ - - output.append('backend %s-servers' % real_proto) - output.append(' mode %s' % real_proto) - output.append(' balance %s' % protocfg['algorithm']) - output.append(' timeout connect %sms' % - protocfg['timeouts']['timeout_connect']) - output.append(' timeout server %sms' % - protocfg['timeouts']['timeout_server']) - output.append(' retries %s' % protocfg['timeouts']['retries']) - - # default healthcheck if none specified - monitor = 'check inter 30s' - - # HTTP specific options for the backend - if real_proto == 'http': - output.append(' cookie SERVERID insert indirect') - output.append(' option httpclose') - output.append(' option forwardfor') - - if 'monitor' in self._config[proto]: - mon = self._config[proto]['monitor'] - if mon['type'] == 'http': - output.append(' option httpchk GET %s' % - mon['path']) - # our timeout will be connect + read time - output.append(' timeout check %ds' % mon['timeout']) - # intentionally set rise/fall to the same value - monitor = "check inter %ds rise %d fall %d" % ( - mon['delay'], mon['attempts'], mon['attempts']) - - for (node_id, addr, port, wt, bkup) in protocfg['servers']: - if bkup: - output.append( - ' server id-%s %s:%s backup cookie id-%s' - ' weight %d %s' % - (node_id, addr, port, node_id, wt, monitor) - ) - else: - output.append( - ' server id-%s %s:%s cookie id-%s' - ' weight %d %s' % - (node_id, addr, port, node_id, wt, monitor) - ) - - # TCP or Galera specific options for the backend - # - # The Galera protocol is a convenience option that lets us set - # our TCP options specifically for load balancing between Galera - # database nodes in a manner that helps avoid deadlocks. A main - # node is chosen which will act as the 'write' node, sending all - # updates to this one node. - - else: - - # No stick table for Galera protocol since we want to return to - # the main backend node once it is available after being down. - if proto == 'tcp': - # Allow session stickiness for TCP connections. 
The 'size' - # value affects memory usage (about 50 bytes per entry). - output.append( - ' stick-table type ip size 200k expire 30m' - ) - output.append(' stick store-request src') - output.append(' stick match src') - - if 'monitor' in self._config[proto]: - mon = self._config[proto]['monitor'] - if mon['type'] == 'http': - output.append(' option httpchk GET %s' % - mon['path']) - # our timeout will be connect + read time - output.append(' timeout check %ds' % mon['timeout']) - # intentionally set rise/fall to the same value - monitor = "check inter %ds rise %d fall %d" % ( - mon['delay'], mon['attempts'], mon['attempts']) - - for (node_id, addr, port, wt, bkup) in protocfg['servers']: - if bkup: - output.append( - ' server id-%s %s:%s backup weight %d %s' % - (node_id, addr, port, wt, monitor) - ) - else: - output.append( - ' server id-%s %s:%s weight %d %s' % - (node_id, addr, port, wt, monitor) - ) - - return '\n'.join(output) + '\n' - - def _archive_swift(self, endpoint, token, basepath, lbid, proto): - """ - Archive HAProxy log files into swift. - - endpoint - Object store endpoint - token - Authorization token - basepath - Container base path - lbid - Load balancer ID - proto - Protocol of the load balancer we are archiving - - Note: It should be acceptable for exceptions to be thrown here as - the controller should wrap these up nicely in a message back to the - API server. - """ - - proto = proto.lower() - - if not os.path.exists(self.haproxy_log): - raise Exception('No HAProxy logs found') - - # We need a copy we can read - reallog_copy = '/tmp/haproxy.log' - self.ossvc.sudo_copy(self.haproxy_log, reallog_copy) - self.ossvc.sudo_chown(reallog_copy, self.user, self.group) - - # Extract contents from the log based on protocol. This is - # because each protocol (tcp or http) represents a separate - # load balancer in Libra. See _config_to_string() for the - # frontend and backend names we search for below. - - filtered_log = '/tmp/haproxy-' + proto + '.log' - fh = open(filtered_log, 'wb') - for line in open(reallog_copy, 'rb'): - if re.search(proto + '-in', line): - fh.write(line) - elif re.search(proto + '-servers', line): - fh.write(line) - fh.close() - os.remove(reallog_copy) - - # Compress the filtered log and generate the MD5 checksum value. - # We generate object name using UTC timestamp. The MD5 checksum of - # the compressed file is used to guarantee Swift properly receives - # the file contents. - - ts = datetime.utcnow().strftime('%Y%m%d-%H%M%S') - objname = 'haproxy-' + ts + '.log.gz' - compressed_file = '/tmp/' + objname - - gzip_in = open(filtered_log, 'rb') - gzip_out = gzip.open(compressed_file, 'wb') - gzip_out.writelines(gzip_in) - gzip_out.close() - gzip_in.close() - os.remove(filtered_log) - - etag = hashlib.md5(open(compressed_file, 'rb').read()).hexdigest() - - # We now have a file to send to Swift for storage. We'll connect - # using the pre-authorized token passed to use for the given endpoint. - # Then make sure that we have a proper container name for this load - # balancer, and place the compressed file in that container. Creating - # containers is idempotent so no need to check if it already exists. 
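
(Editor's note: rendered, the _config_to_string() template above produces output along these lines for a single HTTP balancer with one default-weight node — an illustrative reconstruction from the string-building code, not captured output; user/group shown as the haproxy defaults:

    global
        daemon
        log /dev/log local0
        maxconn 4096
        user haproxy
        group haproxy
        stats socket /var/run/haproxy-stats.socket user haproxy group haproxy mode operator
    defaults
        log global
        option dontlognull
        option redispatch
        maxconn 50000
    frontend http-in
        mode http
        bind 0.0.0.0:80
        timeout client 30000ms
        default_backend http-servers
        option httplog
    backend http-servers
        mode http
        balance roundrobin
        timeout connect 30000ms
        timeout server 30000ms
        retries 3
        cookie SERVERID insert indirect
        option httpclose
        option forwardfor
        server id-100 1.2.3.4:7777 cookie id-100 weight 1 check inter 30s
)
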
- - object_path = '/'.join([lbid, objname]) - logfh = open(compressed_file, 'rb') - - try: - conn = sc.Connection(preauthurl=endpoint, preauthtoken=token) - conn.put_container(basepath) - conn.put_object(container=basepath, - obj=object_path, - etag=etag, - contents=logfh) - except Exception as e: - logfh.close() - os.remove(compressed_file) - errmsg = "Failure during Swift operations. Swift enabled?" - errmsg = errmsg + "\nException was: %s" % e - raise Exception(errmsg) - - logfh.close() - os.remove(compressed_file) - - #################### - # Driver API Methods - #################### - - def init(self): - self._init_config() - - def add_protocol(self, protocol, port=None): - proto = protocol.lower() - if proto not in ('tcp', 'http', 'galera'): - raise Exception("Unsupported protocol: %s" % protocol) - if proto in self._config: - raise Exception("Protocol '%s' is already defined." % protocol) - else: - self._config[proto] = dict() - - if port is None: - if proto in ('tcp', 'galera'): - raise Exception('Port is required for this protocol.') - elif proto == 'http': - self._bind(proto, '0.0.0.0', 80) - else: - self._bind(proto, '0.0.0.0', port) - - def add_server(self, protocol, node_id, host, port, - weight=1, backup=False): - proto = protocol.lower() - if weight is None: - weight = 1 - - try: - weight = int(weight) - except ValueError: - raise Exception("Non-integer 'weight' value: '%s'" % weight) - - if weight > 256: - raise Exception("Server 'weight' %d exceeds max of 256" % weight) - - if 'servers' not in self._config[proto]: - self._config[proto]['servers'] = [] - - if proto == 'galera': - for (n, h, p, w, b) in self._config[proto]['servers']: - if b is False and backup is False: - raise Exception("Galera protocol does not accept more" - " than one non-backup node") - - self._config[proto]['servers'].append((node_id, host, port, - weight, backup)) - - def set_algorithm(self, protocol, algo): - proto = protocol.lower() - if algo == self.ROUNDROBIN: - self._config[proto]['algorithm'] = 'roundrobin' - elif algo == self.LEASTCONN: - self._config[proto]['algorithm'] = 'leastconn' - else: - raise Exception('Invalid algorithm: %s' % protocol) - - def add_monitor(self, protocol, mtype, delay, timeout, attempts, path): - proto = protocol.lower() - if mtype.lower() not in ['connect', 'http']: - raise Exception('Invalid monitor type: %s' % mtype) - - # default values - if delay is None: - delay = 30 - if attempts is None: - attempts = 2 - if timeout is None: - timeout = delay - if (path is None) or (len(path) == 0): - path = '/' - - if path[0] != '/': - path = '/' + path - - try: - delay = int(delay) - except ValueError: - raise Exception("Non-integer 'delay' value: '%s'" % delay) - - try: - timeout = int(timeout) - except ValueError: - raise Exception("Non-integer 'timeout' value: '%s'" % timeout) - - try: - attempts = int(attempts) - except ValueError: - raise Exception("Non-integer 'attempts' value: '%s'" % attempts) - - if timeout > delay: - raise Exception("Timeout cannot be greater than delay") - - self._config[proto]['monitor'] = {'type': mtype.lower(), - 'delay': delay, - 'timeout': timeout, - 'attempts': attempts, - 'path': path} - - def create(self): - self.ossvc.write_config(self._config_to_string()) - self.ossvc.service_reload() - - def suspend(self): - self.ossvc.service_stop() - - def enable(self): - self.ossvc.service_start() - - def delete(self): - self.ossvc.service_stop() - self.ossvc.remove_configs() - self.ossvc.sudo_rm(self.haproxy_log) - # Since haproxy should be logging 
via syslog, we need a syslog - # restart, otherwise the log file will be kept open and not reappear. - self.ossvc.syslog_restart() - - def get_status(self, protocol=None): - return self.ossvc.get_status(protocol) - - def get_statistics(self): - return self.ossvc.get_statistics() - - def archive(self, method, params): - """ - Implementation of the archive() API call. - - method - Method we use for archiving the files. - - params - Dictionary with parameters needed for archiving. The keys of - the dictionary will vary based on the value of 'method'. - """ - - if method == 'swift': - return self._archive_swift(params['endpoint'], - params['token'], - params['basepath'], - params['lbid'], - params['proto']) - else: - raise Exception("Driver does not support archive method '%s'" % - method) - - def set_timeouts(self, protocol, client_timeout, server_timeout, - connect_timeout, connect_retries): - protocol = protocol.lower() - self._config[protocol]['timeouts'] = { - 'timeout_client': client_timeout, - 'timeout_server': server_timeout, - 'timeout_connect': connect_timeout, - 'retries': connect_retries - } diff --git a/libra/worker/drivers/haproxy/query.py b/libra/worker/drivers/haproxy/query.py deleted file mode 100644 index 60a7502f..00000000 --- a/libra/worker/drivers/haproxy/query.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import subprocess - - -class HAProxyQuery(object): - """ - Class used for querying the HAProxy statistics socket. - - The CSV output is defined in the HAProxy documentation: - - http://cbonte.github.io/haproxy-dconv/configuration-1.4.html#9 - """ - - def __init__(self, stats_socket): - """ - stats_socket - Path to the HAProxy statistics socket file. - """ - self.socket = stats_socket - - def _query(self, query): - """ - Send the given query to the haproxy statistics socket. - - Return the output of a successful query as a string with trailing - newlines removed, or raise an Exception if the query fails. - """ - cmd = 'echo "%s" | /usr/bin/socat stdio %s' % \ - (query, self.socket) - - try: - output = subprocess.check_output(cmd, shell=True) - except subprocess.CalledProcessError: - raise Exception("HAProxy '%s' query failed." % query) - - return output.rstrip() - - def show_info(self): - """ Get and parse output from 'show info' command. """ - results = self._query('show info') - list_results = results.split('\n') - # TODO: Parse the results into a well defined format. - return list_results - - def show_stat(self, proxy_iid=-1, object_type=-1, server_id=-1): - """ - Get and parse output from 'show status' command. - - proxy_iid - Proxy ID (column 27 in CSV output). -1 for all. - - object_type - Select the type of dumpable object. Values can be ORed. - -1 - everything - 1 - frontends - 2 - backends - 4 - servers - - server_id - Server ID (column 28 in CSV output?), or -1 for everything. 
- """ - results = self._query('show stat %d %d %d' - % (proxy_iid, object_type, server_id)) - list_results = results.split('\n') - return list_results - - def get_bytes_out(self, protocol=None): - """ - Get bytes out for the given protocol, or all protocols if - not specified. - - Return a dictionary keyed by protocol with bytes out as the value. - """ - if protocol: - filter_string = protocol.lower() + "-servers" - - results = self.show_stat(object_type=2) # backends only - - final_results = {} - for line in results[1:]: - elements = line.split(',') - if protocol and elements[0] != filter_string: - next - else: - proto, ignore = elements[0].split('-') - bytes_out = int(elements[9]) - final_results[proto.lower()] = bytes_out - - return final_results - - def get_server_status(self, protocol=None): - """ - Get status for each server for a protocol backend. - Return a list of tuples containing server name and status. - """ - - if protocol: - filter_string = protocol.lower() + "-servers" - - results = self.show_stat(object_type=4) # servers only - - final_results = [] - for line in results[1:]: - elements = line.split(',') - if protocol and elements[0] != filter_string: - next - else: - # 1 - server name, 17 - status - # Here we look for the new server name form of "id-NNNN" - # where NNNN is the unique node ID. The old form could - # be "serverX", in which case we leave it alone. - if elements[1][0:3] == "id-": - junk, node_id = elements[1].split('-') - else: - node_id = elements[1] - - # All the way up is UP, otherwise call it DOWN - if elements[17] != "UP": - elements[17] = "DOWN" - - final_results.append((node_id, elements[17])) - return final_results diff --git a/libra/worker/drivers/haproxy/services_base.py b/libra/worker/drivers/haproxy/services_base.py deleted file mode 100644 index 2e8b85b9..00000000 --- a/libra/worker/drivers/haproxy/services_base.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# Mapping of --haproxy-services options to a class -haproxy_services = { - 'ubuntu': 'libra.worker.drivers.haproxy.ubuntu_services.UbuntuServices' -} - - -class ServicesBase: - """ - Operating system services needed by the HAProxy driver. - - NOTE: All of these methods must be implemented. - """ - - def syslog_restart(self): - """ Restart syslog daemon. """ - raise NotImplementedError() - - def service_stop(self): - """ Stop the HAProxy service. """ - raise NotImplementedError() - - def service_start(self): - """ Start the HAProxy service. """ - raise NotImplementedError() - - def service_reload(self): - """ Reload the HAProxy config file. """ - raise NotImplementedError() - - def write_config(self, config_str): - """ Write the HAProxy configuration file. """ - raise NotImplementedError() - - def remove_configs(self): - """ Remove current and saved HAProxy config files. """ - raise NotImplementedError() - - def get_status(self, protocol): - """ Get status from HAProxy. 
""" - raise NotImplementedError() - - def get_statistics(self): - """ Get statistics from HAProxy. """ - raise NotImplementedError() - - def sudo_copy(self, from_file, to_file): - """ Do a privileged file copy. """ - raise NotImplementedError() - - def sudo_chown(self, file, user, group): - """ Do a privileged file ownership change. """ - raise NotImplementedError() - - def sudo_rm(self, file): - """ Do a privileged file delete. """ - raise NotImplementedError() diff --git a/libra/worker/drivers/haproxy/stats.py b/libra/worker/drivers/haproxy/stats.py deleted file mode 100644 index 61c2da57..00000000 --- a/libra/worker/drivers/haproxy/stats.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import os.path -import simplejson - -from libra.openstack.common import log - -LOG = log.getLogger(__name__) - - -class StatisticsManager(object): - """ - Class for managing statistics storage. - - Since HAProxy statistics are reset whenever the haproxy process is - restarted, we need a reliable way of maintaining these values across - restarts. This class attempts to manage the storage of the values. - - There are two types of statistics we record: - - * Unreported stats - These are stats that we need to save because a state change in - the HAProxy service is causing it to restart. Since HAProxy stores - its stats in memory, they would otherwise be lost. We save them here - for consideration in the next METRICS request. - - * Last queried stats - These are total bytes out as reported from HAProxy the last time we - queried it for that information. - """ - - START_FIELD = 'start' - END_FIELD = 'end' - - # UNREPORTED_* values are for unreported statistics due to a restart - UNREPORTED_TCP_BYTES_FIELD = 'unreported_tcp_bytes_out' - UNREPORTED_HTTP_BYTES_FIELD = 'unreported_http_bytes_out' - - # LAST_* values are for values from our last query - LAST_TCP_BYTES_FIELD = 'last_tcp_bytes_out' - LAST_HTTP_BYTES_FIELD = 'last_http_bytes_out' - - def __init__(self, filename): - self.filename = filename - self._object = {} - self.read() - - def _do_save(self, obj): - with open(self.filename, "w") as fp: - simplejson.dump(obj, fp) - - def _format_timestamp(self, ts): - return datetime.datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f') - - def save(self, start, end, tcp_bytes=0, http_bytes=0, - unreported_tcp_bytes=0, unreported_http_bytes=0): - """ - Save HAProxy statistics values, overwriting any existing data. - - start - Start timestamp from our last report. - - end - End timestamp from our last report. - - tcp_bytes - TOTAL bytes out of the TCP backend, as reported by haproxy, - when we last reported them back. - - http_bytes - TOTAL bytes out of the HTTP backend, as reported by haproxy, - when we last reported them back. - - unreported_tcp_bytes - TOTAL bytes out of the TCP backend, as reported by haproxy, - when the service was stopped or restarted. 
diff --git a/libra/worker/drivers/haproxy/stats.py b/libra/worker/drivers/haproxy/stats.py
deleted file mode 100644
index 61c2da57..00000000
--- a/libra/worker/drivers/haproxy/stats.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import os.path
-import simplejson
-
-from libra.openstack.common import log
-
-LOG = log.getLogger(__name__)
-
-
-class StatisticsManager(object):
-    """
-    Class for managing statistics storage.
-
-    Since HAProxy statistics are reset whenever the haproxy process is
-    restarted, we need a reliable way of maintaining these values across
-    restarts. This class attempts to manage the storage of the values.
-
-    There are two types of statistics we record:
-
-    * Unreported stats
-        These are stats that we need to save because a state change in
-        the HAProxy service is causing it to restart. Since HAProxy stores
-        its stats in memory, they would otherwise be lost. We save them here
-        for consideration in the next METRICS request.
-
-    * Last queried stats
-        These are total bytes out as reported from HAProxy the last time we
-        queried it for that information.
-    """
-
-    START_FIELD = 'start'
-    END_FIELD = 'end'
-
-    # UNREPORTED_* values are for unreported statistics due to a restart
-    UNREPORTED_TCP_BYTES_FIELD = 'unreported_tcp_bytes_out'
-    UNREPORTED_HTTP_BYTES_FIELD = 'unreported_http_bytes_out'
-
-    # LAST_* values are for values from our last query
-    LAST_TCP_BYTES_FIELD = 'last_tcp_bytes_out'
-    LAST_HTTP_BYTES_FIELD = 'last_http_bytes_out'
-
-    def __init__(self, filename):
-        self.filename = filename
-        self._object = {}
-        self.read()
-
-    def _do_save(self, obj):
-        with open(self.filename, "w") as fp:
-            simplejson.dump(obj, fp)
-
-    def _format_timestamp(self, ts):
-        return datetime.datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
-
-    def save(self, start, end, tcp_bytes=0, http_bytes=0,
-             unreported_tcp_bytes=0, unreported_http_bytes=0):
-        """
-        Save HAProxy statistics values, overwriting any existing data.
-
-        start
-            Start timestamp from our last report.
-
-        end
-            End timestamp from our last report.
-
-        tcp_bytes
-            TOTAL bytes out of the TCP backend, as reported by haproxy,
-            when we last reported them back.
-
-        http_bytes
-            TOTAL bytes out of the HTTP backend, as reported by haproxy,
-            when we last reported them back.
-
-        unreported_tcp_bytes
-            TOTAL bytes out of the TCP backend, as reported by haproxy,
-            when the service was stopped or restarted.
-
-        unreported_http_bytes
-            TOTAL bytes out of the HTTP backend, as reported by haproxy,
-            when the service was stopped or restarted.
-        """
-        if None in [start, end]:
-            raise Exception('Cannot save None value for timestamps')
-
-        if type(start) != datetime.datetime or type(end) != datetime.datetime:
-            raise TypeError('Timestamps must be datetime.datetime')
-
-        obj = {
-            self.START_FIELD: str(start),
-            self.END_FIELD: str(end),
-            self.LAST_TCP_BYTES_FIELD: tcp_bytes,
-            self.LAST_HTTP_BYTES_FIELD: http_bytes,
-            self.UNREPORTED_TCP_BYTES_FIELD: unreported_tcp_bytes,
-            self.UNREPORTED_HTTP_BYTES_FIELD: unreported_http_bytes
-        }
-        LOG.debug("Saving statistics: %s" % obj)
-        self._do_save(obj)
-
-    def read(self):
-        """ Read the current values from the file """
-        if not os.path.exists(self.filename):
-            return
-        with open(self.filename, "r") as fp:
-            self._object = simplejson.load(fp)
-
-    def get_start(self):
-        """ Return last start timestamp as datetime object """
-        if self.START_FIELD in self._object:
-            return self._format_timestamp(self._object[self.START_FIELD])
-        return None
-
-    def get_end(self):
-        """ Return last end timestamp as datetime object """
-        if self.END_FIELD in self._object:
-            return self._format_timestamp(self._object[self.END_FIELD])
-        return None
-
-    def get_unreported_tcp_bytes(self):
-        """ Return TCP unreported bytes out """
-        if self.UNREPORTED_TCP_BYTES_FIELD in self._object:
-            return int(self._object[self.UNREPORTED_TCP_BYTES_FIELD])
-        return 0
-
-    def get_unreported_http_bytes(self):
-        """ Return HTTP unreported bytes out """
-        if self.UNREPORTED_HTTP_BYTES_FIELD in self._object:
-            return int(self._object[self.UNREPORTED_HTTP_BYTES_FIELD])
-        return 0
-
-    def get_last_tcp_bytes(self):
-        """ Return TCP last reported bytes out """
-        if self.LAST_TCP_BYTES_FIELD in self._object:
-            return int(self._object[self.LAST_TCP_BYTES_FIELD])
-        return 0
-
-    def get_last_http_bytes(self):
-        """ Return HTTP last reported bytes out """
-        if self.LAST_HTTP_BYTES_FIELD in self._object:
-            return int(self._object[self.LAST_HTTP_BYTES_FIELD])
-        return 0
-
-    def calculate_new_start(self):
-        """
-        Calculate a new start value for our reporting time range,
-        which should be just after the last reported end value. If
-        there is no start value, then we haven't recorded one yet
-        (i.e., haven't reported any stats yet) so use the current time.
-        """
-        new_start = self.get_end()
-        if new_start is None:
-            new_start = datetime.datetime.utcnow()
-        else:
-            new_start = new_start + datetime.timedelta(microseconds=1)
-        return new_start
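Round-tripping the class above shows the persistence contract at a glance; a small usage sketch (the file path is illustrative):

    import datetime

    mgr = StatisticsManager('/tmp/example-stats.json')   # illustrative path
    now = datetime.datetime.utcnow()
    # Record 1500 total HTTP bytes already reported, plus 600 bytes haproxy
    # had accumulated when it was restarted (and thus never reported).
    mgr.save(now, now, http_bytes=1500, unreported_http_bytes=600)

    mgr2 = StatisticsManager('/tmp/example-stats.json')  # re-read from disk
    assert mgr2.get_last_http_bytes() == 1500
    assert mgr2.get_unreported_http_bytes() == 600
    # The next reporting window starts just after the last saved end time.
    print(mgr2.calculate_new_start())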
diff --git a/libra/worker/drivers/haproxy/ubuntu_services.py b/libra/worker/drivers/haproxy/ubuntu_services.py
deleted file mode 100644
index 8c4aa43f..00000000
--- a/libra/worker/drivers/haproxy/ubuntu_services.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import os
-import subprocess
-
-from oslo.config import cfg
-
-from libra.common import exc
-from libra.openstack.common import log
-from libra.worker.drivers.haproxy import query
-from libra.worker.drivers.haproxy import services_base
-from libra.worker.drivers.haproxy import stats
-
-LOG = log.getLogger(__name__)
-
-
-class UbuntuServices(services_base.ServicesBase):
-    """ Ubuntu-specific service implementation. """
-
-    def __init__(self):
-        self._haproxy_pid = '/var/run/haproxy.pid'
-        self._config_file = '/etc/haproxy/haproxy.cfg'
-        self._backup_config = self._config_file + '.BKUP'
-
-    def _save_unreported(self):
-        """
-        Save current HAProxy totals for an expected restart.
-        """
-        socket_file = '/var/run/haproxy-stats.socket'
-
-        # On a new device, the socket file won't exist.
-        if not os.path.exists(socket_file):
-            return
-
-        q = query.HAProxyQuery(socket_file)
-        results = q.get_bytes_out()
-
-        stats_file = cfg.CONF['worker:haproxy']['statsfile']
-        stats_mgr = stats.StatisticsManager(stats_file)
-
-        # need to carry over current values
-        start = stats_mgr.get_start()
-        end = stats_mgr.get_end()
-
-        if None in [start, end]:
-            start = datetime.datetime.utcnow()
-            end = start
-
-        tcp_bo = stats_mgr.get_last_tcp_bytes()
-        http_bo = stats_mgr.get_last_http_bytes()
-        unrpt_tcp_bo = stats_mgr.get_unreported_tcp_bytes()
-        unrpt_http_bo = stats_mgr.get_unreported_http_bytes()
-
-        curr_tcp_bo = 0
-        curr_http_bo = 0
-        if 'tcp' in results:
-            curr_tcp_bo = results['tcp']
-        if 'http' in results:
-            curr_http_bo = results['http']
-
-        # If we have unreported totals, then we haven't received a METRICS
-        # call since the last restart and we need to carry over those totals.
-        curr_tcp_bo += unrpt_tcp_bo
-        curr_http_bo += unrpt_http_bo
-
-        stats_mgr.save(start, end,
-                       tcp_bytes=tcp_bo,
-                       http_bytes=http_bo,
-                       unreported_tcp_bytes=curr_tcp_bo,
-                       unreported_http_bytes=curr_http_bo)
-
-    def syslog_restart(self):
-        cmd = '/usr/bin/sudo -n /usr/sbin/service rsyslog restart'
-        try:
-            subprocess.check_output(cmd.split())
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to restart rsyslog service: %s" % e)
-
-    def service_stop(self):
-        """ Stop the HAProxy service on the local machine. """
-        self._save_unreported()
-
-        cmd = '/usr/bin/sudo -n /usr/sbin/service haproxy stop'
-        try:
-            subprocess.check_output(cmd.split())
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to stop HAProxy service: %s" % e)
-        if os.path.exists(self._haproxy_pid):
-            raise Exception("%s still exists. Stop failed." %
-                            self._haproxy_pid)
-
-    def service_start(self):
-        """ Start the HAProxy service on the local machine. """
-        cmd = '/usr/bin/sudo -n /usr/sbin/service haproxy start'
-        try:
-            subprocess.check_output(cmd.split())
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to start HAProxy service: %s" % e)
-        if not os.path.exists(self._haproxy_pid):
-            raise Exception("%s does not exist. Start failed." %
-                            self._haproxy_pid)
-
-    def service_reload(self):
-        """
-        Reload the HAProxy config file in a non-intrusive manner.
-
-        This assumes that /etc/init.d/haproxy is using the -sf option
-        to the haproxy process.
-        """
-        self._save_unreported()
-
-        cmd = '/usr/bin/sudo -n /usr/sbin/service haproxy reload'
-        try:
-            subprocess.check_output(cmd.split())
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to reload HAProxy config: %s" % e)
-        if not os.path.exists(self._haproxy_pid):
-            raise Exception("%s does not exist. Reload failed." %
-                            self._haproxy_pid)
-
-    def sudo_copy(self, from_file, to_file):
-        cmd = "/usr/bin/sudo -n /bin/cp %s %s" % (from_file, to_file)
-        try:
-            subprocess.check_output(cmd.split(),
-                                    stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to copy file: %s\n%s"
-                            % (e, e.output.rstrip('\n')))
-
-    def sudo_chown(self, file, user, group):
-        if group is None:
-            cmd = "/usr/bin/sudo -n /bin/chown %s %s" % (user, file)
-        else:
-            cmd = "/usr/bin/sudo -n /bin/chown %s:%s %s" % (user, group, file)
-        try:
-            subprocess.check_output(cmd.split(),
-                                    stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to change file ownership: %s\n%s"
-                            % (e, e.output.rstrip('\n')))
-
-    def sudo_rm(self, file):
-        if not os.path.exists(file):
-            return
-        cmd = '/usr/bin/sudo -n /bin/rm -f %s' % file
-        try:
-            subprocess.check_output(cmd.split(),
-                                    stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to delete %s\n%s"
-                            % (file, e.output.rstrip('\n')))
-
-    def write_config(self, config_str):
-        """
-        Generate the new config and replace the current config file.
-
-        We'll first write out a new config to a temporary file, back up
-        the production config file, then rename the temporary config to the
-        production config.
-        """
-        tmpfile = '/tmp/haproxy.cfg'
-        fh = open(tmpfile, 'w')
-        fh.write(config_str)
-        fh.close()
-
-        # Validate the config
-        check_cmd = "/usr/sbin/haproxy -f %s -c" % tmpfile
-        try:
-            subprocess.check_output(check_cmd.split(),
-                                    stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError as e:
-            raise Exception("Configuration file is invalid: %s\n%s" %
-                            (e, e.output.rstrip('\n')))
-
-        # Copy any existing configuration file to a backup.
-        if os.path.exists(self._config_file):
-            self.sudo_copy(self._config_file, self._backup_config)
-
-        # Move the temporary config file to production version.
-        move_cmd = "/usr/bin/sudo -n /bin/mv %s %s" % (tmpfile,
-                                                       self._config_file)
-        try:
-            subprocess.check_output(move_cmd.split(), stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError as e:
-            raise Exception("Failed to write configuration file: %s\n%s"
-                            % (e, e.output.rstrip('\n')))
-
-    def remove_configs(self):
-        """ Delete current and backup configs on the local machine. """
-        self.sudo_rm(self._config_file)
-        self.sudo_rm(self._backup_config)
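write_config() above follows a validate / back up / swap sequence that generalizes well beyond this driver. A condensed sketch of the same pattern, assuming enough privilege to write the target directly (the real class shells out through sudo), and using mkstemp rather than a fixed /tmp name:

    import os
    import shutil
    import subprocess
    import tempfile

    def replace_config(new_text, live_path='/etc/haproxy/haproxy.cfg'):
        # Write the candidate config to a private temporary file.
        fd, tmp = tempfile.mkstemp(suffix='.cfg')
        with os.fdopen(fd, 'w') as fh:
            fh.write(new_text)
        # Refuse to install a config haproxy itself rejects.
        subprocess.check_output(['/usr/sbin/haproxy', '-f', tmp, '-c'],
                                stderr=subprocess.STDOUT)
        # Keep a rollback copy, then move the validated file into place.
        if os.path.exists(live_path):
            shutil.copy2(live_path, live_path + '.BKUP')
        shutil.move(tmp, live_path)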
-
-    def get_status(self, protocol=None):
-        """
-        Query HAProxy socket for node status on the given protocol.
-
-        protocol
-            One of the supported protocol names (http or tcp).
-
-        This function will query the HAProxy statistics socket and pull out
-        the values that it needs for the given protocol (which equates to one
-        load balancer).
-
-        The output of the socket query is in CSV format and defined here:
-
-        http://cbonte.github.com/haproxy-dconv/configuration-1.4.html#9
-        """
-
-        if not os.path.exists(self._config_file):
-            raise exc.DeletedStateError("Load balancer is deleted.")
-        if not os.path.exists(self._haproxy_pid):
-            raise Exception("HAProxy is not running.")
-
-        q = query.HAProxyQuery('/var/run/haproxy-stats.socket')
-        return q.get_server_status(protocol)
-
-    def get_statistics(self):
-        if not os.path.exists(self._config_file):
-            raise exc.DeletedStateError("Load balancer is deleted.")
-        if not os.path.exists(self._haproxy_pid):
-            raise Exception("HAProxy is not running.")
-
-        q = query.HAProxyQuery('/var/run/haproxy-stats.socket')
-        results = q.get_bytes_out()
-
-        stats_file = cfg.CONF['worker:haproxy']['statsfile']
-        stats_mgr = stats.StatisticsManager(stats_file)
-
-        # date range for this report
-        new_start = stats_mgr.calculate_new_start()
-        new_end = datetime.datetime.utcnow()
-
-        # previously recorded totals
-        prev_tcp_bo = stats_mgr.get_last_tcp_bytes()
-        prev_http_bo = stats_mgr.get_last_http_bytes()
-        unrpt_tcp_bo = stats_mgr.get_unreported_tcp_bytes()
-        unrpt_http_bo = stats_mgr.get_unreported_http_bytes()
-
-        # current totals
-        current_tcp_bo = 0
-        current_http_bo = 0
-        if 'http' in results:
-            current_http_bo = results['http']
-        if 'tcp' in results:
-            current_tcp_bo = results['tcp']
-
-        # If the totals we previously recorded are greater than the totals
-        # we have now, and there are no unreported values, then HAProxy was
-        # somehow restarted outside of the worker's control, so we have no
-        # choice but to zero the values to avoid overcharging on usage.
-        if (unrpt_tcp_bo == 0 and unrpt_http_bo == 0) and \
-                ((prev_tcp_bo > current_tcp_bo) or
-                 (prev_http_bo > current_http_bo)):
-            LOG.warn("Forced reset of HAProxy statistics")
-            prev_tcp_bo = 0
-            prev_http_bo = 0
-
-        # Record totals for each protocol for comparison in the next request.
-        stats_mgr.save(new_start, new_end,
-                       tcp_bytes=current_tcp_bo,
-                       http_bytes=current_http_bo)
-
-        # We are to deliver the number of bytes out since our last report,
-        # not the total, so calculate that here. Some examples:
-        #
-        # unreported total(A) | prev total(B) | current(C) | returned value
-        #                     |               |            |   A + C - B
-        # --------------------+---------------+------------+---------------
-        #                   0 |             0 |        200 |           200
-        #                   0 |           200 |       1500 |          1300
-        #                2000 |          1500 |        100 |           600
-
-        incremental_results = []
-        if 'http' in results:
-            value = unrpt_http_bo + current_http_bo - prev_http_bo
-            if value < 0:
-                LOG.error("Negative statistics value: %d" % value)
-            incremental_results.append(('http', value))
-        if 'tcp' in results:
-            value = unrpt_tcp_bo + current_tcp_bo - prev_tcp_bo
-            if value < 0:
-                LOG.error("Negative statistics value: %d" % value)
-            incremental_results.append(('tcp', value))
-
-        return str(new_start), str(new_end), incremental_results
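The examples table above is the whole algorithm; restated as a standalone function with the table's rows as checks:

    def incremental_bytes(unreported, previous_total, current_total):
        # A + C - B: bytes accumulated since the last report, where A covers
        # totals haproxy lost in a controlled restart.
        return unreported + current_total - previous_total

    assert incremental_bytes(0, 0, 200) == 200
    assert incremental_bytes(0, 200, 1500) == 1300
    assert incremental_bytes(2000, 1500, 100) == 600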
diff --git a/libra/worker/main.py b/libra/worker/main.py
deleted file mode 100644
index def3a39e..00000000
--- a/libra/worker/main.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import daemon
-import daemon.pidfile
-import daemon.runner
-import getpass
-import grp
-import logging as std_logging
-import pwd
-import time
-import threading
-
-from libra import __version__
-from libra.openstack.common import importutils
-from libra.openstack.common import log as logging
-from libra.common.options import CONF
-from libra.common.options import add_common_opts
-from libra.common.options import check_gearman_ssl_files
-from libra.common.log import get_descriptors
-from libra.worker.drivers.base import known_drivers
-from libra.worker.drivers.haproxy.services_base import haproxy_services
-from libra.worker.worker import config_thread
-
-
-LOG = logging.getLogger(__name__)
-
-
-class EventServer(object):
-    """
-    Encapsulates server activity so we can run it in either daemon or
-    non-daemon mode.
-    """
-
-    def main(self, tasks):
-        """
-        Main method of the server.
-
-        tasks
-            A list of two-item tuples, each holding a function to run
-            and a tuple of that function's arguments.
-        """
-
-        try:
-            check_gearman_ssl_files()
-        except Exception as e:
-            LOG.critical(str(e))
-            return
-
-        thread_list = []
-
-        driver = CONF['worker']['driver']
-        LOG.info("Selected driver: %s" % driver)
-        if driver == 'haproxy':
-            LOG.info("Selected HAProxy service: %s" %
-                     CONF['worker:haproxy']['service'])
-        LOG.info("Job server list: %s" % CONF['gearman']['servers'])
-
-        for task, task_args in tasks:
-            thd = threading.Thread(target=task, args=task_args)
-            thd.daemon = True
-            thread_list.append(thd)
-            thd.start()
-
-        while True:
-            try:
-                time.sleep(600)
-            except KeyboardInterrupt:
-                LOG.info("Non-daemon session terminated")
-                break
-
-        LOG.info("Shutting down")
-
-
-def main():
-    """ Main Python entry point for the worker utility. """
-
-    add_common_opts()
-    CONF(project='libra', version=__version__)
-
-    logging.setup('libra')
-
-    LOG.debug('Configuration:')
-    CONF.log_opt_values(LOG, std_logging.DEBUG)
-
-    # Import the device driver we are going to use. This will be sent
-    # along to the Gearman task that will use it to communicate with
-    # the device.
-
-    selected_driver = CONF['worker']['driver']
-    driver_class = importutils.import_class(known_drivers[selected_driver])
-
-    if selected_driver == 'haproxy':
-        if CONF['user']:
-            user = CONF['user']
-        else:
-            user = getpass.getuser()
-
-        if CONF['group']:
-            group = CONF['group']
-        else:
-            group = None
-
-        haproxy_service = CONF['worker:haproxy']['service']
-        haproxy_logfile = CONF['worker:haproxy']['logfile']
-        driver = driver_class(haproxy_services[haproxy_service],
-                              user, group,
-                              haproxy_logfile=haproxy_logfile)
-    else:
-        driver = driver_class()
-
-    server = EventServer()
-
-    # Tasks to execute in parallel
-    task_list = [
-        (config_thread, (driver,))
-    ]
-
-    if not CONF['daemon']:
-        server.main(task_list)
-    else:
-
-        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['worker']['pid'], 10)
-        if daemon.runner.is_pidfile_stale(pidfile):
-            pidfile.break_lock()
-        descriptors = get_descriptors()
-        context = daemon.DaemonContext(
-            working_directory='/etc/haproxy',
-            umask=0o022,
-            pidfile=pidfile,
-            files_preserve=descriptors
-        )
-        if CONF['user']:
-            context.uid = pwd.getpwnam(CONF['user']).pw_uid
-        if CONF['group']:
-            context.gid = grp.getgrnam(CONF['group']).gr_gid
-
-        context.open()
-        server.main(task_list)
-
-    return 0
diff --git a/libra/worker/worker.py b/libra/worker/worker.py
deleted file mode 100644
index 0aff8ff1..00000000
--- a/libra/worker/worker.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import socket
-from libra import gear  # TODO
-from oslo.config import cfg
-from libra.worker.controller import LBaaSController
-from libra.openstack.common import log
-
-
-LOG = log.getLogger(__name__)
-
-
-class CustomJSONGearmanWorker(gear.Worker):
-    """ Custom class we will use to pass arguments to the Gearman task. """
-    driver = None
-
-
-def handler(worker, job):
-    """
-    Main Gearman worker task.
-
-    This is the function executed by the Gearman worker for incoming requests
-    from the Gearman job server. It will be executed once per request. Data
-    comes in as a JSON object, and a JSON object is returned in response.
-    """
-    driver = worker.driver
-
-    # Hide information that should not be logged
-    copy = json.loads(job.arguments)
-    if LBaaSController.OBJ_STORE_TOKEN_FIELD in copy:
-        copy[LBaaSController.OBJ_STORE_TOKEN_FIELD] = "*****"
-
-    LOG.debug("Received JSON message: %s" % json.dumps(copy))
-
-    controller = LBaaSController(driver, json.loads(job.arguments))
-    response = controller.run()
-
-    # Hide information that should not be logged
-    copy = response.copy()
-    if LBaaSController.OBJ_STORE_TOKEN_FIELD in copy:
-        copy[LBaaSController.OBJ_STORE_TOKEN_FIELD] = "*****"
-
-    LOG.debug("Return JSON message: %s" % json.dumps(copy))
-    job.sendWorkComplete(json.dumps(copy))
-
-
-def config_thread(driver):
-    """ Worker thread function. """
-    # Hostname should be a unique value, such as a UUID.
-    hostname = socket.gethostname()
-    LOG.info("Registering task %s" % hostname)
-    worker = CustomJSONGearmanWorker(hostname)
-    for host_port in cfg.CONF['gearman']['servers']:
-        host, port = host_port.split(':')
-        worker.addServer(host, port, cfg.CONF['gearman']['ssl_key'],
-                         cfg.CONF['gearman']['ssl_cert'],
-                         cfg.CONF['gearman']['ssl_ca'])
-    worker.registerFunction(hostname)
-    worker.log = LOG
-    worker.driver = driver
-    retry = True
-    while retry:
-        try:
-            job = worker.getJob()
-            handler(worker, job)
-        except KeyboardInterrupt:
-            retry = False
-        except Exception as e:
-            LOG.critical("Exception: %s, %s" % (e.__class__, e))
-            retry = False
-    LOG.debug("Worker process terminated.")
diff --git a/openstack-common.conf b/openstack-common.conf
deleted file mode 100644
index 0f52cddd..00000000
--- a/openstack-common.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-[DEFAULT]
-
-# The list of modules to copy from openstack-common
-
-module=fixture
-module=importutils
-module=jsonutils
-module=notifier
-module=xmlutils
-module=test
-
-
-# The base module to hold the copy of openstack.common
-base=libra
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 68de6b79..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-pbr>=0.6,!=0.7,<1.0
-
-Babel>=1.3
-eventlet
-# put back once it's patched
-# gear
-oslo.config>=1.2.1
-python-daemon>=1.6
-python-novaclient>=2.17.0
-python-swiftclient>=2.0.2
-requests>=1.1
-dogapi
-pecan
-SQLAlchemy>=0.7.8,!=0.9.5,<=0.9.99
-WSME>=0.6
-mysql-connector-python
-ipaddress==1.0.4
-six>=1.7.0
-kombu
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 297b9671..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,31 +0,0 @@
-[metadata]
-name = libra
-author = David Shrewsbury, Andrew Hutchings
-summary = Python LBaaS Gearman Worker and Pool Manager
-description-file = README
-home-page = http://pypi.python.org/pypi/libra
-classifier =
-    Development Status :: 4 - Beta
-    Environment :: Console
-    Environment :: OpenStack
-    Intended Audience :: Developers
-    Intended Audience :: Information Technology
-    License :: OSI Approved :: Apache Software License
-    Operating System :: OS Independent
-    Programming Language :: Python
-
-[files]
-packages =
-    libra
-
-[entry_points]
-console_scripts =
-    libra_worker = libra.worker.main:main
-    libra_pool_mgm = libra.mgm.mgm:main
-    libra_api = libra.api.app:main
-    libra_admin_api = libra.admin_api.app:main
-
-[build_sphinx]
-all_files = 1
-build-dir = build/sphinx
-source-dir = doc
diff --git a/setup.py b/setup.py
deleted file mode 100644
index c0a24eab..00000000
--- a/setup.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import setuptools
-
-setuptools.setup(
-    setup_requires=['pbr'],
-    pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 7bf0e3b8..00000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-hacking>=0.5.6,<0.8
-
-fixtures>=0.3.12
-mock
-python-subunit
-sphinx>=1.1.2
-testrepository>=0.0.8
-testtools>=0.9.22
-mox
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 1ea3c22a..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,30 +0,0 @@
-[tox]
-envlist = py27,pep8
-minversion = 1.6
-skipsdist = True
-
-[testenv]
-usedevelop = True
-install_command = pip install --allow-external mysql-connector-python --allow-insecure -U {opts} {packages}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-commands = python setup.py testr --slowest --testr-args='{posargs}'
-
-[tox:jenkins]
-downloadcache = ~/cache/pip
-
-[testenv:pep8]
-deps = flake8
-commands = flake8
-
-[flake8]
-ignore = H
-select = H234
-show-source = True
-exclude = .venv,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tests,build
-
-[testenv:cover]
-commands = python setup.py testr --coverage --testr-args='{posargs}'
-
-[testenv:venv]
-commands = {posargs}