diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index eafb397..0000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = oslo_messaging -omit = oslo_messaging/tests/*,oslo_messaging/openstack/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 874bacd..0000000 --- a/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -AUTHORS -ChangeLog -*~ -*.swp -*.pyc -*.log -.tox -.coverage -*.egg-info/ -.eggs -*.egg -build/ -doc/build/ -doc/source/api/ -dist/ -.testrepository/ -releasenotes/build diff --git a/.gitreview b/.gitreview deleted file mode 100644 index beb811a..0000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/oslo.messaging.git diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 80e7ea8..0000000 --- a/.testr.conf +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index ef8e98a..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - http://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/oslo.messaging diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 4143aac..0000000 --- a/LICENSE +++ /dev/null @@ -1,204 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - ---- License for python-keystoneclient versions prior to 2.1 --- - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of this project nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.rst b/README.rst deleted file mode 100644 index 3013851..0000000 --- a/README.rst +++ /dev/null @@ -1,18 +0,0 @@ -Oslo Messaging Library -====================== - -.. image:: https://img.shields.io/pypi/v/oslo.messaging.svg - :target: https://pypi.python.org/pypi/oslo.messaging/ - :alt: Latest Version - -.. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg - :target: https://pypi.python.org/pypi/oslo.messaging/ - :alt: Downloads - -The Oslo messaging API supports RPC and notifications over a number of -different messaging transports. 
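For orientation, a minimal RPC round trip with this API looks roughly like the sketch below. It is illustrative only: the topic, endpoint class, and values are made up, and it assumes a transport_url has been configured (e.g. in the service's configuration file)::

    import oslo_messaging
    from oslo_config import cfg

    class TestEndpoint(object):
        def add(self, ctxt, a, b):
            return a + b

    # The transport (e.g. rabbit://...) comes from the configured
    # transport_url / rpc_backend options.
    transport = oslo_messaging.get_transport(cfg.CONF)

    # Server side (one process): serve the 'demo' topic.
    target = oslo_messaging.Target(topic='demo', server='server1')
    server = oslo_messaging.get_rpc_server(transport, target, [TestEndpoint()],
                                           executor='blocking')
    server.start()
    # server.wait() would block here, servicing requests.

    # Client side (normally another process): invoke the method.
    client = oslo_messaging.RPCClient(transport,
                                      oslo_messaging.Target(topic='demo'))
    print(client.call({}, 'add', a=2, b=3))   # -> 5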
- -* License: Apache License, Version 2.0 -* Documentation: http://docs.openstack.org/developer/oslo.messaging -* Source: http://git.openstack.org/cgit/openstack/oslo.messaging -* Bugs: http://bugs.launchpad.net/oslo.messaging diff --git a/README.txt b/README.txt new file mode 100644 index 0000000..9b614ba --- /dev/null +++ b/README.txt @@ -0,0 +1,13 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +Instead, use the project deb-python-oslo.messaging at +http://git.openstack.org/cgit/openstack/deb-python-oslo.messaging . + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab8..0000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/doc/source/AMQP1.0.rst b/doc/source/AMQP1.0.rst deleted file mode 100644 index 31c5cc3..0000000 --- a/doc/source/AMQP1.0.rst +++ /dev/null @@ -1,193 +0,0 @@ -------------------------- -AMQP 1.0 Protocol Support -------------------------- - -.. currentmodule:: oslo_messaging - -============ -Introduction -============ - -This release of oslo.messaging includes an experimental driver that -provides support for version 1.0 of the Advanced Message Queuing -Protocol (AMQP 1.0, ISO/IEC 19464). - -The current implementation of this driver is considered -*experimental*. It is not recommended that this driver be used in -production systems. Rather, this driver is being provided as a -*technical preview*, in hopes that it will encourage further testing -by the AMQP 1.0 community. - -More detail regarding the driver's implementation is available from the `specification`_. - -.. _specification: https://git.openstack.org/cgit/openstack/oslo-specs/tree/specs/juno/amqp10-driver-implementation.rst - -============= -Prerequisites -============= - -This driver uses the Apache QPID `Proton`_ AMQP 1.0 protocol engine. -This engine consists of a platform-specific library and a Python -binding. The driver does not directly interface with the engine API, -as the API is a very low-level interface to the AMQP protocol. -Instead, the driver uses the pure-Python `Pyngus`_ client API, which -is layered on top of the protocol engine. - -.. _Proton: http://qpid.apache.org/proton/index.html -.. _Pyngus: https://github.com/kgiusti/pyngus - -In order to run the driver, the Proton Python bindings, the Proton -library and header files, and Pyngus must be installed. - -Pyngus is available via `Pypi`__. - -.. __: https://pypi.python.org/pypi/pyngus - -While the Proton Python bindings are available via `Pypi`__, they -include a C extension that requires the Proton library and header -files to be pre-installed in order for the binding to install properly. -If the target platform's distribution provides a pre-packaged version -of the Proton Python binding (see packages_ below), it is recommended -to use these pre-built packages instead of pulling them from Pypi. - -.. __: https://pypi.python.org/pypi/python-qpid-proton - -The driver also requires a *broker* that supports version 1.0 of the -AMQP protocol. - -The driver has only been tested using `qpidd`_ in a `patched -devstack`_ environment. The version of qpidd **must** be at least -0.26.
qpidd also uses the Proton engine for its AMQP 1.0 support, so -the Proton library must be installed on the system hosting the qpidd -daemon. - -.. _qpidd: http://qpid.apache.org/components/cpp-broker/index.html -.. _patched devstack: https://review.openstack.org/#/c/109118/ - -At present, RabbitMQ does not work with this driver. This driver -makes use of the *dynamic* flag on the link Source to automatically -provision a node at the peer. RabbitMQ's AMQP 1.0 implementation has -yet to implement this feature. - -See the `specification`_ for additional information regarding testing -done on the driver. - -============= -Configuration -============= - -driver ------- - -It is recommended to start with the default configuration options -supported by the driver. The remaining configuration steps described -below assume that none of the driver's options have been manually -overridden. - - **Note Well:** The driver currently does **not** support the generic - *amqp* options used by the existing drivers, such as - *amqp_durable_queues* or *amqp_auto_delete*. Support for these is - TBD. - -qpidd ------ - -First, verify that the Proton library has been installed and is -imported by the qpidd broker. This can be checked by running:: - - $ qpidd --help - -and looking for the AMQP 1.0 options in the help text. If no AMQP 1.0 -options are listed, verify that the Proton libraries are installed and -that the version of qpidd is greater than or equal to 0.26. - -Second, configure the address patterns used by the driver. This is -done by adding the following to /etc/qpid/qpidd.conf:: - - queue-patterns=exclusive - queue-patterns=unicast - topic-patterns=broadcast - -These patterns, *exclusive*, *unicast*, and *broadcast*, are the -default values used by the driver. These can be overridden via the -driver configuration options if desired. If manually overridden, -update the qpidd.conf values to match. - -services --------- - -The new driver is selected by specifying **amqp** as the transport -name. For example:: - - from oslo import messaging - from oslo.config import cfg - - amqp_transport = messaging.get_transport(cfg.CONF, - "amqp://me:passwd@host:5672") - - -The new driver can be loaded and used by existing applications by -specifying *amqp* as the RPC backend in the service's configuration -file. For example, in nova.conf:: - - rpc_backend = amqp - -.. _packages: - -====================== -Platforms and Packages -====================== - -Pyngus is available via Pypi. - -Pre-built packages for the Proton library and qpidd are available for -some popular distributions: - -RHEL and Fedora ---------------- - -Packages exist in EPEL for RHEL/Centos 7, and Fedora 19+. -Unfortunately, RHEL/Centos 6 base packages include a very old version -of qpidd that does not support AMQP 1.0. EPEL's policy does not allow -a newer version of qpidd for RHEL/Centos 6. - -The following packages must be installed on the system running the -qpidd daemon: - -- qpid-cpp-server (version 0.26+) -- qpid-proton-c - -The following packages must be installed on the systems running the -services that use the new driver: - -- Proton libraries: qpid-proton-c-devel -- Proton python bindings: python-qpid-proton -- pyngus (via Pypi) - -Debian and Ubuntu ------------------ - -Packages for the Proton library, headers, and Python bindings are -available in the Debian/Testing repository. Proton packages are not -yet available in the Ubuntu repository. The version of qpidd on both -platforms is too old and does not support AMQP 1.0.
- -Until the proper package versions arrive, the latest packages can be -pulled from the `Apache Qpid PPA`_ on Launchpad:: - - sudo add-apt-repository ppa:qpid/released - -.. _Apache Qpid PPA: https://launchpad.net/~qpid/+archive/ubuntu/released - -The following packages must be installed on the system running the -qpidd daemon: - -- qpidd (version 0.26+) -- libqpid-proton2 - -The following packages must be installed on the systems running the -services that use the new driver: - -- Proton libraries: libqpid-proton2-dev -- Proton python bindings: python-qpid-proton -- pyngus (via Pypi) diff --git a/doc/source/FAQ.rst b/doc/source/FAQ.rst deleted file mode 100644 index 2a67bba..0000000 --- a/doc/source/FAQ.rst +++ /dev/null @@ -1,42 +0,0 @@ -============================ - Frequently Asked Questions -============================ - -I don't need notifications on the message bus. How do I disable them? -====================================================================== - -Notification messages can be disabled using the ``noop`` notify -driver. Set ``driver = noop`` in your configuration file under the -[oslo_messaging_notifications] section. - -Why does the notification publisher create queues, too? Shouldn't the subscriber do that? -========================================================================================= - -The notification messages are meant to be used for integration with -external services, including services that are not part of -OpenStack. To ensure that the subscriber does not miss any messages if -it starts after the publisher, ``oslo.messaging`` ensures that -subscriber queues exist when notifications are sent. - -How do I change the queue names where notifications are published? -================================================================== - -Notifications are published to the configured exchange using a topic -built from a base value specified in the configuration file and the -notification "level". The default topic is ``notifications``, so an -info-level notification is published to the topic -``notifications.info``. A subscriber queue of the same name is created -automatically for each of these topics. To change the queue names, -change the notification topic using the ``topics`` -configuration option in ``[oslo_messaging_notifications]``. The option -accepts a list of values, so it is possible to publish to multiple topics. - -What other choices of notification drivers are available? -============================================================= - -- messaging Send notifications using the 1.0 message format. -- messagingv2 Send notifications using the 2.0 message format (with a message envelope). -- routing Configurable routing notifier (by priority or event_type). -- log Publish notifications via Python logging infrastructure. -- test Store notifications in memory for test verification. -- noop Disable sending notifications entirely. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 6008861..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import subprocess -import sys -import warnings - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ - 'sphinx.ext.autodoc', - 'oslosphinx', - 'stevedore.sphinxext', - 'oslo_config.sphinxext', -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# Add any paths that contain templates here, relative to this directory. -# templates_path = [] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'oslo.messaging' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", - "-n1"] -try: - html_last_updated_fmt = subprocess.Popen( - git_cmd, stdout=subprocess.PIPE).communicate()[0] -except Exception: - warnings.warn('Cannot get last updated time from git repository. ' - 'Not setting "html_last_updated_fmt".') - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] diff --git a/doc/source/conffixture.rst b/doc/source/conffixture.rst deleted file mode 100644 index e66887d..0000000 --- a/doc/source/conffixture.rst +++ /dev/null @@ -1,9 +0,0 @@ ----------------------- -Testing Configurations ----------------------- - -.. currentmodule:: oslo_messaging.conffixture - -.. autoclass:: ConfFixture - :members: - diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index 2ca75d1..0000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,5 +0,0 @@ -============== - Contributing -============== - -.. include:: ../../CONTRIBUTING.rst diff --git a/doc/source/drivers.rst b/doc/source/drivers.rst deleted file mode 100644 index 21b021a..0000000 --- a/doc/source/drivers.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================== - Available Drivers -=================== - -.. list-plugins:: oslo.messaging.drivers - :detailed: diff --git a/doc/source/exceptions.rst b/doc/source/exceptions.rst deleted file mode 100644 index 2f01705..0000000 --- a/doc/source/exceptions.rst +++ /dev/null @@ -1,18 +0,0 @@ ----------- -Exceptions ----------- - -.. currentmodule:: oslo_messaging - -.. autoexception:: ClientSendError -.. autoexception:: DriverLoadFailure -.. autoexception:: ExecutorLoadFailure -.. autoexception:: InvalidTransportURL -.. autoexception:: MessagingException -.. autoexception:: MessagingTimeout -.. autoexception:: MessagingServerError -.. autoexception:: NoSuchMethod -.. autoexception:: RPCDispatcherError -.. autoexception:: RPCVersionCapError -.. autoexception:: ServerListenError -.. 
autoexception:: UnsupportedVersion diff --git a/doc/source/executors.rst b/doc/source/executors.rst deleted file mode 100644 index 2fde5b8..0000000 --- a/doc/source/executors.rst +++ /dev/null @@ -1,13 +0,0 @@ -========= -Executors -========= - -Executors provide the way an incoming message is dispatched so that -the message can be used for meaningful work. Different types of executors are -supported, each with its own set of restrictions and capabilities. - -Available Executors -=================== - -.. list-plugins:: oslo.messaging.executors - :detailed: diff --git a/doc/source/history.rst b/doc/source/history.rst deleted file mode 100644 index 69ed4fe..0000000 --- a/doc/source/history.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../ChangeLog diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 9a6873c..0000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -oslo.messaging -============== - -The Oslo messaging API supports RPC and notifications over a number of -different messaging transports. - -Contents -======== - -.. toctree:: - :maxdepth: 1 - - transport - executors - target - server - rpcclient - notifier - notification_driver - notification_listener - serializer - exceptions - opts - conffixture - drivers - supported-messaging-drivers - AMQP1.0 - zmq_driver - FAQ - contributing - -Release Notes -============= - -.. toctree:: - :maxdepth: 1 - - history - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/source/notification_driver.rst b/doc/source/notification_driver.rst deleted file mode 100644 index d7368d8..0000000 --- a/doc/source/notification_driver.rst +++ /dev/null @@ -1,15 +0,0 @@ -------------------- -Notification Driver -------------------- - -.. automodule:: oslo_messaging.notify.messaging - -.. autoclass:: MessagingDriver - -.. autoclass:: MessagingV2Driver - -.. currentmodule:: oslo_messaging.notify.notifier - -.. autoclass:: Driver - :members: - :noindex: diff --git a/doc/source/notification_listener.rst b/doc/source/notification_listener.rst deleted file mode 100644 index 3a69070..0000000 --- a/doc/source/notification_listener.rst +++ /dev/null @@ -1,16 +0,0 @@ ---------------------- -Notification Listener ---------------------- - -.. automodule:: oslo_messaging.notify.listener - -.. currentmodule:: oslo_messaging - -.. autofunction:: get_notification_listener - -.. autoclass:: MessageHandlingServer - :members: - :noindex: - -.. autofunction:: get_local_context - :noindex: diff --git a/doc/source/notifier.rst b/doc/source/notifier.rst deleted file mode 100644 index 4156b1e..0000000 --- a/doc/source/notifier.rst +++ /dev/null @@ -1,20 +0,0 @@ -========== - Notifier -========== - -.. currentmodule:: oslo_messaging - -.. autoclass:: Notifier - :members: - -.. autoclass:: LoggingNotificationHandler - :members: - -.. autoclass:: LoggingErrorNotificationHandler - :members: - -Available Notifier Drivers -========================== - -.. list-plugins:: oslo.messaging.notify.drivers - :detailed: diff --git a/doc/source/opts.rst b/doc/source/opts.rst deleted file mode 100644 index 9d94c18..0000000 --- a/doc/source/opts.rst +++ /dev/null @@ -1,16 +0,0 @@ -======================= - Configuration Options -======================= - -oslo.messaging uses oslo.config to define and manage configuration -options to allow the deployer to control how an application uses the -underlying messaging system. - -.. show-options:: oslo.messaging - -API -=== - -.. currentmodule:: oslo_messaging.opts - -.. autofunction:: list_opts
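Consumers typically discover these options through the ``list_opts`` entry point documented above. The following is a small, hypothetical sketch of how a deployment tool might dump them; the exact option objects returned depend on the library version::

    from oslo_messaging import opts

    # list_opts() returns (group, options) pairs in the usual oslo.config
    # convention; group may be None for [DEFAULT].
    for group, options in opts.list_opts():
        print(group or 'DEFAULT')
        for opt in options:
            print('    %s (default: %s)' % (opt.name, opt.default))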
diff --git a/doc/source/pika_driver.rst b/doc/source/pika_driver.rst deleted file mode 100644 index 508aaa6..0000000 --- a/doc/source/pika_driver.rst +++ /dev/null @@ -1,156 +0,0 @@ ------------------------------- -Pika Driver Deployment Guide ------------------------------- - -.. currentmodule:: oslo_messaging - -============ -Introduction -============ - -Pika is a pure-Python implementation of the AMQP 0-9-1 protocol including -RabbitMQ's extensions. It is very actively supported and recommended by -RabbitMQ developers. - -======== -Abstract -======== - -PikaDriver is one of the oslo.messaging backend drivers. It supports RPC and Notify -patterns. It can currently serve as the only oslo.messaging driver across the -OpenStack cluster. This document provides deployment information for this -driver in oslo_messaging. - -This driver is able to work with a single RabbitMQ server instance or a -RabbitMQ cluster. - - -============= -Configuration -============= - -Enabling (mandatory) -------------------- - -To enable the driver, in the [DEFAULT] section of the conf file, -the 'transport_url' parameter should be set to -`pika://user:pass@host1:port[,hostN:portN]`, for example:: - - [DEFAULT] - transport_url = pika://guest:guest@localhost:5672 - - -Connection options (optional) ----------------------------- - -In section [oslo_messaging_pika]: -#. channel_max: Maximum number of channels to allow, - -#. frame_max (default - pika default value): The maximum byte size for - an AMQP frame, - -#. heartbeat_interval (default=1): How often, in seconds, to send heartbeats for - consumers' connections. If 0, heartbeats are disabled, - -#. ssl (default=False): Enable SSL if True, - -#. ssl_options (default=None): Arguments passed to ssl.wrap_socket, - -#. socket_timeout (default=0.25): Set the timeout for opening a new connection's - socket, - -#. tcp_user_timeout (default=0.25): Set TCP_USER_TIMEOUT in seconds for - the connection's socket, - -#. host_connection_reconnect_delay (default=0.25): Set the delay for reconnecting - to a host after a connection error - - -Connection pool options (optional) ---------------------------------- - -In section [oslo_messaging_pika]: - -#. pool_max_size (default=10): Maximum number of connections to keep queued, - -#. pool_max_overflow (default=0): Maximum number of connections to create above - `pool_max_size`, - -#. pool_timeout (default=30): Default number of seconds to wait for a - connection to become available, - -#. pool_recycle (default=600): Lifetime of a connection (since creation) in - seconds or None for no recycling. Expired connections are closed on acquire, - -#. pool_stale (default=60): Threshold at which inactive (since release) - connections are considered stale, in seconds, or None for no staleness. - Stale connections are closed on acquire. - -RPC related options (optional) ------------------------------ - -In section [oslo_messaging_pika]: - -#. rpc_queue_expiration (default=60): Time to live for rpc queues without - consumers in seconds, - -#. default_rpc_exchange (default="${control_exchange}_rpc"): Exchange name for - sending RPC messages, - -#. rpc_reply_exchange (default="${control_exchange}_rpc_reply"): Exchange - name for receiving RPC replies, - -#. rpc_listener_prefetch_count (default=100): Max number of unacknowledged - messages which RabbitMQ can send to the rpc listener, -
-#. rpc_reply_listener_prefetch_count (default=100): Max number of - unacknowledged messages which RabbitMQ can send to the rpc reply listener, - -#. rpc_reply_retry_attempts (default=-1): Reconnection retry count in case of - connectivity problems while sending a reply; -1 means infinite retries during - rpc_timeout, - -#. rpc_reply_retry_delay (default=0.25): Reconnection retry delay in case of - connectivity problems while sending a reply, - -#. default_rpc_retry_attempts (default=-1): Reconnection retry count in case of - connectivity problems while sending an RPC message; -1 means infinite retries. If - the actual number of retry attempts is not 0, the rpc request could be processed more - than once, - -#. rpc_retry_delay (default=0.25): Reconnection retry delay in case of - connectivity problems while sending an RPC message - -$control_exchange here is the value of the [DEFAULT].control_exchange option, -which is "openstack" by default. - -Notification related options (optional) ---------------------------------------- - -In section [oslo_messaging_pika]: - -#. notification_persistence (default=False): Persist notification messages, - -#. default_notification_exchange (default="${control_exchange}_notification"): - Exchange name for sending notifications, - -#. notification_listener_prefetch_count (default=100): Max number of - unacknowledged messages which RabbitMQ can send to the notification listener, - -#. default_notification_retry_attempts (default=-1): Reconnection retry count - in case of connectivity problems while sending a notification; -1 means - infinite retries, - -#. notification_retry_delay (default=0.25): Reconnection retry delay in case of - connectivity problems while sending a notification message - -$control_exchange here is the value of the [DEFAULT].control_exchange option, -which is "openstack" by default. - -DevStack Support ----------------- - -The Pika driver is supported by DevStack. To enable it, edit the -local.conf [localrc] section and add the following there:: - - enable_plugin pika https://git.openstack.org/openstack/devstack-plugin-pika diff --git a/doc/source/rpcclient.rst b/doc/source/rpcclient.rst deleted file mode 100644 index 60da0cc..0000000 --- a/doc/source/rpcclient.rst +++ /dev/null @@ -1,10 +0,0 @@ ----------- -RPC Client ----------- - -.. currentmodule:: oslo_messaging - -.. autoclass:: RPCClient - :members: - -.. autoexception:: RemoteError diff --git a/doc/source/serializer.rst b/doc/source/serializer.rst deleted file mode 100644 index 64b5d45..0000000 --- a/doc/source/serializer.rst +++ /dev/null @@ -1,10 +0,0 @@ ----------- -Serializer ----------- - -.. currentmodule:: oslo_messaging - -.. autoclass:: Serializer - :members: - -.. autoclass:: NoOpSerializer diff --git a/doc/source/server.rst b/doc/source/server.rst deleted file mode 100644 index 36caa04..0000000 --- a/doc/source/server.rst +++ /dev/null @@ -1,20 +0,0 @@ ------- -Server ------- - -.. automodule:: oslo_messaging.rpc.server - -.. currentmodule:: oslo_messaging - -.. autofunction:: get_rpc_server - -.. autoclass:: RPCDispatcher - -.. autoclass:: MessageHandlingServer - :members: - -.. autofunction:: expected_exceptions - -.. autoexception:: ExpectedException -
-.. autofunction:: get_local_context diff --git a/doc/source/static/.placeholder b/doc/source/static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/doc/source/supported-messaging-drivers.rst b/doc/source/supported-messaging-drivers.rst deleted file mode 100644 index 5c25ed0..0000000 --- a/doc/source/supported-messaging-drivers.rst +++ /dev/null @@ -1,60 +0,0 @@ -============================= - Supported Messaging Drivers -============================= - -RabbitMQ may not be sufficient for the entire community as the community -grows. Pluggability is still something we should maintain, but we should -have a very high standard for drivers that are shipped and documented -as being supported. - -This document defines a very clear policy as to the requirements -for drivers to be carried in oslo.messaging and thus supported by the -OpenStack community as a whole. We will deprecate any drivers that do not -meet the requirements, and announce said deprecations in any appropriate -channels to give users time to signal their needs. Deprecation will last -for two release cycles before removing the code. We will also review and -update documentation to annotate which drivers are supported and which -are deprecated given these policies. - -Policy ------- - -Testing -~~~~~~~ - -* Must have unit and/or functional test coverage of at least 60% as - reported by the coverage report. Unit tests must be run for all Python - versions that oslo.messaging currently gates on. - -* Must have integration testing including at least 3 popular oslo.messaging - dependents, preferably at the minimum a devstack-gate job with Nova, - Cinder, and Neutron. - -* All testing above must be voting in the gate of oslo.messaging. - -Documentation -~~~~~~~~~~~~~ - -* Must have a reasonable amount of documentation including documentation - in the official OpenStack deployment guide. - -Support -~~~~~~~ - -* Must have at least two individuals from the community committed to - triaging and fixing bugs, and responding to test failures in a timely - manner. - -Prospective Drivers -~~~~~~~~~~~~~~~~~~~ - -* Drivers that intend to meet the requirements above, but that do not yet - meet them will be given one full release cycle, or 6 months, whichever - is longer, to comply before being marked for deprecation. Their use, - however, will not be supported by the community. This will prevent a - chicken and egg problem for new drivers. - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/doc/source/target.rst b/doc/source/target.rst deleted file mode 100644 index 57a627c..0000000 --- a/doc/source/target.rst +++ /dev/null @@ -1,54 +0,0 @@ ------- -Target ------- - -.. currentmodule:: oslo_messaging - -.. autoclass:: Target - -=============== -Target Versions -=============== - -Target version numbers take the form Major.Minor. For a given message with -version X.Y, the server must be marked as able to handle messages of version -A.B, where A == X and B >= Y. - -The Major version number should be incremented for an almost completely new -API. The Minor version number would be incremented for backwards compatible -changes to an existing API. A backwards compatible change could be something -like adding a new method, adding an argument to an existing method (but not -requiring it), or changing the type for an existing argument (but still -handling the old type as well).
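The compatibility rule above is small enough to state in code. This is an illustrative sketch of the check, not oslo.messaging's actual implementation (the function name is made up)::

    def can_handle(server_version, requested_version):
        # A server at Major.Minor A.B can serve a request pinned to X.Y
        # when A == X and B >= Y.
        a, b = map(int, server_version.split('.'))
        x, y = map(int, requested_version.split('.'))
        return a == x and b >= y

    assert can_handle('1.3', '1.1')        # minor additions are compatible
    assert not can_handle('2.0', '1.1')    # a major bump is a new API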
- -If no version is specified it defaults to '1.0'. - -In the case of RPC, if you wish to allow your server interfaces to evolve such -that clients do not need to be updated in lockstep with the server, you should -take care to implement the server changes in a backwards-compatible manner and have -the clients specify which interface version they require for each method. - -Adding a new method to an endpoint is a backwards compatible change and the -version attribute of the endpoint's target should be bumped from X.Y to X.Y+1. -On the client side, the new RPC invocation should have a specific version -specified to indicate the minimum API version that must be implemented for the -method to be supported. For example:: - - def get_host_uptime(self, ctxt, host): - cctxt = self.client.prepare(server=host, version='1.1') - return cctxt.call(ctxt, 'get_host_uptime') - -In this case, version '1.1' is the first version that supported the -get_host_uptime() method. - -Adding a new parameter to an RPC method can be made backwards compatible. The -endpoint version on the server side should be bumped. The implementation of -the method must not expect the parameter to be present:: - - def some_remote_method(self, arg1, arg2, newarg=None): - # The code needs to deal with newarg=None for cases - # where an older client sends a message without it. - pass - -On the client side, the same changes should be made as in the first example. The -minimum version that supports the new parameter should be specified. diff --git a/doc/source/transport.rst b/doc/source/transport.rst deleted file mode 100644 index 3449e9b..0000000 --- a/doc/source/transport.rst +++ /dev/null @@ -1,28 +0,0 @@ ---------- -Transport ---------- - -.. currentmodule:: oslo_messaging - -.. autofunction:: get_transport - -.. autoclass:: Transport - -.. autoclass:: TransportURL - :members: - -.. autoclass:: TransportHost - -.. autofunction:: set_transport_defaults - - -Forking Processes and oslo.messaging Transport objects ------------------------------------------------------- - -oslo.messaging can't ensure that forking a process that shares the same -transport object is safe for the library consumer, because the 3rd party -libraries it relies on don't ensure that either. In certain -cases, with some drivers, it does work: - -* rabbit: works only if no connections have already been established. -* amqp1: works diff --git a/doc/source/zmq_driver.rst b/doc/source/zmq_driver.rst deleted file mode 100644 index bcc3d66..0000000 --- a/doc/source/zmq_driver.rst +++ /dev/null @@ -1,266 +0,0 @@ ------------------------------- -ZeroMQ Driver Deployment Guide ------------------------------- - -.. currentmodule:: oslo_messaging - -============ -Introduction -============ - -0MQ (also known as ZeroMQ or zmq) is an embeddable networking library -that acts like a concurrency framework. It gives you sockets -that carry atomic messages across various transports -like in-process, inter-process, TCP, and multicast. You can connect -sockets N-to-N with patterns like fan-out, pub-sub, task distribution, -and request-reply. It's fast enough to be the fabric for clustered -products. Its asynchronous I/O model gives you scalable multi-core -applications, built as asynchronous message-processing tasks. It has -a score of language APIs and runs on most operating systems. - -Originally the zero in 0MQ was meant as "zero broker" and (as close to) -"zero latency" (as possible). Since then, it has come to encompass -different goals: zero administration, zero cost, and zero waste. -More generally, "zero" refers to the culture of minimalism that permeates -the project.
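As a concrete taste of the socket-and-pattern model described above, here is a tiny, hypothetical request-reply pair using the pyzmq binding (both ends are shown in one process purely for illustration; it is not part of the driver)::

    import zmq

    ctx = zmq.Context()

    rep = ctx.socket(zmq.REP)            # "server" end
    rep.bind("tcp://*:5555")

    req = ctx.socket(zmq.REQ)            # "client" end, normally another process
    req.connect("tcp://localhost:5555")

    req.send(b"ping")                    # one atomic message over TCP
    print(rep.recv())                    # b'ping'
    rep.send(b"pong")
    print(req.recv())                    # b'pong'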
- -More detail regarding the ZeroMQ library is available from the `specification`_. - -.. _specification: http://zguide.zeromq.org/page:all - -======== -Abstract -======== - -Currently, ZeroMQ is one of the RPC backend drivers in oslo.messaging. ZeroMQ -can be the only RPC driver across the OpenStack cluster. -This document provides deployment information for this driver in oslo_messaging. - -Unlike AMQP-based drivers such as RabbitMQ, ZeroMQ doesn't have -any central brokers in oslo.messaging. Instead, each host (running OpenStack -services) is both a ZeroMQ client and a server. As a result, each host needs to -listen on a certain TCP port for incoming connections and directly connect -to other hosts simultaneously. - -Another option is to use a router proxy. It is not a broker because it -doesn't assume any message ownership or persistence or replication etc. It -only redirects messages to endpoints, taking routing info from the -message envelope. - -Topics are used to identify the destination for a ZeroMQ RPC call. There are -two types of topics: bare topics and directed topics. Bare topics look like -'compute', while directed topics look like 'compute.machine1'. - -======== -Scenario -======== - -Assume the following systems as a goal. - -:: - - +--------+ - | Client | - +----+---+ - | - -----+---------+-----------------------+--------------------- - | | - +--------+------------+ +-------+----------------+ - | Controller Node | | Compute Node | - | Nova | | Neutron | - | Keystone | | Nova | - | Glance | | nova-compute | - | Neutron | | Ceilometer | - | Cinder | | | - | Ceilometer | +------------------------+ - | zmq-proxy | - | Redis | - | Horizon | - +---------------------+ - -============= -Configuration -============= - -Enabling (mandatory) --------------------- - -To enable the driver, the 'transport_url' option must be set to 'zmq://' -in the [DEFAULT] section of the conf file, and the 'rpc_zmq_host' flag -must be set to the hostname of the current node. :: - - [DEFAULT] - transport_url = "zmq://" - - [oslo_messaging_zmq] - rpc_zmq_host = {hostname} - - -Match Making (mandatory) ------------------------- - -The ZeroMQ driver implements a matching capability to discover hosts available -for communication when sending to a bare topic. This allows broker-less -communications. - -The MatchMaker is pluggable and provides two different MatchMaker classes. - -DummyMatchMaker: default matchmaker driver for the all-in-one scenario (messages -are sent to itself). - -RedisMatchMaker: loads the hash table from a remote Redis server, supports -dynamic host/topic registrations, host expiration, and hooks for consuming -applications to acknowledge or neg-acknowledge topic.host service availability. - -For the ZeroMQ driver, Redis is also configured in transport_url. To use Redis, -specify the URL as follows:: - - [DEFAULT] - transport_url = "zmq+redis://127.0.0.1:6379" - -In order to clean up expired records from Redis storage (e.g. when a target -listener goes down), a TTL may be applied to keys. Configure the 'zmq_target_expire' -option, which is 120 (seconds) by default. The option is not specific to -Redis, so it is also defined in the [oslo_messaging_zmq] section. If the option -value is <= 0, keys don't expire and live forever in the storage.
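To make the matchmaker's data model concrete: conceptually it maps a base topic to the hosts that serve it. The snippet below is a hypothetical peek with the redis-py client; the key layout is illustrative only, not the driver's actual schema::

    import redis

    r = redis.StrictRedis(host='127.0.0.1', port=6379)

    # register two hosts for the bare topic 'compute'
    r.sadd('compute', 'machine1', 'machine2')

    # a caller resolving the bare topic gets candidate hosts back
    print(r.smembers('compute'))   # e.g. {b'machine1', b'machine2'}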
- -MatchMaker Data Source (mandatory) ----------------------------------- - -The MatchMaker data source is stored in files or in the Redis server discussed in the -previous section. How the database is populated is the key issue in making the ZeroMQ -driver work. - -If deploying the RedisMatchMaker, a Redis server is required. For each (K, V) pair -stored in Redis, the key is a base topic and the corresponding values are -arrays of hostnames to send to. - - -HA for Redis database ---------------------- - -Single-node Redis works fine for testing, but production deployments want some -availability guarantees. A zmq deployment should continue working without the -Redis database, because services have no need for Redis once connections are -already established. But if you would like to restart some services, run more -workers, or add more hardware nodes to the deployment, you will need the node -discovery mechanism to work, and that requires Redis. - -To provide database recovery when, for example, a Redis node goes down, -we use the Sentinel solution and a Redis master-slave-slave configuration (if we have -3 controllers and run Redis on each of them). - -To deploy Redis with HA, follow the `sentinel-install`_ instructions. On the -messaging driver's side you will need to set up the following configuration :: - - [DEFAULT] - transport_url = "zmq+redis://host1:26379,host2:26379,host3:26379" - - -Restrict the number of TCP sockets on controller ------------------------------------------------- - -The most heavily used RPC pattern (CALL) may consume too many TCP sockets on -the controller node in a directly connected configuration. A ROUTER proxy -may be used to solve this issue. - -To configure the driver to use the ROUTER proxy, set the 'use_router_proxy' -option to true in the [oslo_messaging_zmq] section (false is set by default). - -For example:: - - use_router_proxy = true - -At least 3 proxies should be running on controllers or on standalone -nodes. The parameters for the script oslo-messaging-zmq-proxy should be:: - - oslo-messaging-zmq-proxy - --config-file /etc/oslo/zeromq.conf - --log-file /var/log/oslo/zmq-router-proxy.log - -Fanout-based patterns like CAST+Fanout and notifications always use the proxy, -as they act over PUB/SUB; the 'use_pub_sub' option defaults to true. In that case -a publisher proxy should be running. The proxy actually does both: routing to a -DEALER endpoint for direct messages and publishing to all subscribers over a -zmq.PUB socket. - -If not using PUB/SUB (use_pub_sub = false), fanout will be emulated over -direct DEALER/ROUTER unicast, which is possible but less efficient and therefore -is not recommended. In the case of direct DEALER/ROUTER unicast, a proxy is not -needed. - -This option can be set in the [oslo_messaging_zmq] section. - -For example:: - - use_pub_sub = true - - -When using a proxy, all publishers (clients) talk to servers through -the proxy, connecting to it via TCP. - -You can specify ZeroMQ options in /etc/oslo/zeromq.conf if necessary. - - -Listening Address (optional) ----------------------------- - -All services bind to an IP address or Ethernet adapter. By default, all services -bind to '*', effectively binding to 0.0.0.0. This may be changed with the option -'rpc_zmq_bind_address', which accepts a wildcard, IP address, or Ethernet adapter. - -This configuration can be set in the [oslo_messaging_zmq] section. - -For example:: - - rpc_zmq_bind_address = * - -Currently the zmq driver uses a dynamic port binding mechanism, which means that -each listener will be allocated a random port number. The port range is controlled -by the two options 'rpc_zmq_min_port' and 'rpc_zmq_max_port'. Change them to -restrict the current service's port binding range.
'rpc_zmq_bind_port_retries' -controls the number of retries before a 'ports range exceeded' failure. - -For example:: - - rpc_zmq_min_port = 9050 - rpc_zmq_max_port = 10050 - rpc_zmq_bind_port_retries = 100 - - -DevStack Support ----------------- - -The ZeroMQ driver is supported by DevStack. The configuration is as follows:: - - ENABLED_SERVICES+=,-rabbit,zeromq - ZEROMQ_MATCHMAKER=redis - -In the local.conf [localrc] section you need to enable the zmq plugin, which lives in -the `devstack-plugin-zmq`_ repository. - -For example:: - - enable_plugin zmq https://github.com/openstack/devstack-plugin-zmq.git - - -Example of local.conf:: - - [[local|localrc]] - DATABASE_PASSWORD=password - ADMIN_PASSWORD=password - SERVICE_PASSWORD=password - SERVICE_TOKEN=password - - enable_plugin zmq https://github.com/openstack/devstack-plugin-zmq.git - - OSLOMSG_REPO=https://review.openstack.org/openstack/oslo.messaging - OSLOMSG_BRANCH=master - - ZEROMQ_MATCHMAKER=redis - LIBS_FROM_GIT=oslo.messaging - ENABLE_DEBUG_LOG_LEVEL=True - - -.. _devstack-plugin-zmq: https://github.com/openstack/devstack-plugin-zmq.git -.. _sentinel-install: http://redis.io/topics/sentinel diff --git a/etc/routing_notifier.yaml.sample b/etc/routing_notifier.yaml.sample deleted file mode 100644 index 47af4b9..0000000 --- a/etc/routing_notifier.yaml.sample +++ /dev/null @@ -1,29 +0,0 @@ -# Setting a priority AND an event means both have to be satisfied. -# -# However, defining different sets for the same driver allows you -# to do OR operations. -# -# See how this logic is modelled below: -# -# if (priority in info, warn or error) or -# (event == compute.scheduler.run_instance) -# send to messaging driver ... -# -# if priority == 'poll' and -# event == 'bandwidth.*' -# send to poll driver - -group_1: - messaging: - accepted_priorities: ['info', 'warn', 'error'] - - poll: - accepted_priorities: ['poll'] - accepted_events: ['bandwidth.*'] - - log: - accepted_events: ['compute.instance.exists'] - -group_2: - messaging: - accepted_events: ['compute.scheduler.run_instance.*'] diff --git a/oslo_messaging/__init__.py b/oslo_messaging/__init__.py deleted file mode 100644 index 83529c5..0000000 --- a/oslo_messaging/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from .exceptions import * -from .notify import * -from .rpc import * -from .serializer import * -from .server import * -from .target import * -from .transport import * diff --git a/oslo_messaging/_cmd/__init__.py b/oslo_messaging/_cmd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_cmd/zmq_proxy.py b/oslo_messaging/_cmd/zmq_proxy.py deleted file mode 100644 index 3126a41..0000000 --- a/oslo_messaging/_cmd/zmq_proxy.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse -import logging - -from oslo_config import cfg - -from oslo_messaging._drivers.zmq_driver.proxy import zmq_proxy -from oslo_messaging._drivers.zmq_driver.proxy import zmq_queue_proxy -from oslo_messaging._drivers.zmq_driver import zmq_options - -CONF = cfg.CONF - -zmq_options.register_opts(CONF) - -opt_group = cfg.OptGroup(name='zmq_proxy_opts', - title='ZeroMQ proxy options') -CONF.register_opts(zmq_proxy.zmq_proxy_opts, group=opt_group) - - -USAGE = """ Usage: ./zmq-proxy.py [-h] [] ... - -Usage example: - python oslo_messaging/_cmd/zmq-proxy.py""" - - -def main(): - parser = argparse.ArgumentParser( - description='ZeroMQ proxy service', - usage=USAGE - ) - - parser.add_argument('--config-file', dest='config_file', type=str, - help='Path to configuration file') - - parser.add_argument('--host', dest='host', type=str, - help='Host FQDN for current proxy') - parser.add_argument('--frontend-port', dest='frontend_port', type=int, - help='Front-end ROUTER port number') - parser.add_argument('--backend-port', dest='backend_port', type=int, - help='Back-end ROUTER port number') - parser.add_argument('--publisher-port', dest='publisher_port', type=int, - help='Back-end PUBLISHER port number') - - parser.add_argument('-d', '--debug', dest='debug', type=bool, - default=False, - help="Turn on DEBUG logging level instead of INFO") - - args = parser.parse_args() - - if args.config_file: - cfg.CONF(["--config-file", args.config_file]) - - log_level = logging.INFO - if args.debug: - log_level = logging.DEBUG - logging.basicConfig(level=log_level, - format='%(asctime)s %(name)s ' - '%(levelname)-8s %(message)s') - - if args.host: - CONF.zmq_proxy_opts.host = args.host - if args.frontend_port: - CONF.set_override('frontend_port', args.frontend_port, - group='zmq_proxy_opts') - if args.backend_port: - CONF.set_override('backend_port', args.backend_port, - group='zmq_proxy_opts') - if args.publisher_port: - CONF.set_override('publisher_port', args.publisher_port, - group='zmq_proxy_opts') - - reactor = zmq_proxy.ZmqProxy(CONF, zmq_queue_proxy.UniversalQueueProxy) - - try: - while True: - reactor.run() - except (KeyboardInterrupt, SystemExit): - reactor.close() - -if __name__ == "__main__": - main() diff --git a/oslo_messaging/_drivers/__init__.py b/oslo_messaging/_drivers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/amqp.py b/oslo_messaging/_drivers/amqp.py deleted file mode 100644 index d6ad58c..0000000 --- a/oslo_messaging/_drivers/amqp.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 - 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Shared code between AMQP based openstack.common.rpc implementations. - -The code in this module is shared between the rpc implementations based on -AMQP. Specifically, this includes impl_kombu. impl_carrot also -uses AMQP, but is deprecated and predates this code. -""" - -import collections -import uuid - -from oslo_config import cfg -import six - -from oslo_messaging._drivers import common as rpc_common - -deprecated_durable_opts = [ - cfg.DeprecatedOpt('amqp_durable_queues', - group='DEFAULT'), - cfg.DeprecatedOpt('rabbit_durable_queues', - group='DEFAULT') -] - -amqp_opts = [ - cfg.BoolOpt('amqp_durable_queues', - default=False, - deprecated_opts=deprecated_durable_opts, - help='Use durable queues in AMQP.'), - cfg.BoolOpt('amqp_auto_delete', - default=False, - deprecated_group='DEFAULT', - help='Auto-delete queues in AMQP.'), -] - -UNIQUE_ID = '_unique_id' - - -class RpcContext(rpc_common.CommonRpcContext): - """Context that supports replying to a rpc.call.""" - def __init__(self, **kwargs): - self.msg_id = kwargs.pop('msg_id', None) - self.reply_q = kwargs.pop('reply_q', None) - super(RpcContext, self).__init__(**kwargs) - - def deepcopy(self): - values = self.to_dict() - values['conf'] = self.conf - values['msg_id'] = self.msg_id - values['reply_q'] = self.reply_q - return self.__class__(**values) - - -def unpack_context(msg): - """Unpack context from msg.""" - context_dict = {} - for key in list(msg.keys()): - key = six.text_type(key) - if key.startswith('_context_'): - value = msg.pop(key) - context_dict[key[9:]] = value - context_dict['msg_id'] = msg.pop('_msg_id', None) - context_dict['reply_q'] = msg.pop('_reply_q', None) - return RpcContext.from_dict(context_dict) - - -def pack_context(msg, context): - """Pack context into msg. - - Values for message keys need to be less than 255 chars, so we pull - context out into a bunch of separate keys. If we want to support - more arguments in rabbit messages, we may want to do the same - for args at some point. - - """ - if isinstance(context, dict): - context_d = six.iteritems(context) - else: - context_d = six.iteritems(context.to_dict()) - - msg.update(('_context_%s' % key, value) - for (key, value) in context_d) - - -class _MsgIdCache(object): - """This class checks any duplicate messages.""" - - # NOTE: This value is considered can be a configuration item, but - # it is not necessary to change its value in most cases, - # so let this value as static for now. - DUP_MSG_CHECK_SIZE = 16 - - def __init__(self, **kwargs): - self.prev_msgids = collections.deque([], - maxlen=self.DUP_MSG_CHECK_SIZE) - - def check_duplicate_message(self, message_data): - """AMQP consumers may read same message twice when exceptions occur - before ack is returned. This method prevents doing it. 
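As an aside on the context helpers defined in amqp.py above, the
pack_context()/unpack_context() round trip looks roughly like this (a sketch
assuming the module's own definitions; the message dict is illustrative)::

    msg = {'method': 'ping', 'args': {}}
    pack_context(msg, {'user': 'admin', 'project': 'demo'})
    # msg now carries the context as flattened keys:
    #   {'method': 'ping', 'args': {},
    #    '_context_user': 'admin', '_context_project': 'demo'}
    ctxt = unpack_context(msg)   # pops the '_context_*' keys back out
    # ctxt.to_dict() again contains user='admin', project='demo'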
- """ - try: - msg_id = message_data.pop(UNIQUE_ID) - except KeyError: - return - if msg_id in self.prev_msgids: - raise rpc_common.DuplicateMessageError(msg_id=msg_id) - return msg_id - - def add(self, msg_id): - if msg_id and msg_id not in self.prev_msgids: - self.prev_msgids.append(msg_id) - - -def _add_unique_id(msg): - """Add unique_id for checking duplicate messages.""" - unique_id = uuid.uuid4().hex - msg.update({UNIQUE_ID: unique_id}) - - -class AMQPDestinationNotFound(Exception): - pass diff --git a/oslo_messaging/_drivers/amqp1_driver/__init__.py b/oslo_messaging/_drivers/amqp1_driver/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/amqp1_driver/controller.py b/oslo_messaging/_drivers/amqp1_driver/controller.py deleted file mode 100644 index 440939c..0000000 --- a/oslo_messaging/_drivers/amqp1_driver/controller.py +++ /dev/null @@ -1,748 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Controller that manages the interface between the driver and the messaging -service. - -This module defines a Controller class that is responsible for performing -messaging-related operations (Tasks) requested by the driver, and for managing -the connection to the messaging service. The Controller creates a background -thread which performs all messaging operations and socket I/O. The -Controller's messaging logic is executed in the background thread via lambda -functions scheduled by the Controller. -""" - -import abc -import logging -import random -import threading -import uuid - -from oslo_config import cfg -import proton -import pyngus -from six import moves - -from oslo_messaging._drivers.amqp1_driver import eventloop -from oslo_messaging._drivers.amqp1_driver import opts -from oslo_messaging._i18n import _LE, _LI, _LW -from oslo_messaging import exceptions -from oslo_messaging import transport - -LOG = logging.getLogger(__name__) - - -class Task(object): - """Perform a messaging operation via the Controller.""" - @abc.abstractmethod - def execute(self, controller): - """This method will be run on the eventloop thread.""" - - -class Sender(pyngus.SenderEventHandler): - """A single outgoing link to a given address""" - def __init__(self, address): - self._address = address - self._link = None - - def attach(self, connection): - # open a link to the destination - sname = "Producer-%s:src=%s:tgt=%s" % (uuid.uuid4().hex, - self._address, - self._address) - self._link = connection.create_sender(name=sname, - source_address=self._address, - target_address=self._address) - self._link.open() - - def detach(self): - # close the link - if self._link: - self._link.close() - - def destroy(self): - # drop reference to link. 
The link will be freed when the
-        # connection is destroyed
-        self._link = None
-
-    def send(self, message, callback):
-        # send message out the link, invoke callback when acked
-        self._link.send(message, delivery_callback=callback)
-
-    def sender_remote_closed(self, sender_link, pn_condition):
-        LOG.debug("sender_remote_closed condition=%s", pn_condition)
-        sender_link.close()
-
-    def sender_failed(self, sender_link, error):
-        """Protocol error occurred."""
-        LOG.error(_LE("Outgoing link to %(addr)s failed. error=%(error)s"),
-                  {"addr": self._address, "error": error})
-
-
-class Replies(pyngus.ReceiverEventHandler):
-    """This is the receiving link for all reply messages. Messages are
-    routed to the proper Listener's incoming queue using the correlation-id
-    header in the message.
-    """
-    def __init__(self, connection, on_ready):
-        self._correlation = {}  # map of correlation-id to response queue
-        self._ready = False
-        self._on_ready = on_ready
-        rname = "Consumer-%s:src=[dynamic]:tgt=replies" % uuid.uuid4().hex
-        self._receiver = connection.create_receiver("replies",
-                                                    event_handler=self,
-                                                    name=rname)
-
-        # capacity determines the maximum number of reply messages this link
-        # can receive. As messages are received and credit is consumed, this
-        # driver will 'top up' the credit back to max capacity. This number
-        # should be large enough to avoid needlessly flow-controlling the
-        # replies.
-        self.capacity = 100  # TODO(kgiusti) guesstimate - make configurable
-        self._credit = 0
-        self._receiver.open()
-
-    def detach(self):
-        # close the link
-        self._receiver.close()
-
-    def destroy(self):
-        # drop reference to link. Link will be freed when the connection is
-        # released.
-        self._receiver = None
-
-    def ready(self):
-        return self._ready
-
-    def prepare_for_response(self, request, reply_queue):
-        """Apply a unique message identifier to this request message. This
-        will be used to identify messages sent in reply. The identifier is
-        placed in the 'id' field of the request message. It is expected that
-        the identifier will appear in the 'correlation-id' field of the
-        corresponding response message.
-        """
-        request.id = uuid.uuid4().hex
-        # reply is placed on reply_queue
-        self._correlation[request.id] = reply_queue
-        request.reply_to = self._receiver.source_address
-        LOG.debug("Reply for msg id=%(id)s expected on link %(reply_to)s",
-                  {'id': request.id, 'reply_to': request.reply_to})
-        return request.id
-
-    def cancel_response(self, msg_id):
-        """Abort waiting for a response message. This can be used if the
-        request fails and no reply is expected.
-        """
-        if msg_id in self._correlation:
-            del self._correlation[msg_id]
-
-    # Pyngus ReceiverLink event callbacks:
-
-    def receiver_active(self, receiver_link):
-        """This is a Pyngus callback, invoked by Pyngus when the
-        receiver_link has transitioned to the open state and is able to
-        receive incoming messages.
-        """
-        self._ready = True
-        self._update_credit()
-        self._on_ready()
-        LOG.debug("Replies expected on link %s",
-                  self._receiver.source_address)
-
-    def receiver_remote_closed(self, receiver, pn_condition):
-        """This is a Pyngus callback, invoked by Pyngus when the peer of
-        this receiver link has initiated closing the connection.
-        """
-        # TODO(kgiusti) Log for now, possibly implement a recovery strategy
-        # if necessary.
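The correlation map maintained by Replies above reduces to a simple pattern:
stamp each outgoing request with a fresh id, then route any reply carrying
that id as its correlation-id back to the waiting queue. A minimal
stand-alone sketch of that pattern (not the driver code itself)::

    import uuid
    from six import moves

    correlation = {}                      # msg-id -> waiter queue

    def prepare(request):
        request['id'] = uuid.uuid4().hex  # becomes the reply correlation-id
        correlation[request['id']] = moves.queue.Queue()
        return request['id']

    def on_reply(message):
        waiter = correlation.pop(message['correlation_id'], None)
        if waiter is not None:            # replies with unknown ids dropped
            waiter.put(message)

    msg_id = prepare({'method': 'ping'})
    waiter = correlation[msg_id]
    on_reply({'correlation_id': msg_id, 'result': 'pong'})
    print(waiter.get(timeout=1)['result'])   # -> pong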
- if pn_condition: - LOG.error(_LE("Reply subscription closed by peer: %s"), - pn_condition) - receiver.close() - - def receiver_failed(self, receiver_link, error): - """Protocol error occurred.""" - LOG.error(_LE("Link to reply queue %(addr) failed. error=%(error)"), - {"addr": self._address, "error": error}) - - def message_received(self, receiver, message, handle): - """This is a Pyngus callback, invoked by Pyngus when a new message - arrives on this receiver link from the peer. - """ - self._credit = self._credit - 1 - self._update_credit() - - key = message.correlation_id - if key in self._correlation: - LOG.debug("Received response for msg id=%s", key) - result = {"status": "OK", - "response": message} - self._correlation[key].put(result) - # cleanup (only need one response per request) - del self._correlation[key] - receiver.message_accepted(handle) - else: - LOG.warning(_LW("Can't find receiver for response msg id=%s, " - "dropping!"), key) - receiver.message_modified(handle, True, True, None) - - def _update_credit(self): - # ensure we have enough credit - if self._credit < self.capacity / 2: - self._receiver.add_capacity(self.capacity - self._credit) - self._credit = self.capacity - - -class Server(pyngus.ReceiverEventHandler): - """A group of links that receive messages from a set of addresses derived - from a given target. Messages arriving on the links are placed on the - 'incoming' queue. - """ - def __init__(self, addresses, incoming, subscription_id): - self._incoming = incoming - self._addresses = addresses - self._capacity = 500 # credit per link - self._receivers = [] - self._id = subscription_id - - def attach(self, connection): - """Create receiver links over the given connection for all the - configured addresses. - """ - for a in self._addresses: - props = {"snd-settle-mode": "settled"} - rname = "Consumer-%s:src=%s:tgt=%s" % (uuid.uuid4().hex, a, a) - r = connection.create_receiver(source_address=a, - target_address=a, - event_handler=self, - name=rname, - properties=props) - - # TODO(kgiusti) Hardcoding credit here is sub-optimal. A better - # approach would monitor for a back-up of inbound messages to be - # processed by the consuming application and backpressure the - # sender based on configured thresholds. - r.add_capacity(self._capacity) - r.open() - self._receivers.append(r) - - def detach(self): - # close the links - for receiver in self._receivers: - receiver.close() - - def reset(self): - # destroy the links, but keep the addresses around since we may be - # failing over. Since links are destroyed, this cannot be called from - # any of the following ReceiverLink callbacks. - for r in self._receivers: - r.destroy() - self._receivers = [] - - # Pyngus ReceiverLink event callbacks: - - def receiver_remote_closed(self, receiver, pn_condition): - """This is a Pyngus callback, invoked by Pyngus when the peer of this - receiver link has initiated closing the connection. - """ - if pn_condition: - vals = { - "addr": receiver.source_address or receiver.target_address, - "err_msg": pn_condition - } - LOG.error(_LE("Server subscription %(addr)s closed " - "by peer: %(err_msg)s"), vals) - receiver.close() - - def receiver_failed(self, receiver_link, error): - """Protocol error occurred.""" - LOG.error(_LE("Listener link queue %(addr) failed. 
error=%(error)"), - {"addr": self._address, "error": error}) - - def message_received(self, receiver, message, handle): - """This is a Pyngus callback, invoked by Pyngus when a new message - arrives on this receiver link from the peer. - """ - if receiver.capacity < self._capacity / 2: - receiver.add_capacity(self._capacity - receiver.capacity) - self._incoming.put(message) - LOG.debug("message received: %s", message) - receiver.message_accepted(handle) - - -class Hosts(object): - """An order list of TransportHost addresses. Connection failover - progresses from one host to the next. username and password come from the - configuration and are used only if no username/password was given in the - URL. - """ - def __init__(self, entries=None, default_username=None, - default_password=None): - if entries: - self._entries = entries[:] - else: - self._entries = [transport.TransportHost(hostname="localhost", - port=5672)] - for entry in self._entries: - entry.port = entry.port or 5672 - entry.username = entry.username or default_username - entry.password = entry.password or default_password - self._current = random.randint(0, len(self._entries) - 1) - - @property - def current(self): - return self._entries[self._current] - - def next(self): - if len(self._entries) > 1: - self._current = (self._current + 1) % len(self._entries) - return self.current - - def __repr__(self): - return '' - - def __str__(self): - return ", ".join(["%r" % th for th in self._entries]) - - -class Controller(pyngus.ConnectionEventHandler): - """Controls the connection to the AMQP messaging service. This object is - the 'brains' of the driver. It maintains the logic for addressing, sending - and receiving messages, and managing the connection. All messaging and I/O - work is done on the Eventloop thread, allowing the driver to run - asynchronously from the messaging clients. - """ - def __init__(self, hosts, default_exchange, config): - self.processor = None - self._socket_connection = None - # queue of Task() objects to execute on the eventloop once the - # connection is ready: - self._tasks = moves.queue.Queue(maxsize=500) - # limit the number of Task()'s to execute per call to _process_tasks(). - # This allows the eventloop main thread to return to servicing socket - # I/O in a timely manner - self._max_task_batch = 50 - # cache of sending links indexed by address: - self._senders = {} - # Servers indexed by target. 
Each entry is a map indexed by the - # specific ProtonListener's identifier: - self._servers = {} - - opt_group = cfg.OptGroup(name='oslo_messaging_amqp', - title='AMQP 1.0 driver options') - config.register_group(opt_group) - config.register_opts(opts.amqp1_opts, group=opt_group) - - self.server_request_prefix = \ - config.oslo_messaging_amqp.server_request_prefix - self.broadcast_prefix = config.oslo_messaging_amqp.broadcast_prefix - self.group_request_prefix = \ - config.oslo_messaging_amqp.group_request_prefix - self._container_name = config.oslo_messaging_amqp.container_name - self.idle_timeout = config.oslo_messaging_amqp.idle_timeout - self.trace_protocol = config.oslo_messaging_amqp.trace - self.ssl_ca_file = config.oslo_messaging_amqp.ssl_ca_file - self.ssl_cert_file = config.oslo_messaging_amqp.ssl_cert_file - self.ssl_key_file = config.oslo_messaging_amqp.ssl_key_file - self.ssl_key_password = config.oslo_messaging_amqp.ssl_key_password - self.ssl_allow_insecure = \ - config.oslo_messaging_amqp.allow_insecure_clients - self.sasl_mechanisms = config.oslo_messaging_amqp.sasl_mechanisms - self.sasl_config_dir = config.oslo_messaging_amqp.sasl_config_dir - self.sasl_config_name = config.oslo_messaging_amqp.sasl_config_name - self.hosts = Hosts(hosts, config.oslo_messaging_amqp.username, - config.oslo_messaging_amqp.password) - self.separator = "." - self.fanout_qualifier = "all" - self.default_exchange = default_exchange - - # can't handle a request until the replies link is active, as - # we need the peer assigned address, so need to delay any - # processing of task queue until this is done - self._replies = None - # Set True when the driver is shutting down - self._closing = False - # only schedule one outstanding reconnect attempt at a time - self._reconnecting = False - self._delay = 0 # seconds between retries - # prevent queuing up multiple requests to run _process_tasks() - self._process_tasks_scheduled = False - self._process_tasks_lock = threading.Lock() - - def connect(self): - """Connect to the messaging service.""" - self.processor = eventloop.Thread(self._container_name) - self.processor.wakeup(lambda: self._do_connect()) - - def add_task(self, task): - """Add a Task for execution on processor thread.""" - self._tasks.put(task) - self._schedule_task_processing() - - def shutdown(self, timeout=None): - """Shutdown the messaging service.""" - LOG.info(_LI("Shutting down the AMQP 1.0 connection")) - if self.processor: - self.processor.wakeup(lambda: self._start_shutdown()) - LOG.debug("Waiting for eventloop to exit") - self.processor.join(timeout) - self._hard_reset() - self.processor.destroy() - self.processor = None - LOG.debug("Eventloop exited, driver shut down") - - # The remaining methods are reserved to run from the eventloop thread only! - # They must not be invoked directly! - - # methods executed by Tasks created by the driver: - - def request(self, target, request, result_queue, reply_expected=False): - """Send a request message to the given target and arrange for a - result to be put on the result_queue. If reply_expected, the result - will include the reply message (if successful). 
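The add_task()/_process_tasks() machinery described above boils down to a
small batched-execution pattern. A stripped-down sketch, with names
simplified and the driver's readiness checks omitted::

    import threading
    from six import moves

    class MiniProcessor(object):
        """Batched task execution on a single worker thread."""
        MAX_BATCH = 50

        def __init__(self, wakeup):
            self._tasks = moves.queue.Queue(maxsize=500)
            self._scheduled = False
            self._lock = threading.Lock()
            self._wakeup = wakeup        # schedules a callable on the loop

        def add_task(self, task):
            self._tasks.put(task)
            self._schedule()

        def _schedule(self):
            # collapse multiple add_task() calls into a single wakeup
            with self._lock:
                already = self._scheduled
                self._scheduled = True
            if not already:
                self._wakeup(self._process)

        def _process(self):
            with self._lock:
                self._scheduled = False
            count = 0
            while not self._tasks.empty() and count < self.MAX_BATCH:
                self._tasks.get(False)()  # run one queued task
                count += 1
            if not self._tasks.empty():
                self._schedule()          # resume later, keep I/O responsive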
- """ - address = self._resolve(target) - LOG.debug("Sending request for %(target)s to %(address)s", - {'target': target, 'address': address}) - if reply_expected: - msg_id = self._replies.prepare_for_response(request, result_queue) - - def _callback(link, handle, state, info): - if state == pyngus.SenderLink.ACCEPTED: # message received - if not reply_expected: - # can wake up the sender now - result = {"status": "OK"} - result_queue.put(result) - else: - # we will wake up the sender when the reply message is - # received. See Replies.message_received() - pass - else: # send failed/rejected/etc - msg = "Message send failed: remote disposition: %s, info: %s" - exc = exceptions.MessageDeliveryFailure(msg % (state, info)) - result = {"status": "ERROR", "error": exc} - if reply_expected: - # no response will be received, so cancel the correlation - self._replies.cancel_response(msg_id) - result_queue.put(result) - self._send(address, request, _callback) - - def response(self, address, response): - """Send a response message to the client listening on 'address'. - To prevent a misbehaving client from blocking a server indefinitely, - the message is send asynchronously. - """ - LOG.debug("Sending response to %s", address) - self._send(address, response) - - def subscribe(self, target, in_queue, subscription_id): - """Subscribe to messages sent to 'target', place received messages on - 'in_queue'. - """ - addresses = [ - self._server_address(target), - self._broadcast_address(target), - self._group_request_address(target) - ] - self._subscribe(target, addresses, in_queue, subscription_id) - - def subscribe_notifications(self, target, in_queue, subscription_id): - """Subscribe for notifications on 'target', place received messages on - 'in_queue'. - """ - addresses = [self._group_request_address(target)] - self._subscribe(target, addresses, in_queue, subscription_id) - - def _subscribe(self, target, addresses, in_queue, subscription_id): - LOG.debug("Subscribing to %(target)s (%(addresses)s)", - {'target': target, 'addresses': addresses}) - server = Server(addresses, in_queue, subscription_id) - servers = self._servers.get(target) - if servers is None: - servers = {} - self._servers[target] = servers - servers[subscription_id] = server - server.attach(self._socket_connection.connection) - - def _resolve(self, target): - """Return a link address for a given target.""" - if target.fanout: - return self._broadcast_address(target) - elif target.server: - return self._server_address(target) - else: - return self._group_request_address(target) - - def _sender(self, address): - # if we already have a sender for that address, use it - # else establish the sender and cache it - sender = self._senders.get(address) - if sender is None: - sender = Sender(address) - sender.attach(self._socket_connection.connection) - self._senders[address] = sender - return sender - - def _send(self, addr, message, callback=None, handle=None): - """Send the message out the link addressed by 'addr'. If a - delivery_callback is given it will be invoked when the send has - completed (whether successfully or in error). 
- """ - address = str(addr) - message.address = address - self._sender(address).send(message, callback) - - def _server_address(self, target): - return self._concatenate([self.server_request_prefix, - target.exchange or self.default_exchange, - target.topic, target.server]) - - def _broadcast_address(self, target): - return self._concatenate([self.broadcast_prefix, - target.exchange or self.default_exchange, - target.topic, self.fanout_qualifier]) - - def _group_request_address(self, target): - return self._concatenate([self.group_request_prefix, - target.exchange or self.default_exchange, - target.topic]) - - def _concatenate(self, items): - return self.separator.join(filter(bool, items)) - - # commands executed on the processor (eventloop) via 'wakeup()': - - def _do_connect(self): - """Establish connection and reply subscription on processor thread.""" - host = self.hosts.current - conn_props = {'hostname': host.hostname} - if self.idle_timeout: - conn_props["idle-time-out"] = float(self.idle_timeout) - if self.trace_protocol: - conn_props["x-trace-protocol"] = self.trace_protocol - if self.ssl_ca_file: - conn_props["x-ssl-ca-file"] = self.ssl_ca_file - if self.ssl_cert_file: - # assume this connection is for a server. If client authentication - # support is developed, we'll need an explicit flag (server or - # client) - conn_props["x-ssl-server"] = True - conn_props["x-ssl-identity"] = (self.ssl_cert_file, - self.ssl_key_file, - self.ssl_key_password) - conn_props["x-ssl-allow-cleartext"] = self.ssl_allow_insecure - # SASL configuration: - if self.sasl_mechanisms: - conn_props["x-sasl-mechs"] = self.sasl_mechanisms - if self.sasl_config_dir: - conn_props["x-sasl-config-dir"] = self.sasl_config_dir - if self.sasl_config_name: - conn_props["x-sasl-config-name"] = self.sasl_config_name - - self._socket_connection = self.processor.connect(host, - handler=self, - properties=conn_props) - LOG.debug("Connection initiated") - - def _process_tasks(self): - """Execute Task objects in the context of the processor thread.""" - with self._process_tasks_lock: - self._process_tasks_scheduled = False - count = 0 - while (not self._tasks.empty() and - count < self._max_task_batch and - self._can_process_tasks): - try: - self._tasks.get(False).execute(self) - except Exception as e: - LOG.exception(_LE("Error processing task: %s"), e) - count += 1 - - # if we hit _max_task_batch, resume task processing later: - if not self._tasks.empty() and self._can_process_tasks: - self._schedule_task_processing() - - def _schedule_task_processing(self): - """_process_tasks() helper: prevent queuing up multiple requests for - task processing. This method is called both by the application thread - and the processing thread. - """ - if self.processor: - with self._process_tasks_lock: - already_scheduled = self._process_tasks_scheduled - self._process_tasks_scheduled = True - if not already_scheduled: - self.processor.wakeup(lambda: self._process_tasks()) - - @property - def _can_process_tasks(self): - """_process_tasks helper(): indicates that the driver is ready to - process Tasks. In order to process messaging-related tasks, the reply - queue link must be active. - """ - return (not self._closing and - self._replies and self._replies.ready()) - - def _start_shutdown(self): - """Called when the application is closing the transport. - Attempt to cleanly flush/close all links. 
- """ - self._closing = True - if (self._socket_connection - and self._socket_connection.connection - and self._socket_connection.connection.active): - # try a clean shutdown - for sender in self._senders.values(): - sender.detach() - for servers in self._servers.values(): - for server in servers.values(): - server.detach() - self._replies.detach() - self._socket_connection.connection.close() - else: - # don't wait for a close from the remote, may never happen - self.processor.shutdown() - - # reply link active callback: - - def _reply_link_ready(self): - """Invoked when the Replies reply link has become active. At this - point, we are ready to send/receive messages (via Task processing). - """ - LOG.info(_LI("Messaging is active (%(hostname)s:%(port)s)"), - {'hostname': self.hosts.current.hostname, - 'port': self.hosts.current.port}) - self._schedule_task_processing() - - # callback from eventloop on socket error - - def socket_error(self, error): - """Called by eventloop when a socket error occurs.""" - LOG.error(_LE("Socket failure: %s"), error) - self._handle_connection_loss() - - # Pyngus connection event callbacks (and their helpers), all invoked from - # the eventloop thread: - - def connection_failed(self, connection, error): - """This is a Pyngus callback, invoked by Pyngus when a non-recoverable - error occurs on the connection. - """ - if connection is not self._socket_connection.connection: - # pyngus bug: ignore failure callback on destroyed connections - return - LOG.debug("AMQP Connection failure: %s", error) - self._handle_connection_loss() - - def connection_active(self, connection): - """This is a Pyngus callback, invoked by Pyngus when the connection to - the peer is up. At this point, the driver will activate all subscriber - links (server) and the reply link. - """ - LOG.debug("Connection active (%(hostname)s:%(port)s), subscribing...", - {'hostname': self.hosts.current.hostname, - 'port': self.hosts.current.port}) - for servers in self._servers.values(): - for server in servers.values(): - server.attach(self._socket_connection.connection) - self._replies = Replies(self._socket_connection.connection, - lambda: self._reply_link_ready()) - self._delay = 0 - - def connection_closed(self, connection): - """This is a Pyngus callback, invoked by Pyngus when the connection has - cleanly closed. This occurs after the driver closes the connection - locally, and the peer has acknowledged the close. At this point, the - shutdown of the driver's connection is complete. - """ - LOG.debug("AMQP connection closed.") - # if the driver isn't being shutdown, failover and reconnect - self._handle_connection_loss() - - def connection_remote_closed(self, connection, reason): - """This is a Pyngus callback, invoked by Pyngus when the peer has - requested that the connection be closed. - """ - # The messaging service/broker is trying to shut down the - # connection. Acknowledge the close, and try to reconnect/failover - # later once the connection has closed (connection_closed is called). - if reason: - LOG.info(_LI("Connection closed by peer: %s"), reason) - self._socket_connection.connection.close() - - def sasl_done(self, connection, pn_sasl, outcome): - """This is a Pyngus callback invoked when the SASL handshake - has completed. The outcome of the handshake is passed in the outcome - argument. 
- """ - if outcome == proton.SASL.OK: - return - LOG.error(_LE("AUTHENTICATION FAILURE: Cannot connect to " - "%(hostname)s:%(port)s as user %(username)s"), - {'hostname': self.hosts.current.hostname, - 'port': self.hosts.current.port, - 'username': self.hosts.current.username}) - # connection failure will be handled later - - def _handle_connection_loss(self): - """The connection to the messaging service has been lost. Try to - reestablish the connection/failover if not shutting down the driver. - """ - if self._closing: - # we're in the middle of shutting down the driver anyways, - # just consider it done: - self.processor.shutdown() - else: - # for some reason, we've lost the connection to the messaging - # service. Try to re-establish the connection: - if not self._reconnecting: - self._reconnecting = True - LOG.info(_LI("delaying reconnect attempt for %d seconds"), - self._delay) - self.processor.schedule(lambda: self._do_reconnect(), - self._delay) - self._delay = (1 if self._delay == 0 - else min(self._delay * 2, 60)) - - def _do_reconnect(self): - """Invoked on connection/socket failure, failover and re-connect to the - messaging service. - """ - if not self._closing: - self._hard_reset() - self._reconnecting = False - host = self.hosts.next() - LOG.info(_LI("Reconnecting to: %(hostname)s:%(port)s"), - {'hostname': host.hostname, 'port': host.port}) - self._socket_connection.connect(host) - - def _hard_reset(self): - """Reset the controller to its pre-connection state""" - # note well: since this method destroys the connection, it cannot be - # invoked directly from a pyngus callback. Use processor.schedule() to - # run this method on the main loop instead. - for sender in self._senders.values(): - sender.destroy() - self._senders.clear() - for servers in self._servers.values(): - for server in servers.values(): - # discard links, but keep servers around to re-attach if - # failing over - server.reset() - if self._replies: - self._replies.destroy() - self._replies = None - if self._socket_connection: - self._socket_connection.reset() diff --git a/oslo_messaging/_drivers/amqp1_driver/drivertasks.py b/oslo_messaging/_drivers/amqp1_driver/drivertasks.py deleted file mode 100644 index 74e3d3f..0000000 --- a/oslo_messaging/_drivers/amqp1_driver/drivertasks.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import threading -import time - -from oslo_messaging._drivers.amqp1_driver import controller -from oslo_messaging._i18n import _LW -from oslo_messaging import exceptions - -from six import moves - -LOG = logging.getLogger(__name__) - - -class SendTask(controller.Task): - """A task that sends a message to a target, and optionally waits for a - reply message. The caller may block until the remote confirms receipt or - the reply message has arrived. 
- """ - def __init__(self, target, request, wait_for_reply, deadline): - super(SendTask, self).__init__() - self._target = target - self._request = request - self._deadline = deadline - self._wait_for_reply = wait_for_reply - self._results_queue = moves.queue.Queue() - - def wait(self, timeout): - """Wait for the send to complete, and, optionally, a reply message from - the remote. Will raise MessagingTimeout if the send does not complete - or no reply is received within timeout seconds. If the request has - failed for any other reason, a MessagingException is raised. - """ - try: - result = self._results_queue.get(timeout=timeout) - except moves.queue.Empty: - if self._wait_for_reply: - reason = "Timed out waiting for a reply." - else: - reason = "Timed out waiting for send to complete." - raise exceptions.MessagingTimeout(reason) - if result["status"] == "OK": - return result.get("response", None) - raise result["error"] - - def execute(self, controller): - """Runs on eventloop thread - sends request.""" - if not self._deadline or self._deadline > time.time(): - controller.request(self._target, self._request, - self._results_queue, self._wait_for_reply) - else: - LOG.warning(_LW("Send request to %s aborted: TTL expired."), - self._target) - - -class ListenTask(controller.Task): - """A task that creates a subscription to the given target. Messages - arriving from the target are given to the listener. - """ - def __init__(self, target, listener, notifications=False): - """Create a subscription to the target.""" - super(ListenTask, self).__init__() - self._target = target - self._listener = listener - self._notifications = notifications - - def execute(self, controller): - """Run on the eventloop thread - subscribes to target. Inbound messages - are queued to the listener's incoming queue. - """ - if self._notifications: - controller.subscribe_notifications(self._target, - self._listener.incoming, - self._listener.id) - else: - controller.subscribe(self._target, - self._listener.incoming, - self._listener.id) - - -class ReplyTask(controller.Task): - """A task that sends 'response' message to 'address'. - """ - def __init__(self, address, response): - super(ReplyTask, self).__init__() - self._address = address - self._response = response - self._wakeup = threading.Event() - - def wait(self): - """Wait for the controller to send the message. - """ - self._wakeup.wait() - - def execute(self, controller): - """Run on the eventloop thread - send the response message.""" - controller.response(self._address, self._response) - self._wakeup.set() diff --git a/oslo_messaging/_drivers/amqp1_driver/eventloop.py b/oslo_messaging/_drivers/amqp1_driver/eventloop.py deleted file mode 100644 index dfe0730..0000000 --- a/oslo_messaging/_drivers/amqp1_driver/eventloop.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A thread that performs all messaging I/O and protocol event handling. 
- -This module provides a background thread that handles messaging operations -scheduled via the Controller, and performs blocking socket I/O and timer -processing. This thread is designed to be as simple as possible - all the -protocol specific intelligence is provided by the Controller and executed on -the background thread via callables. -""" - -import errno -import heapq -import logging -import os -import select -import socket -import sys -import threading -import time -import uuid - -import pyngus -from six import moves - -from oslo_messaging._i18n import _LE, _LI, _LW -LOG = logging.getLogger(__name__) - - -class _SocketConnection(object): - """Associates a pyngus Connection with a python network socket, - and handles all connection-related I/O and timer events. - """ - - def __init__(self, name, container, properties, handler): - self.name = name - self.socket = None - self._properties = properties or {} - self._properties["properties"] = self._get_name_and_pid() - # The handler is a pyngus ConnectionEventHandler, which is invoked by - # pyngus on connection-related events (active, closed, error, etc). - # Currently it is the Controller object. - self._handler = handler - self._container = container - self.connection = None - - def _get_name_and_pid(self): - # helps identify the process that is using the connection - return {u'process': os.path.basename(sys.argv[0]), u'pid': os.getpid()} - - def fileno(self): - """Allows use of a _SocketConnection in a select() call. - """ - return self.socket.fileno() - - def read(self): - """Called when socket is read-ready.""" - while True: - try: - rc = pyngus.read_socket_input(self.connection, self.socket) - self.connection.process(time.time()) - return rc - except (socket.timeout, socket.error) as e: - # pyngus handles EAGAIN/EWOULDBLOCK and EINTER - self.connection.close_input() - self.connection.close_output() - self._handler.socket_error(str(e)) - return pyngus.Connection.EOS - - def write(self): - """Called when socket is write-ready.""" - while True: - try: - rc = pyngus.write_socket_output(self.connection, self.socket) - self.connection.process(time.time()) - return rc - except (socket.timeout, socket.error) as e: - # pyngus handles EAGAIN/EWOULDBLOCK and EINTER - self.connection.close_output() - self.connection.close_input() - self._handler.socket_error(str(e)) - return pyngus.Connection.EOS - - def connect(self, host): - """Connect to host and start the AMQP protocol.""" - addr = socket.getaddrinfo(host.hostname, host.port, - socket.AF_INET, socket.SOCK_STREAM) - if not addr: - key = "%s:%i" % (host.hostname, host.port) - error = "Invalid peer address '%s'" % key - LOG.error(_LE("Invalid peer address '%s'"), key) - self._handler.socket_error(error) - return - my_socket = socket.socket(addr[0][0], addr[0][1], addr[0][2]) - my_socket.setblocking(0) # 0=non-blocking - my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - try: - my_socket.connect(addr[0][4]) - except socket.error as e: - if e.errno != errno.EINPROGRESS: - error = "Socket connect failure '%s'" % str(e) - LOG.error(_LE("Socket connect failure '%s'"), str(e)) - self._handler.socket_error(error) - return - self.socket = my_socket - - props = self._properties.copy() - if pyngus.VERSION >= (2, 0, 0): - # configure client authentication - # - props['x-server'] = False - if host.username: - props['x-username'] = host.username - props['x-password'] = host.password or "" - - c = self._container.create_connection(self.name, self._handler, props) - c.user_context = self - 
self.connection = c - - if pyngus.VERSION < (2, 0, 0): - # older versions of pyngus requires manual SASL configuration: - # determine the proper SASL mechanism: PLAIN if a username/password - # is present, else ANONYMOUS - pn_sasl = self.connection.pn_sasl - if host.username: - password = host.password if host.password else "" - pn_sasl.plain(host.username, password) - else: - pn_sasl.mechanisms("ANONYMOUS") - # TODO(kgiusti): server if accepting inbound connections - pn_sasl.client() - - self.connection.open() - - def reset(self, name=None): - """Clean up the current state, expect 'connect()' to be recalled - later. - """ - # note well: since destroy() is called on the connection, do not invoke - # this method from a pyngus callback! - if self.connection: - self.connection.destroy() - self.connection = None - self.close() - if name: - self.name = name - - def close(self): - if self.socket: - self.socket.close() - self.socket = None - - -class Schedule(object): - """A list of callables (requests). Each callable may have a delay (in - milliseconds) which causes the callable to be scheduled to run after the - delay passes. - """ - def __init__(self): - self._entries = [] - - def schedule(self, request, delay): - """Request a callable be executed after delay.""" - entry = (time.time() + delay, request) - heapq.heappush(self._entries, entry) - - def get_delay(self, max_delay=None): - """Get the delay in milliseconds until the next callable needs to be - run, or 'max_delay' if no outstanding callables or the delay to the - next callable is > 'max_delay'. - """ - due = self._entries[0][0] if self._entries else None - if due is None: - return max_delay - now = time.time() - if due < now: - return 0 - else: - return min(due - now, max_delay) if max_delay else due - now - - def process(self): - """Invoke all expired callables.""" - while self._entries and self._entries[0][0] < time.time(): - heapq.heappop(self._entries)[1]() - - -class Requests(object): - """A queue of callables to execute from the eventloop thread's main - loop. - """ - def __init__(self): - self._requests = moves.queue.Queue(maxsize=10) - self._wakeup_pipe = os.pipe() - - def wakeup(self, request=None): - """Enqueue a callable to be executed by the eventloop, and force the - eventloop thread to wake up from select(). - """ - if request: - self._requests.put(request) - os.write(self._wakeup_pipe[1], b'!') - - def fileno(self): - """Allows this request queue to be used by select().""" - return self._wakeup_pipe[0] - - def read(self): - """Invoked by the eventloop thread, execute each queued callable.""" - os.read(self._wakeup_pipe[0], 512) - # first pop of all current tasks - requests = [] - while not self._requests.empty(): - requests.append(self._requests.get()) - # then process them, this allows callables to re-register themselves to - # be run on the next iteration of the I/O loop - for r in requests: - r() - - -class Thread(threading.Thread): - """Manages socket I/O and executes callables queued up by external - threads. 
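Requests above relies on the classic self-pipe trick so that another thread
can interrupt a blocking select(). Reduced to its essence (illustrative, not
the driver code)::

    import os
    import select

    r, w = os.pipe()

    def wakeup():                # safe to call from any thread
        os.write(w, b'!')

    def wait_for_io(fds, timeout=None):
        readable, _, _ = select.select(fds + [r], [], [], timeout)
        if r in readable:
            os.read(r, 512)      # drain the pipe, then run queued requests
        return readable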
- """ - def __init__(self, container_name=None): - super(Thread, self).__init__() - - # callables from other threads: - self._requests = Requests() - # delayed callables (only used on this thread for now): - self._schedule = Schedule() - - # Configure a container - if container_name is None: - container_name = "Container-" + uuid.uuid4().hex - self._container = pyngus.Container(container_name) - - self.name = "Thread for Proton container: %s" % self._container.name - self._shutdown = False - self.daemon = True - self.start() - - def wakeup(self, request=None): - """Wake up the eventloop thread, Optionally providing a callable to run - when the eventloop wakes up. Thread safe. - """ - self._requests.wakeup(request) - - def shutdown(self): - """Shutdown the eventloop thread. Thread safe. - """ - LOG.debug("eventloop shutdown requested") - self._shutdown = True - self.wakeup() - - def destroy(self): - # release the container. This can only be called after the eventloop - # thread exited - self._container.destroy() - self._container = None - - # the following methods are not thread safe - they must be run from the - # eventloop thread - - def schedule(self, request, delay): - """Invoke request after delay seconds.""" - self._schedule.schedule(request, delay) - - def connect(self, host, handler, properties=None, name=None): - """Get a _SocketConnection to a peer represented by url.""" - key = name or "%s:%i" % (host.hostname, host.port) - # return pre-existing - conn = self._container.get_connection(key) - if conn: - return conn.user_context - - # create a new connection - this will be stored in the - # container, using the specified name as the lookup key, or if - # no name was provided, the host:port combination - sc = _SocketConnection(key, self._container, - properties, handler=handler) - sc.connect(host) - return sc - - def run(self): - """Run the proton event/timer loop.""" - LOG.debug("Starting Proton thread, container=%s", - self._container.name) - - while not self._shutdown: - readers, writers, timers = self._container.need_processing() - - readfds = [c.user_context for c in readers] - # additionally, always check for readability of pipe we - # are using to wakeup processing thread by other threads - readfds.append(self._requests) - writefds = [c.user_context for c in writers] - - timeout = None - if timers: - deadline = timers[0].deadline # 0 == next expiring timer - now = time.time() - timeout = 0 if deadline <= now else deadline - now - - # adjust timeout for any deferred requests - timeout = self._schedule.get_delay(timeout) - - try: - results = select.select(readfds, writefds, [], timeout) - except select.error as serror: - if serror[0] == errno.EINTR: - LOG.warning(_LW("ignoring interrupt from select(): %s"), - str(serror)) - continue - raise # assuming fatal... - - readable, writable, ignore = results - - for r in readable: - r.read() - - for t in timers: - if t.deadline > time.time(): - break - t.process(time.time()) - - for w in writable: - w.write() - - self._schedule.process() # run any deferred requests - - LOG.info(_LI("eventloop thread exiting, container=%s"), - self._container.name) diff --git a/oslo_messaging/_drivers/amqp1_driver/opts.py b/oslo_messaging/_drivers/amqp1_driver/opts.py deleted file mode 100644 index ca12be4..0000000 --- a/oslo_messaging/_drivers/amqp1_driver/opts.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2014, Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - - -amqp1_opts = [ - cfg.StrOpt('server_request_prefix', - default='exclusive', - deprecated_group='amqp1', - help="address prefix used when sending to a specific server"), - - cfg.StrOpt('broadcast_prefix', - default='broadcast', - deprecated_group='amqp1', - help="address prefix used when broadcasting to all servers"), - - cfg.StrOpt('group_request_prefix', - default='unicast', - deprecated_group='amqp1', - help="address prefix when sending to any server in group"), - - cfg.StrOpt('container_name', - deprecated_group='amqp1', - help='Name for the AMQP container'), - - cfg.IntOpt('idle_timeout', - default=0, # disabled - deprecated_group='amqp1', - help='Timeout for inactive connections (in seconds)'), - - cfg.BoolOpt('trace', - default=False, - deprecated_group='amqp1', - help='Debug: dump AMQP frames to stdout'), - - cfg.StrOpt('ssl_ca_file', - default='', - deprecated_group='amqp1', - help="CA certificate PEM file to verify server certificate"), - - cfg.StrOpt('ssl_cert_file', - default='', - deprecated_group='amqp1', - help='Identifying certificate PEM file to present to clients'), - - cfg.StrOpt('ssl_key_file', - default='', - deprecated_group='amqp1', - help='Private key PEM file used to sign cert_file certificate'), - - cfg.StrOpt('ssl_key_password', - deprecated_group='amqp1', - secret=True, - help='Password for decrypting ssl_key_file (if encrypted)'), - - cfg.BoolOpt('allow_insecure_clients', - default=False, - deprecated_group='amqp1', - help='Accept clients using either SSL or plain TCP'), - - cfg.StrOpt('sasl_mechanisms', - default='', - deprecated_group='amqp1', - help='Space separated list of acceptable SASL mechanisms'), - - cfg.StrOpt('sasl_config_dir', - default='', - deprecated_group='amqp1', - help='Path to directory that contains the SASL configuration'), - - cfg.StrOpt('sasl_config_name', - default='', - deprecated_group='amqp1', - help='Name of configuration file (without .conf suffix)'), - - cfg.StrOpt('username', - default='', - deprecated_group='amqp1', - help='User name for message broker authentication'), - - cfg.StrOpt('password', - default='', - deprecated_group='amqp1', - secret=True, - help='Password for message broker authentication') -] diff --git a/oslo_messaging/_drivers/amqpdriver.py b/oslo_messaging/_drivers/amqpdriver.py deleted file mode 100644 index f021592..0000000 --- a/oslo_messaging/_drivers/amqpdriver.py +++ /dev/null @@ -1,511 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-__all__ = ['AMQPDriverBase']
-
-import logging
-import threading
-import time
-import uuid
-
-import cachetools
-from oslo_utils import timeutils
-from six import moves
-
-import oslo_messaging
-from oslo_messaging._drivers import amqp as rpc_amqp
-from oslo_messaging._drivers import base
-from oslo_messaging._drivers import common as rpc_common
-from oslo_messaging._i18n import _
-from oslo_messaging._i18n import _LE
-from oslo_messaging._i18n import _LI
-from oslo_messaging._i18n import _LW
-
-LOG = logging.getLogger(__name__)
-
-
-class AMQPIncomingMessage(base.RpcIncomingMessage):
-
-    def __init__(self, listener, ctxt, message, unique_id, msg_id, reply_q,
-                 obsolete_reply_queues):
-        super(AMQPIncomingMessage, self).__init__(ctxt, message)
-        self.listener = listener
-
-        self.unique_id = unique_id
-        self.msg_id = msg_id
-        self.reply_q = reply_q
-        self._obsolete_reply_queues = obsolete_reply_queues
-        self.stopwatch = timeutils.StopWatch()
-        self.stopwatch.start()
-
-    def _send_reply(self, conn, reply=None, failure=None):
-        if not self._obsolete_reply_queues.reply_q_valid(self.reply_q,
-                                                         self.msg_id):
-            return
-
-        if failure:
-            failure = rpc_common.serialize_remote_exception(failure)
-        # NOTE(sileht): 'ending' can be removed in N*, see Listener.wait()
-        # for more detail.
-        msg = {'result': reply, 'failure': failure, 'ending': True,
-               '_msg_id': self.msg_id}
-        rpc_amqp._add_unique_id(msg)
-        unique_id = msg[rpc_amqp.UNIQUE_ID]
-
-        LOG.debug("sending reply msg_id: %(msg_id)s "
-                  "reply queue: %(reply_q)s "
-                  "time elapsed: %(elapsed)ss", {
-                      'msg_id': self.msg_id,
-                      'unique_id': unique_id,
-                      'reply_q': self.reply_q,
-                      'elapsed': self.stopwatch.elapsed()})
-        conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg))
-
-    def reply(self, reply=None, failure=None):
-        if not self.msg_id:
-            # NOTE(Alexei_987) not sending reply, if msg_id is empty
-            # because reply should not be expected by caller side
-            return
-
-        # NOTE(sileht): return without holding a connection if possible
-        if not self._obsolete_reply_queues.reply_q_valid(self.reply_q,
-                                                         self.msg_id):
-            return
-
-        # NOTE(sileht): we read the configuration value from the driver
-        # to be able to backport this change to previous versions that
-        # still have the qpid driver
-        duration = self.listener.driver.missing_destination_retry_timeout
-        timer = rpc_common.DecayingTimer(duration=duration)
-        timer.start()
-
-        while True:
-            try:
-                with self.listener.driver._get_connection(
-                        rpc_common.PURPOSE_SEND) as conn:
-                    self._send_reply(conn, reply, failure)
-                return
-            except rpc_amqp.AMQPDestinationNotFound:
-                if timer.check_return() > 0:
-                    LOG.debug(("The reply %(msg_id)s cannot be sent: the "
-                               "%(reply_q)s reply queue doesn't exist, "
-                               "retrying..."), {
-                                   'msg_id': self.msg_id,
-                                   'reply_q': self.reply_q})
-                    time.sleep(0.25)
-                else:
-                    self._obsolete_reply_queues.add(self.reply_q, self.msg_id)
-                    LOG.info(_LI("The reply %(msg_id)s cannot be sent: the "
-                                 "%(reply_q)s reply queue doesn't exist "
-                                 "after %(duration)s sec; abandoning..."), {
-                                     'msg_id': self.msg_id,
-                                     'reply_q': self.reply_q,
-                                     'duration': duration})
-                    return
-
-    def acknowledge(self):
-        self.message.acknowledge()
-        self.listener.msg_id_cache.add(self.unique_id)
-
-    def requeue(self):
-        # NOTE(sileht): If the connection is lost between receiving the
-        # message and requeueing it, this requeue call fails, but because
-        # the message is not acknowledged and not added to the msg_id_cache,
-        # the message will be reconsumed; the only difference is that the
-        # message stays at the beginning of the queue instead of moving to
-        # the end.
-        self.message.requeue()
-
-
-class ObsoleteReplyQueuesCache(object):
-    """Cache of reply queue ids that don't exist anymore.
-
-    NOTE(sileht): In case of a broker restart/failover
-    a reply queue can be unreachable for a short period.
-    In that case IncomingMessage.send_reply will block
-    for 60 seconds, or until rabbit recovers.
-
-    But if the reply queue is unreachable because the
-    rpc client is really gone, we can have a ton of replies to send,
-    each waiting 60 seconds.
-    This leads to a starvation of connections in the pool:
-    the rpc server takes too much time to send replies, and other rpc
-    clients raise TimeoutError because they don't receive their replies
-    in time.
-
-    This cache stores already-known-gone clients so we don't wait 60
-    seconds for them and hold a connection from the pool.
-    Keeping the 200 most recently gone rpc clients for 1 minute is enough,
-    and doesn't hold too much memory.
-    """
-
-    SIZE = 200
-    TTL = 60
-
-    def __init__(self):
-        self._lock = threading.RLock()
-        self._cache = cachetools.TTLCache(self.SIZE, self.TTL)
-
-    def reply_q_valid(self, reply_q, msg_id):
-        if reply_q in self._cache:
-            self._no_reply_log(reply_q, msg_id)
-            return False
-        return True
-
-    def add(self, reply_q, msg_id):
-        with self._lock:
-            self._cache.update({reply_q: msg_id})
-            self._no_reply_log(reply_q, msg_id)
-
-    def _no_reply_log(self, reply_q, msg_id):
-        LOG.warning(_LW("%(reply_queue)s doesn't exist, dropping reply to "
-                        "%(msg_id)s"), {'reply_queue': reply_q,
-                                        'msg_id': msg_id})
-
-
-class AMQPListener(base.PollStyleListener):
-
-    def __init__(self, driver, conn):
-        super(AMQPListener, self).__init__(driver.prefetch_size)
-        self.driver = driver
-        self.conn = conn
-        self.msg_id_cache = rpc_amqp._MsgIdCache()
-        self.incoming = []
-        self._stopped = threading.Event()
-        self._obsolete_reply_queues = ObsoleteReplyQueuesCache()
-
-    def __call__(self, message):
-        ctxt = rpc_amqp.unpack_context(message)
-        unique_id = self.msg_id_cache.check_duplicate_message(message)
-        if ctxt.msg_id:
-            LOG.debug("received message msg_id: %(msg_id)s reply to "
-                      "%(queue)s", {'queue': ctxt.reply_q,
-                                    'msg_id': ctxt.msg_id})
-        else:
-            LOG.debug("received message with unique_id: %s", unique_id)
-        self.incoming.append(AMQPIncomingMessage(self,
-                                                 ctxt.to_dict(),
-                                                 message,
-                                                 unique_id,
-                                                 ctxt.msg_id,
-                                                 ctxt.reply_q,
-                                                 self._obsolete_reply_queues))
-
-    @base.batch_poll_helper
-    def poll(self, timeout=None):
-        while not self._stopped.is_set():
-            if self.incoming:
-                return self.incoming.pop(0)
-            try:
-                self.conn.consume(timeout=timeout)
-            except rpc_common.Timeout:
-                return None
-
-    def stop(self):
-        self._stopped.set()
-        self.conn.stop_consuming()
-
-    def cleanup(self):
-        # Closes listener connection
-        self.conn.close()
-
-
-class ReplyWaiters(object):
-
-    WAKE_UP = object()
-
-    def __init__(self):
-        self._queues = {}
-        self._wrn_threshold = 10
-
-    def get(self, msg_id, timeout):
-        try:
-            return self._queues[msg_id].get(block=True, timeout=timeout)
-        except moves.queue.Empty:
-            raise oslo_messaging.MessagingTimeout(
-                'Timed out waiting for a reply '
-                'to message ID %s' % msg_id)
-
-    def put(self, msg_id, message_data):
-        queue = self._queues.get(msg_id)
-        if not queue:
-            LOG.info(_LI('No calling threads waiting for msg_id : %s'),
-                     msg_id)
-            LOG.debug(' queues: %(queues)s, message: %(message)s',
-                      {'queues': len(self._queues),
-                       'message': message_data})
-        else:
-            queue.put(message_data)
-
-    def
add(self, msg_id): - self._queues[msg_id] = moves.queue.Queue() - if len(self._queues) > self._wrn_threshold: - LOG.warning(_LW('Number of call queues is greater than warning ' - 'threshold: %(old_threshold)s. There could be a ' - 'leak. Increasing threshold to: %(threshold)s'), - {'old_threshold': self._wrn_threshold, - 'threshold': self._wrn_threshold * 2}) - self._wrn_threshold *= 2 - - def remove(self, msg_id): - del self._queues[msg_id] - - -class ReplyWaiter(object): - def __init__(self, reply_q, conn, allowed_remote_exmods): - self.conn = conn - self.allowed_remote_exmods = allowed_remote_exmods - self.msg_id_cache = rpc_amqp._MsgIdCache() - self.waiters = ReplyWaiters() - - self.conn.declare_direct_consumer(reply_q, self) - - self._thread_exit_event = threading.Event() - self._thread = threading.Thread(target=self.poll) - self._thread.daemon = True - self._thread.start() - - def stop(self): - if self._thread: - self._thread_exit_event.set() - self.conn.stop_consuming() - self._thread.join() - self._thread = None - - def poll(self): - while not self._thread_exit_event.is_set(): - try: - self.conn.consume() - except Exception: - LOG.exception(_LE("Failed to process incoming message, " - "retrying...")) - - def __call__(self, message): - message.acknowledge() - incoming_msg_id = message.pop('_msg_id', None) - if message.get('ending'): - LOG.debug("received reply msg_id: %s", incoming_msg_id) - self.waiters.put(incoming_msg_id, message) - - def listen(self, msg_id): - self.waiters.add(msg_id) - - def unlisten(self, msg_id): - self.waiters.remove(msg_id) - - @staticmethod - def _raise_timeout_exception(msg_id): - raise oslo_messaging.MessagingTimeout( - _('Timed out waiting for a reply to message ID %s.') % msg_id) - - def _process_reply(self, data): - self.msg_id_cache.check_duplicate_message(data) - if data['failure']: - failure = data['failure'] - result = rpc_common.deserialize_remote_exception( - failure, self.allowed_remote_exmods) - else: - result = data.get('result', None) - - ending = data.get('ending', False) - return result, ending - - def wait(self, msg_id, timeout): - # NOTE(sileht): for each msg_id we receive two amqp message - # first one with the payload, a second one to ensure the other - # have finish to send the payload - # NOTE(viktors): We are going to remove this behavior in the N - # release, but we need to keep backward compatibility, so we should - # support both cases for now. - timer = rpc_common.DecayingTimer(duration=timeout) - timer.start() - final_reply = None - ending = False - while not ending: - timeout = timer.check_return(self._raise_timeout_exception, msg_id) - try: - message = self.waiters.get(msg_id, timeout=timeout) - except moves.queue.Empty: - self._raise_timeout_exception(msg_id) - - reply, ending = self._process_reply(message) - if reply is not None: - # NOTE(viktors): This can be either first _send_reply() with an - # empty `result` field or a second _send_reply() with - # ending=True and no `result` field. 
- final_reply = reply - return final_reply - - -class AMQPDriverBase(base.BaseDriver): - missing_destination_retry_timeout = 0 - - def __init__(self, conf, url, connection_pool, - default_exchange=None, allowed_remote_exmods=None): - super(AMQPDriverBase, self).__init__(conf, url, default_exchange, - allowed_remote_exmods) - - self._default_exchange = default_exchange - - self._connection_pool = connection_pool - - self._reply_q_lock = threading.Lock() - self._reply_q = None - self._reply_q_conn = None - self._waiter = None - - def _get_exchange(self, target): - return target.exchange or self._default_exchange - - def _get_connection(self, purpose=rpc_common.PURPOSE_SEND): - return rpc_common.ConnectionContext(self._connection_pool, - purpose=purpose) - - def _get_reply_q(self): - with self._reply_q_lock: - if self._reply_q is not None: - return self._reply_q - - reply_q = 'reply_' + uuid.uuid4().hex - - conn = self._get_connection(rpc_common.PURPOSE_LISTEN) - - self._waiter = ReplyWaiter(reply_q, conn, - self._allowed_remote_exmods) - - self._reply_q = reply_q - self._reply_q_conn = conn - - return self._reply_q - - def _send(self, target, ctxt, message, - wait_for_reply=None, timeout=None, - envelope=True, notify=False, retry=None): - - # FIXME(markmc): remove this temporary hack - class Context(object): - def __init__(self, d): - self.d = d - - def to_dict(self): - return self.d - - context = Context(ctxt) - msg = message - - if wait_for_reply: - msg_id = uuid.uuid4().hex - msg.update({'_msg_id': msg_id}) - msg.update({'_reply_q': self._get_reply_q()}) - - rpc_amqp._add_unique_id(msg) - unique_id = msg[rpc_amqp.UNIQUE_ID] - - rpc_amqp.pack_context(msg, context) - - if envelope: - msg = rpc_common.serialize_msg(msg) - - if wait_for_reply: - self._waiter.listen(msg_id) - log_msg = "CALL msg_id: %s " % msg_id - else: - log_msg = "CAST unique_id: %s " % unique_id - - try: - with self._get_connection(rpc_common.PURPOSE_SEND) as conn: - if notify: - exchange = self._get_exchange(target) - log_msg += "NOTIFY exchange '%(exchange)s'" \ - " topic '%(topic)s'" % { - 'exchange': exchange, - 'topic': target.topic} - LOG.debug(log_msg) - conn.notify_send(exchange, target.topic, msg, retry=retry) - elif target.fanout: - log_msg += "FANOUT topic '%(topic)s'" % { - 'topic': target.topic} - LOG.debug(log_msg) - conn.fanout_send(target.topic, msg, retry=retry) - else: - topic = target.topic - exchange = self._get_exchange(target) - if target.server: - topic = '%s.%s' % (target.topic, target.server) - log_msg += "exchange '%(exchange)s'" \ - " topic '%(topic)s'" % { - 'exchange': exchange, - 'topic': target.topic} - LOG.debug(log_msg) - conn.topic_send(exchange_name=exchange, topic=topic, - msg=msg, timeout=timeout, retry=retry) - - if wait_for_reply: - result = self._waiter.wait(msg_id, timeout) - if isinstance(result, Exception): - raise result - return result - finally: - if wait_for_reply: - self._waiter.unlisten(msg_id) - - def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, - retry=None): - return self._send(target, ctxt, message, wait_for_reply, timeout, - retry=retry) - - def send_notification(self, target, ctxt, message, version, retry=None): - return self._send(target, ctxt, message, - envelope=(version == 2.0), notify=True, retry=retry) - - def listen(self, target, batch_size, batch_timeout): - conn = self._get_connection(rpc_common.PURPOSE_LISTEN) - - listener = AMQPListener(self, conn) - - conn.declare_topic_consumer(exchange_name=self._get_exchange(target), - 
topic=target.topic, - callback=listener) - conn.declare_topic_consumer(exchange_name=self._get_exchange(target), - topic='%s.%s' % (target.topic, - target.server), - callback=listener) - conn.declare_fanout_consumer(target.topic, listener) - - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - conn = self._get_connection(rpc_common.PURPOSE_LISTEN) - - listener = AMQPListener(self, conn) - for target, priority in targets_and_priorities: - conn.declare_topic_consumer( - exchange_name=self._get_exchange(target), - topic='%s.%s' % (target.topic, priority), - callback=listener, queue_name=pool) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def cleanup(self): - if self._connection_pool: - self._connection_pool.empty() - self._connection_pool = None - - with self._reply_q_lock: - if self._reply_q is not None: - self._waiter.stop() - self._reply_q_conn.close() - self._reply_q_conn = None - self._reply_q = None - self._waiter = None diff --git a/oslo_messaging/_drivers/base.py b/oslo_messaging/_drivers/base.py deleted file mode 100644 index 24d703f..0000000 --- a/oslo_messaging/_drivers/base.py +++ /dev/null @@ -1,274 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import threading - -from oslo_config import cfg -from oslo_utils import excutils -from oslo_utils import timeutils -import six - - -from oslo_messaging import exceptions - -base_opts = [ - cfg.IntOpt('rpc_conn_pool_size', default=30, - deprecated_group='DEFAULT', - help='Size of RPC connection pool.'), - cfg.IntOpt('conn_pool_min_size', default=2, - help='The pool size limit for the connection expiration policy'), - cfg.IntOpt('conn_pool_ttl', default=1200, - help='The time-to-live in sec of idle connections in the pool') -] - - -def batch_poll_helper(func): - """Decorator to poll messages in batch - - This decorator helps drivers that poll messages one by one - to return a list of messages. 
- """ - def wrapper(in_self, timeout=None, batch_size=1, batch_timeout=None): - incomings = [] - driver_prefetch = in_self.prefetch_size - if driver_prefetch > 0: - batch_size = min(batch_size, driver_prefetch) - - with timeutils.StopWatch(timeout) as timeout_watch: - # poll first message - msg = func(in_self, timeout=timeout_watch.leftover(True)) - if msg is not None: - incomings.append(msg) - if batch_size == 1 or msg is None: - return incomings - - # update batch_timeout according to timeout for whole operation - timeout_left = timeout_watch.leftover(True) - if timeout_left is not None and ( - batch_timeout is None or timeout_left < batch_timeout): - batch_timeout = timeout_left - - with timeutils.StopWatch(batch_timeout) as batch_timeout_watch: - # poll remained batch messages - while len(incomings) < batch_size and msg is not None: - msg = func(in_self, timeout=batch_timeout_watch.leftover(True)) - if msg is not None: - incomings.append(msg) - - return incomings - return wrapper - - -class TransportDriverError(exceptions.MessagingException): - """Base class for transport driver specific exceptions.""" - - -@six.add_metaclass(abc.ABCMeta) -class IncomingMessage(object): - - def __init__(self, ctxt, message): - self.ctxt = ctxt - self.message = message - - def acknowledge(self): - """Acknowledge the message.""" - - @abc.abstractmethod - def requeue(self): - """Requeue the message.""" - - -@six.add_metaclass(abc.ABCMeta) -class RpcIncomingMessage(IncomingMessage): - - @abc.abstractmethod - def reply(self, reply=None, failure=None): - """Send a reply or failure back to the client.""" - - -@six.add_metaclass(abc.ABCMeta) -class PollStyleListener(object): - def __init__(self, prefetch_size=-1): - self.prefetch_size = prefetch_size - - @abc.abstractmethod - def poll(self, timeout=None, batch_size=1, batch_timeout=None): - """Blocking until 'batch_size' message is pending and return - [IncomingMessage]. - Waits for first message. Then waits for next batch_size-1 messages - during batch window defined by batch_timeout - This method block current thread until message comes, stop() is - executed by another thread or timemout is elapsed. - """ - - def stop(self): - """Stop listener. - Stop the listener message polling - """ - pass - - def cleanup(self): - """Cleanup listener. - Close connection (socket) used by listener if any. - As this is listener specific method, overwrite it in to derived class - if cleanup of listener required. - """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class Listener(object): - def __init__(self, batch_size, batch_timeout, - prefetch_size=-1): - """Init Listener - - :param batch_size: desired number of messages passed to - single on_incoming_callback notification - :param batch_timeout: defines how long should we wait for batch_size - messages if we already have some messages waiting for processing - :param prefetch_size: defines how many massages we want to prefetch - from backend (depend on driver type) by single request - """ - self.on_incoming_callback = None - self.batch_timeout = batch_timeout - self.prefetch_size = prefetch_size - if prefetch_size > 0: - batch_size = min(batch_size, prefetch_size) - self.batch_size = batch_size - - def start(self, on_incoming_callback): - """Start listener. - Start the listener message polling - - :param on_incoming_callback: callback function to be executed when - listener received messages. 
Messages should be processed and - acked/nacked by the callback - """ - self.on_incoming_callback = on_incoming_callback - - def stop(self): - """Stop listener. - Stop the listener message polling - """ - self.on_incoming_callback = None - - @abc.abstractmethod - def cleanup(self): - """Cleanup listener. - Close the connection (socket) used by the listener, if any. - As this is a listener-specific method, override it in the derived - class if the listener requires cleanup. - """ - - -class PollStyleListenerAdapter(Listener): - def __init__(self, poll_style_listener, batch_size, batch_timeout): - super(PollStyleListenerAdapter, self).__init__( - batch_size, batch_timeout, poll_style_listener.prefetch_size - ) - self._poll_style_listener = poll_style_listener - self._listen_thread = threading.Thread(target=self._runner) - self._listen_thread.daemon = True - self._started = False - - def start(self, on_incoming_callback): - """Start listener. - Start the listener message polling - - :param on_incoming_callback: callback function to be executed when - the listener receives messages. Messages should be processed and - acked/nacked by the callback - """ - super(PollStyleListenerAdapter, self).start(on_incoming_callback) - self._started = True - self._listen_thread.start() - - @excutils.forever_retry_uncaught_exceptions - def _runner(self): - while self._started: - incoming = self._poll_style_listener.poll( - batch_size=self.batch_size, batch_timeout=self.batch_timeout) - - if incoming: - self.on_incoming_callback(incoming) - - # listener is stopped but we need to process all already consumed - # messages - while True: - incoming = self._poll_style_listener.poll( - batch_size=self.batch_size, batch_timeout=self.batch_timeout) - - if not incoming: - return - self.on_incoming_callback(incoming) - - def stop(self): - """Stop listener. - Stop the listener message polling - """ - self._started = False - self._poll_style_listener.stop() - self._listen_thread.join() - super(PollStyleListenerAdapter, self).stop() - - def cleanup(self): - """Cleanup listener. - Close the connection (socket) used by the listener, if any. - As this is a listener-specific method, override it in the derived - class if the listener requires cleanup. - """ - self._poll_style_listener.cleanup() - - -@six.add_metaclass(abc.ABCMeta) -class BaseDriver(object): - prefetch_size = 0 - - def __init__(self, conf, url, - default_exchange=None, allowed_remote_exmods=None): - self.conf = conf - self._url = url - self._default_exchange = default_exchange - self._allowed_remote_exmods = allowed_remote_exmods or [] - - def require_features(self, requeue=False): - if requeue: - raise NotImplementedError('Message requeueing not supported by ' - 'this transport driver') - - @abc.abstractmethod - def send(self, target, ctxt, message, - wait_for_reply=None, timeout=None, envelope=False): - """Send a message to the given target.""" - - @abc.abstractmethod - def send_notification(self, target, ctxt, message, version): - """Send a notification message to the given target.""" - - @abc.abstractmethod - def listen(self, target, batch_size, batch_timeout): - """Construct a Listener for the given target.""" - - @abc.abstractmethod - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - """Construct a notification Listener for the given list of - (target, priority) tuples. 
- """ - - @abc.abstractmethod - def cleanup(self): - """Release all resources.""" diff --git a/oslo_messaging/_drivers/common.py b/oslo_messaging/_drivers/common.py deleted file mode 100644 index 539998b..0000000 --- a/oslo_messaging/_drivers/common.py +++ /dev/null @@ -1,509 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import copy -import logging -import sys -import traceback - -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import six - -import oslo_messaging -from oslo_messaging._i18n import _ -from oslo_messaging._i18n import _LE -from oslo_messaging import _utils as utils - -LOG = logging.getLogger(__name__) - -_EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' - - -'''RPC Envelope Version. - -This version number applies to the top level structure of messages sent out. -It does *not* apply to the message payload, which must be versioned -independently. For example, when using rpc APIs, a version number is applied -for changes to the API being exposed over rpc. This version number is handled -in the rpc proxy and dispatcher modules. - -This version number applies to the message envelope that is used in the -serialization done inside the rpc layer. See serialize_msg() and -deserialize_msg(). - -The current message format (version 2.0) is very simple. It is: - - { - 'oslo.version': , - 'oslo.message': - } - -Message format version '1.0' is just considered to be the messages we sent -without a message envelope. - -So, the current message envelope just includes the envelope version. It may -eventually contain additional information, such as a signature for the message -payload. - -We will JSON encode the application message payload. The message envelope, -which includes the JSON encoded application message body, will be passed down -to the messaging libraries as a dict. -''' -_RPC_ENVELOPE_VERSION = '2.0' - -_VERSION_KEY = 'oslo.version' -_MESSAGE_KEY = 'oslo.message' - -_REMOTE_POSTFIX = '_Remote' - - -class RPCException(Exception): - msg_fmt = _("An unknown RPC related exception occurred.") - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if not message: - try: - message = self.msg_fmt % kwargs - - except Exception: - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_LE('Exception in string format operation, ' - 'kwargs are:')) - for name, value in six.iteritems(kwargs): - LOG.error("%s: %s", name, value) - # at least get the core message out if something happened - message = self.msg_fmt - - super(RPCException, self).__init__(message) - - -class Timeout(RPCException): - """Signifies that a timeout has occurred. - - This exception is raised if the rpc_response_timeout is reached while - waiting for a response from the remote side. 
- """ - msg_fmt = _('Timeout while waiting on RPC response - ' - 'topic: "%(topic)s", RPC method: "%(method)s" ' - 'info: "%(info)s"') - - def __init__(self, info=None, topic=None, method=None): - """Initiates Timeout object. - - :param info: Extra info to convey to the user - :param topic: The topic that the rpc call was sent to - :param method: The name of the rpc method being - called - """ - self.info = info - self.topic = topic - self.method = method - super(Timeout, self).__init__( - None, - info=info or _(''), - topic=topic or _(''), - method=method or _('')) - - -class DuplicateMessageError(RPCException): - msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.") - - -class InvalidRPCConnectionReuse(RPCException): - msg_fmt = _("Invalid reuse of an RPC connection.") - - -class UnsupportedRpcVersion(RPCException): - msg_fmt = _("Specified RPC version, %(version)s, not supported by " - "this endpoint.") - - -class UnsupportedRpcEnvelopeVersion(RPCException): - msg_fmt = _("Specified RPC envelope version, %(version)s, " - "not supported by this endpoint.") - - -class RpcVersionCapError(RPCException): - msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low") - - -class Connection(object): - """A connection, returned by rpc.create_connection(). - - This class represents a connection to the message bus used for rpc. - An instance of this class should never be created by users of the rpc API. - Use rpc.create_connection() instead. - """ - def close(self): - """Close the connection. - - This method must be called when the connection will no longer be used. - It will ensure that any resources associated with the connection, such - as a network connection, and cleaned up. - """ - raise NotImplementedError() - - -def serialize_remote_exception(failure_info): - """Prepares exception data to be sent over rpc. - - Failure_info should be a sys.exc_info() tuple. - - """ - tb = traceback.format_exception(*failure_info) - - failure = failure_info[1] - - kwargs = {} - if hasattr(failure, 'kwargs'): - kwargs = failure.kwargs - - # NOTE(matiu): With cells, it's possible to re-raise remote, remote - # exceptions. Lets turn it back into the original exception type. - cls_name = six.text_type(failure.__class__.__name__) - mod_name = six.text_type(failure.__class__.__module__) - if (cls_name.endswith(_REMOTE_POSTFIX) and - mod_name.endswith(_REMOTE_POSTFIX)): - cls_name = cls_name[:-len(_REMOTE_POSTFIX)] - mod_name = mod_name[:-len(_REMOTE_POSTFIX)] - - data = { - 'class': cls_name, - 'module': mod_name, - 'message': six.text_type(failure), - 'tb': tb, - 'args': failure.args, - 'kwargs': kwargs - } - - json_data = jsonutils.dumps(data) - - return json_data - - -def deserialize_remote_exception(data, allowed_remote_exmods): - failure = jsonutils.loads(six.text_type(data)) - - trace = failure.get('tb', []) - message = failure.get('message', "") + "\n" + "\n".join(trace) - name = failure.get('class') - module = failure.get('module') - - # NOTE(ameade): We DO NOT want to allow just any module to be imported, in - # order to prevent arbitrary code execution. 
- if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods: - return oslo_messaging.RemoteError(name, failure.get('message'), trace) - - try: - __import__(module) - mod = sys.modules[module] - klass = getattr(mod, name) - if not issubclass(klass, Exception): - raise TypeError("Can only deserialize Exceptions") - - failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) - except (AttributeError, TypeError, ImportError): - return oslo_messaging.RemoteError(name, failure.get('message'), trace) - - ex_type = type(failure) - str_override = lambda self: message - new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), - {'__str__': str_override, '__unicode__': str_override}) - new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) - try: - # NOTE(ameade): Dynamically create a new exception type and swap it in - # as the new type for the exception. This only works on user defined - # Exceptions and not core Python exceptions. This is important because - # we cannot necessarily change an exception message so we must override - # the __str__ method. - failure.__class__ = new_ex_type - except TypeError: - # NOTE(ameade): If a core exception then just add the traceback to the - # first exception argument. - failure.args = (message,) + failure.args[1:] - return failure - - -class CommonRpcContext(object): - def __init__(self, **kwargs): - self.values = kwargs - - def __getattr__(self, key): - try: - return self.values[key] - except KeyError: - raise AttributeError(key) - - def to_dict(self): - return copy.deepcopy(self.values) - - @classmethod - def from_dict(cls, values): - return cls(**values) - - def deepcopy(self): - return self.from_dict(self.to_dict()) - - def update_store(self): - # local.store.context = self - pass - - -class ClientException(Exception): - """Encapsulates actual exception expected to be hit by a RPC proxy object. - - Merely instantiating it records the current exception information, which - will be passed back to the RPC client without exceptional logging. - """ - def __init__(self): - self._exc_info = sys.exc_info() - - -def serialize_msg(raw_msg): - # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more - # information about this format. - msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, - _MESSAGE_KEY: jsonutils.dumps(raw_msg)} - - return msg - - -def deserialize_msg(msg): - # NOTE(russellb): Hang on to your hats, this road is about to - # get a little bumpy. - # - # Robustness Principle: - # "Be strict in what you send, liberal in what you accept." - # - # At this point we have to do a bit of guessing about what it - # is we just received. Here is the set of possibilities: - # - # 1) We received a dict. This could be 2 things: - # - # a) Inspect it to see if it looks like a standard message envelope. - # If so, great! - # - # b) If it doesn't look like a standard message envelope, it could either - # be a notification, or a message from before we added a message - # envelope (referred to as version 1.0). - # Just return the message as-is. - # - # 2) It's any other non-dict type. Just return it and hope for the best. - # This case covers return values from rpc.call() from before message - # envelopes were used. (messages to call a method were always a dict) - - if not isinstance(msg, dict): - # See #2 above. - return msg - - base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) - if not all(map(lambda key: key in msg, base_envelope_keys)): - # See #1.b above. 
- return msg - - # At this point we think we have the message envelope - # format we were expecting. (#1.a above) - - if not utils.version_is_compatible(_RPC_ENVELOPE_VERSION, - msg[_VERSION_KEY]): - raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) - - raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) - - return raw_msg - - -class DecayingTimer(object): - def __init__(self, duration=None): - self._watch = timeutils.StopWatch(duration=duration) - - def start(self): - self._watch.start() - - def check_return(self, timeout_callback=None, *args, **kwargs): - maximum = kwargs.pop('maximum', None) - left = self._watch.leftover(return_none=True) - if left is None: - return maximum - if left <= 0 and timeout_callback is not None: - timeout_callback(*args, **kwargs) - return left if maximum is None else min(left, maximum) - - -# NOTE(sileht): Even if rabbit has only one Connection class, -# this connection can be used for two purposes: -# * wait and receive amqp messages (only does read operations on the socket) -# * send messages to the broker (only does write operations on the socket) -# The code inside a connection class is not concurrency safe. -# Using one Connection class instance for both will result -# in eventlet complaining about multiple greenthreads that read/write the -# same fd concurrently... because 'send' and 'listen' run in different -# greenthreads. -# So, a connection cannot be shared between threads/greenthreads, and -# these two variables define the purpose of the connection -# to allow drivers to add special handling if needed (like heartbeat). -# amqp drivers create 3 kinds of connections: -# * driver.listen*(): each call creates a new 'PURPOSE_LISTEN' connection -# * driver.send*(): a pool of 'PURPOSE_SEND' connections is used -# * the driver internally has another 'PURPOSE_LISTEN' connection dedicated -# to waiting for replies to rpc calls -PURPOSE_LISTEN = 'listen' -PURPOSE_SEND = 'send' - - -class ConnectionContext(Connection): - """The class that is actually returned to the create_connection() caller. - - This is essentially a wrapper around Connection that supports 'with'. - It can also return a new Connection, or one from a pool. - - The function will also catch when an instance of this class is to be - deleted. With that we can return Connections to the pool on exceptions - and so forth without making the caller be responsible for catching them. - If possible the function makes sure to return a connection to the pool. - """ - - def __init__(self, connection_pool, purpose): - """Create a new connection, or get one from the pool.""" - self.connection = None - self.connection_pool = connection_pool - pooled = purpose == PURPOSE_SEND - if pooled: - self.connection = connection_pool.get() - else: - # a non-pooled connection is requested, so create a new connection - self.connection = connection_pool.create(purpose) - self.pooled = pooled - self.connection.pooled = pooled - - def __enter__(self): - """When with ConnectionContext() is used, return self.""" - return self - - def _done(self): - """If the connection came from a pool, clean it up and put it back. - If it did not come from a pool, close it. 
- """ - if self.connection: - if self.pooled: - # Reset the connection so it's ready for the next caller - # to grab from the pool - try: - self.connection.reset() - except Exception: - LOG.exception(_LE("Fail to reset the connection, drop it")) - try: - self.connection.close() - except Exception: - pass - self.connection = self.connection_pool.create() - finally: - self.connection_pool.put(self.connection) - else: - try: - self.connection.close() - except Exception: - pass - self.connection = None - - def __exit__(self, exc_type, exc_value, tb): - """End of 'with' statement. We're done here.""" - self._done() - - def __del__(self): - """Caller is done with this connection. Make sure we cleaned up.""" - self._done() - - def close(self): - """Caller is done with this connection.""" - self._done() - - def __getattr__(self, key): - """Proxy all other calls to the Connection instance.""" - if self.connection: - return getattr(self.connection, key) - else: - raise InvalidRPCConnectionReuse() - - -class ConfigOptsProxy(collections.Mapping): - """Proxy for oslo_config.cfg.ConfigOpts. - - Values from the query part of the transport url (if they are both present - and valid) override corresponding values from the configuration. - """ - - def __init__(self, conf, url): - self._conf = conf - self._url = url - - def __getattr__(self, name): - value = getattr(self._conf, name) - if isinstance(value, self._conf.GroupAttr): - return self.GroupAttrProxy(self._conf, name, value, self._url) - return value - - def __getitem__(self, name): - return self.__getattr__(name) - - def __contains__(self, name): - return name in self._conf - - def __iter__(self): - return iter(self._conf) - - def __len__(self): - return len(self._conf) - - class GroupAttrProxy(collections.Mapping): - """Internal helper proxy for oslo_config.cfg.ConfigOpts.GroupAttr.""" - - _VOID_MARKER = object() - - def __init__(self, conf, group_name, group, url): - self._conf = conf - self._group_name = group_name - self._group = group - self._url = url - - def __getattr__(self, opt_name): - # Make sure that the group has this specific option - opt_value_conf = getattr(self._group, opt_name) - # If the option is also present in the url and has a valid - # (i.e. convertible) value type, then try to override it - opt_value_url = self._url.query.get(opt_name, self._VOID_MARKER) - if opt_value_url is self._VOID_MARKER: - return opt_value_conf - opt_info = self._conf._get_opt_info(opt_name, self._group_name) - return opt_info['opt'].type(opt_value_url) - - def __getitem__(self, opt_name): - return self.__getattr__(opt_name) - - def __contains__(self, opt_name): - return opt_name in self._group - - def __iter__(self): - return iter(self._group) - - def __len__(self): - return len(self._group) diff --git a/oslo_messaging/_drivers/impl_amqp1.py b/oslo_messaging/_drivers/impl_amqp1.py deleted file mode 100644 index 46fce39..0000000 --- a/oslo_messaging/_drivers/impl_amqp1.py +++ /dev/null @@ -1,299 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Driver for the 'amqp' transport. - -This module provides a transport driver that speaks version 1.0 of the AMQP -messaging protocol. The driver sends messages and creates subscriptions via -'tasks' that are performed on its behalf via the controller module. -""" - -import collections -import logging -import os -import threading -import time -import uuid - -from oslo_serialization import jsonutils -from oslo_utils import importutils -from oslo_utils import timeutils - -from oslo_messaging._drivers import base -from oslo_messaging._drivers import common -from oslo_messaging._i18n import _LI, _LW -from oslo_messaging import target as messaging_target - - -proton = importutils.try_import('proton') -controller = importutils.try_import( - 'oslo_messaging._drivers.amqp1_driver.controller' -) -drivertasks = importutils.try_import( - 'oslo_messaging._drivers.amqp1_driver.drivertasks' -) -LOG = logging.getLogger(__name__) - - -def marshal_response(reply=None, failure=None): - # TODO(grs): do replies have a context? - # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't - # have support for vbin8. - msg = proton.Message(inferred=True) - if failure: - failure = common.serialize_remote_exception(failure) - data = {"failure": failure} - else: - data = {"response": reply} - msg.body = jsonutils.dumps(data) - return msg - - -def unmarshal_response(message, allowed): - # TODO(kgiusti) This may fail to unpack and raise an exception. Need to - # communicate this to the caller! - data = jsonutils.loads(message.body) - failure = data.get('failure') - if failure is not None: - raise common.deserialize_remote_exception(failure, allowed) - return data.get("response") - - -def marshal_request(request, context, envelope): - # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't - # have support for vbin8. 
- msg = proton.Message(inferred=True) - if envelope: - request = common.serialize_msg(request) - data = { - "request": request, - "context": context - } - msg.body = jsonutils.dumps(data) - return msg - - -def unmarshal_request(message): - data = jsonutils.loads(message.body) - msg = common.deserialize_msg(data.get("request")) - return (msg, data.get("context")) - - -class ProtonIncomingMessage(base.RpcIncomingMessage): - def __init__(self, listener, ctxt, request, message): - super(ProtonIncomingMessage, self).__init__(ctxt, request) - self.listener = listener - self._reply_to = message.reply_to - self._correlation_id = message.id - - def reply(self, reply=None, failure=None): - """Schedule a ReplyTask to send the reply.""" - if self._reply_to: - response = marshal_response(reply=reply, failure=failure) - response.correlation_id = self._correlation_id - LOG.debug("Replying to %s", self._correlation_id) - task = drivertasks.ReplyTask(self._reply_to, response) - self.listener.driver._ctrl.add_task(task) - else: - LOG.debug("Ignoring reply as no reply address available") - - def acknowledge(self): - pass - - def requeue(self): - pass - - -class Queue(object): - def __init__(self): - self._queue = collections.deque() - self._lock = threading.Lock() - self._pop_wake_condition = threading.Condition(self._lock) - self._started = True - - def put(self, item): - with self._lock: - self._queue.appendleft(item) - self._pop_wake_condition.notify() - - def pop(self, timeout): - with timeutils.StopWatch(timeout) as stop_watcher: - with self._lock: - while len(self._queue) == 0: - if stop_watcher.expired() or not self._started: - return None - self._pop_wake_condition.wait( - stop_watcher.leftover(return_none=True) - ) - return self._queue.pop() - - def stop(self): - with self._lock: - self._started = False - self._pop_wake_condition.notify_all() - - -class ProtonListener(base.PollStyleListener): - def __init__(self, driver): - super(ProtonListener, self).__init__(driver.prefetch_size) - self.driver = driver - self.incoming = Queue() - self.id = uuid.uuid4().hex - - def stop(self): - self.incoming.stop() - - @base.batch_poll_helper - def poll(self, timeout=None): - message = self.incoming.pop(timeout) - if message is None: - return None - request, ctxt = unmarshal_request(message) - LOG.debug("Returning incoming message") - return ProtonIncomingMessage(self, ctxt, request, message) - - -class ProtonDriver(base.BaseDriver): - """AMQP 1.0 Driver - - See :doc:`AMQP1.0` for details. - """ - - def __init__(self, conf, url, - default_exchange=None, allowed_remote_exmods=[]): - # TODO(kgiusti) Remove once driver fully stabilizes: - LOG.warning(_LW("Support for the 'amqp' transport is EXPERIMENTAL.")) - if proton is None or hasattr(controller, "fake_controller"): - raise NotImplementedError("Proton AMQP C libraries not installed") - - super(ProtonDriver, self).__init__(conf, url, default_exchange, - allowed_remote_exmods) - # TODO(grs): handle authentication etc - self._hosts = url.hosts - self._conf = conf - self._default_exchange = default_exchange - - # lazy connection setup - don't create the controller until - # after the first messaging request: - self._ctrl = None - self._pid = None - self._lock = threading.Lock() - - def _ensure_connect_called(func): - """Causes a new controller to be created when the messaging service is - first used by the current process. It is safe to push tasks to it - whether connected or not, but those tasks won't be processed until - connection completes. 
- """ - def wrap(self, *args, **kws): - with self._lock: - # check to see if a fork was done after the Controller and its - # I/O thread was spawned. old_pid will be None the first time - # this is called which will cause the Controller to be created. - old_pid = self._pid - self._pid = os.getpid() - - if old_pid != self._pid: - if self._ctrl is not None: - # fork was called after the Controller was created, and - # we are now executing as the child process. Do not - # touch the existing Controller - it is owned by the - # parent. Best we can do here is simply drop it and - # hope we get lucky. - LOG.warning(_LW("Process forked after connection " - "established!")) - self._ctrl = None - # Create a Controller that connects to the messaging - # service: - self._ctrl = controller.Controller(self._hosts, - self._default_exchange, - self._conf) - self._ctrl.connect() - return func(self, *args, **kws) - return wrap - - @_ensure_connect_called - def send(self, target, ctxt, message, - wait_for_reply=None, timeout=None, envelope=False, - retry=None): - """Send a message to the given target.""" - # TODO(kgiusti) need to add support for retry - if retry is not None: - raise NotImplementedError('"retry" not implemented by ' - 'this transport driver') - request = marshal_request(message, ctxt, envelope) - expire = 0 - if timeout: - expire = time.time() + timeout # when the caller times out - # amqp uses millisecond time values, timeout is seconds - request.ttl = int(timeout * 1000) - request.expiry_time = int(expire * 1000) - LOG.debug("Send to %s", target) - task = drivertasks.SendTask(target, request, wait_for_reply, expire) - self._ctrl.add_task(task) - # wait for the eventloop to process the command. If the command is - # an RPC call retrieve the reply message - - if wait_for_reply: - reply = task.wait(timeout) - if reply: - # TODO(kgiusti) how to handle failure to un-marshal? 
- # Must log, and determine best way to communicate this failure - # back up to the caller - reply = unmarshal_response(reply, self._allowed_remote_exmods) - LOG.debug("Send to %s returning", target) - return reply - - @_ensure_connect_called - def send_notification(self, target, ctxt, message, version, - retry=None): - """Send a notification message to the given target.""" - # TODO(kgiusti) need to add support for retry - if retry is not None: - raise NotImplementedError('"retry" not implemented by ' - 'this transport driver') - return self.send(target, ctxt, message, envelope=(version == 2.0)) - - @_ensure_connect_called - def listen(self, target, batch_size, batch_timeout): - """Construct a Listener for the given target.""" - LOG.debug("Listen to %s", target) - listener = ProtonListener(self) - self._ctrl.add_task(drivertasks.ListenTask(target, listener)) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - @_ensure_connect_called - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - LOG.debug("Listen for notifications %s", targets_and_priorities) - if pool: - raise NotImplementedError('"pool" not implemented by ' - 'this transport driver') - listener = ProtonListener(self) - for target, priority in targets_and_priorities: - topic = '%s.%s' % (target.topic, priority) - t = messaging_target.Target(topic=topic) - self._ctrl.add_task(drivertasks.ListenTask(t, listener, True)) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def cleanup(self): - """Release all resources.""" - if self._ctrl: - self._ctrl.shutdown() - self._ctrl = None - LOG.info(_LI("AMQP 1.0 messaging driver shutdown")) diff --git a/oslo_messaging/_drivers/impl_fake.py b/oslo_messaging/_drivers/impl_fake.py deleted file mode 100644 index f25e8b7..0000000 --- a/oslo_messaging/_drivers/impl_fake.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
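The FakeDriver defined below passes messages in memory and is intended for unit tests. A minimal sketch of reaching it through the public oslo.messaging API — assuming an oslo.messaging release of this era with the 'fake://' transport URL and the futurist-backed 'threading' executor; the topic, server, and endpoint names are invented for illustration:

    from oslo_config import cfg

    import oslo_messaging


    class EchoEndpoint(object):
        def echo(self, ctxt, arg):
            # Return the argument unchanged so the caller can assert on it.
            return arg


    # 'fake://' selects FakeDriver: everything stays inside this process.
    transport = oslo_messaging.get_transport(cfg.CONF, 'fake://')
    target = oslo_messaging.Target(topic='testtopic', server='server1')
    server = oslo_messaging.get_rpc_server(transport, target, [EchoEndpoint()],
                                           executor='threading')
    server.start()

    client = oslo_messaging.RPCClient(transport, target)
    assert client.call({}, 'echo', arg='hello') == 'hello'

    server.stop()
    server.wait()

Because the FakeExchangeManager keeps its queues per driver instance, both ends must share the same transport object, as above.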
- -import copy -import json -import threading -import time - -from six import moves - -import oslo_messaging -from oslo_messaging._drivers import base - - -class FakeIncomingMessage(base.RpcIncomingMessage): - def __init__(self, ctxt, message, reply_q, requeue): - super(FakeIncomingMessage, self).__init__(ctxt, message) - self.requeue_callback = requeue - self._reply_q = reply_q - - def reply(self, reply=None, failure=None): - if self._reply_q: - failure = failure[1] if failure else None - self._reply_q.put((reply, failure)) - - def requeue(self): - self.requeue_callback() - - -class FakeListener(base.PollStyleListener): - - def __init__(self, exchange_manager, targets, pool=None): - super(FakeListener, self).__init__() - self._exchange_manager = exchange_manager - self._targets = targets - self._pool = pool - self._stopped = threading.Event() - - # NOTE(sileht): Ensure that all needed queues exist even if the - # listener has not been polled yet - for target in self._targets: - exchange = self._exchange_manager.get_exchange(target.exchange) - exchange.ensure_queue(target, pool) - - @base.batch_poll_helper - def poll(self, timeout=None): - if timeout is not None: - deadline = time.time() + timeout - else: - deadline = None - while not self._stopped.is_set(): - for target in self._targets: - exchange = self._exchange_manager.get_exchange(target.exchange) - (ctxt, message, reply_q, requeue) = exchange.poll(target, - self._pool) - if message is not None: - message = FakeIncomingMessage(ctxt, message, reply_q, - requeue) - return message - if deadline is not None: - pause = deadline - time.time() - if pause < 0: - break - pause = min(pause, 0.050) - else: - pause = 0.050 - time.sleep(pause) - return None - - def stop(self): - self._stopped.set() - - -class FakeExchange(object): - - def __init__(self, name): - self.name = name - self._queues_lock = threading.RLock() - self._topic_queues = {} - self._server_queues = {} - - def ensure_queue(self, target, pool): - with self._queues_lock: - if target.server: - self._get_server_queue(target.topic, target.server) - else: - self._get_topic_queue(target.topic, pool) - - def _get_topic_queue(self, topic, pool=None): - if pool and (topic, pool) not in self._topic_queues: - # NOTE(sileht): if the pool name is set, we need to - # copy all the already delivered messages from the - # default queue to this queue - self._topic_queues[(topic, pool)] = copy.deepcopy( - self._get_topic_queue(topic)) - return self._topic_queues.setdefault((topic, pool), []) - - def _get_server_queue(self, topic, server): - return self._server_queues.setdefault((topic, server), []) - - def deliver_message(self, topic, ctxt, message, - server=None, fanout=False, reply_q=None): - with self._queues_lock: - if fanout: - queues = [q for t, q in self._server_queues.items() - if t[0] == topic] - elif server is not None: - queues = [self._get_server_queue(topic, server)] - else: - # NOTE(sileht): ensure at least the queue without - # pool name exists - self._get_topic_queue(topic) - queues = [q for t, q in self._topic_queues.items() - if t[0] == topic] - - def requeue(): - self.deliver_message(topic, ctxt, message, server=server, - fanout=fanout, reply_q=reply_q) - - for queue in queues: - queue.append((ctxt, message, reply_q, requeue)) - - def poll(self, target, pool): - with self._queues_lock: - if target.server: - queue = self._get_server_queue(target.topic, target.server) - else: - queue = self._get_topic_queue(target.topic, pool) - return queue.pop(0) if queue else (None, None, None, 
None) - - -class FakeExchangeManager(object): - def __init__(self, default_exchange): - self._default_exchange = default_exchange - self._exchanges_lock = threading.Lock() - self._exchanges = {} - - def get_exchange(self, name): - if name is None: - name = self._default_exchange - with self._exchanges_lock: - return self._exchanges.setdefault(name, FakeExchange(name)) - - -class FakeDriver(base.BaseDriver): - """Fake driver used for testing. - - This driver passes messages in memory, and should only be used for - unit tests. - - """ - - def __init__(self, conf, url, default_exchange=None, - allowed_remote_exmods=None): - super(FakeDriver, self).__init__(conf, url, default_exchange, - allowed_remote_exmods) - - self._exchange_manager = FakeExchangeManager(default_exchange) - - def require_features(self, requeue=True): - pass - - @staticmethod - def _check_serialize(message): - """Make sure a message intended for rpc can be serialized. - - We specifically want to use json, not our own jsonutils because - jsonutils has some extra logic to automatically convert objects to - primitive types so that they can be serialized. We want to catch all - cases where non-primitive types make it into this code and treat it as - an error. - """ - json.dumps(message) - - def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None): - self._check_serialize(message) - - exchange = self._exchange_manager.get_exchange(target.exchange) - - reply_q = None - if wait_for_reply: - reply_q = moves.queue.Queue() - - exchange.deliver_message(target.topic, ctxt, message, - server=target.server, - fanout=target.fanout, - reply_q=reply_q) - - if wait_for_reply: - try: - reply, failure = reply_q.get(timeout=timeout) - if failure: - raise failure - else: - return reply - except moves.queue.Empty: - raise oslo_messaging.MessagingTimeout( - 'No reply on topic %s' % target.topic) - - return None - - def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, - retry=None): - # NOTE(sileht): retry doesn't need to be implemented, the fake - # transport always works - return self._send(target, ctxt, message, wait_for_reply, timeout) - - def send_notification(self, target, ctxt, message, version, retry=None): - # NOTE(sileht): retry doesn't need to be implemented, the fake - # transport always works - self._send(target, ctxt, message) - - def listen(self, target, batch_size, batch_timeout): - exchange = target.exchange or self._default_exchange - listener = FakeListener(self._exchange_manager, - [oslo_messaging.Target( - topic=target.topic, - server=target.server, - exchange=exchange), - oslo_messaging.Target( - topic=target.topic, - exchange=exchange)]) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - targets = [ - oslo_messaging.Target( - topic='%s.%s' % (target.topic, priority), - exchange=target.exchange) - for target, priority in targets_and_priorities] - listener = FakeListener(self._exchange_manager, targets, pool) - - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def cleanup(self): - pass diff --git a/oslo_messaging/_drivers/impl_kafka.py b/oslo_messaging/_drivers/impl_kafka.py deleted file mode 100644 index b448fcd..0000000 --- a/oslo_messaging/_drivers/impl_kafka.py +++ /dev/null @@ -1,378 +0,0 @@ -# Copyright (C) 2015 Cisco Systems, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import threading - -from oslo_messaging._drivers import base -from oslo_messaging._drivers import common as driver_common -from oslo_messaging._drivers import pool as driver_pool -from oslo_messaging._i18n import _LE -from oslo_messaging._i18n import _LW -from oslo_serialization import jsonutils - -import kafka -from kafka.common import KafkaError -from oslo_config import cfg -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - -PURPOSE_SEND = 'send' -PURPOSE_LISTEN = 'listen' - -kafka_opts = [ - cfg.StrOpt('kafka_default_host', default='localhost', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='Default Kafka broker Host'), - - cfg.PortOpt('kafka_default_port', default=9092, - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='Default Kafka broker Port'), - - cfg.IntOpt('kafka_max_fetch_bytes', default=1024 * 1024, - help='Max fetch bytes of Kafka consumer'), - - cfg.FloatOpt('kafka_consumer_timeout', default=1.0, - help='Default timeout(s) for Kafka consumers'), - - cfg.IntOpt('pool_size', default=10, - help='Pool Size for Kafka Consumers'), - - cfg.IntOpt('conn_pool_min_size', default=2, - help='The pool size limit for the connection expiration policy'), - - cfg.IntOpt('conn_pool_ttl', default=1200, - help='The time-to-live in sec of idle connections in the pool') -] - -CONF = cfg.CONF - - -def pack_context_with_message(ctxt, msg): - """Pack context into msg.""" - if isinstance(ctxt, dict): - context_d = ctxt - else: - context_d = ctxt.to_dict() - - return {'message': msg, 'context': context_d} - - -def target_to_topic(target, priority=None): - """Convert target into topic string - - :param target: Message destination target - :type target: oslo_messaging.Target - :param priority: Notification priority - :type priority: string - """ - if not priority: - return target.topic - return target.topic + '.' + priority - - -class Connection(object): - - def __init__(self, conf, url, purpose): - - driver_conf = conf.oslo_messaging_kafka - - self.conf = conf - self.kafka_client = None - self.producer = None - self.consumer = None - self.fetch_messages_max_bytes = driver_conf.kafka_max_fetch_bytes - self.consumer_timeout = float(driver_conf.kafka_consumer_timeout) - self.url = url - self._parse_url() - # TODO(Support for manual/auto_commit functionality) - # When auto_commit is False, consumer can manually notify - # the completion of the subscription. 
- # Currently we don't support the non auto commit option - self.auto_commit = True - self._consume_loop_stopped = False - - def _parse_url(self): - driver_conf = self.conf.oslo_messaging_kafka - - self.hostaddrs = [] - - for host in self.url.hosts: - if host.hostname: - self.hostaddrs.append("%s:%s" % ( - host.hostname, - host.port or driver_conf.kafka_default_port)) - - if not self.hostaddrs: - self.hostaddrs.append("%s:%s" % (driver_conf.kafka_default_host, - driver_conf.kafka_default_port)) - - def notify_send(self, topic, ctxt, msg, retry): - """Send messages to Kafka broker. - - :param topic: String of the topic - :param ctxt: context for the messages - :param msg: messages for publishing - :param retry: the number of retries - """ - message = pack_context_with_message(ctxt, msg) - self._ensure_connection() - self._send_and_retry(message, topic, retry) - - def _send_and_retry(self, message, topic, retry): - current_retry = 0 - if not isinstance(message, str): - message = jsonutils.dumps(message) - while message is not None: - try: - self._send(message, topic) - message = None - except Exception: - LOG.warning(_LW("Failed to publish a message to topic %s"), - topic) - current_retry += 1 - if retry is not None and current_retry >= retry: - LOG.exception(_LE("Failed to send data " - "after max retry times")) - message = None - - def _send(self, message, topic): - self.producer.send_messages(topic, message) - - def consume(self, timeout=None): - """Receive up to 'max_fetch_messages' messages. - - :param timeout: poll timeout in seconds - """ - duration = (self.consumer_timeout if timeout is None else timeout) - timer = driver_common.DecayingTimer(duration=duration) - timer.start() - - def _raise_timeout(): - LOG.debug('Timed out waiting for Kafka response') - raise driver_common.Timeout() - - poll_timeout = (self.consumer_timeout if timeout is None - else min(timeout, self.consumer_timeout)) - - while True: - if self._consume_loop_stopped: - return - try: - next_timeout = poll_timeout * 1000.0 - # TODO(use configure() method instead) - # Currently KafkaConsumer does not support updating - # only the fetch_max_wait_ms parameter - self.consumer._config['fetch_max_wait_ms'] = next_timeout - messages = list(self.consumer.fetch_messages()) - except Exception as e: - LOG.exception(_LE("Failed to consume messages: %s"), e) - messages = None - - if not messages: - poll_timeout = timer.check_return( - _raise_timeout, maximum=self.consumer_timeout) - continue - - return messages - - def stop_consuming(self): - self._consume_loop_stopped = True - - def reset(self): - """Reset a connection so it can be used again.""" - if self.consumer: - self.consumer.close() - self.consumer = None - - def close(self): - if self.kafka_client: - self.kafka_client.close() - self.kafka_client = None - if self.producer: - self.producer.stop() - self.consumer = None - - def commit(self): - """Commit is used by subscribers belonging to the same group. - After consuming messages, commit is called to prevent - the other subscribers which belong to the same group - from re-consuming the same messages. - - Currently self.auto_commit option is always True, - so we don't need to call this function. 
- """ - self.consumer.commit() - - def _ensure_connection(self): - if self.kafka_client: - return - try: - self.kafka_client = kafka.KafkaClient( - self.hostaddrs) - self.producer = kafka.SimpleProducer(self.kafka_client) - except KafkaError as e: - LOG.exception(_LE("Kafka Connection is not available: %s"), e) - self.kafka_client = None - - def declare_topic_consumer(self, topics, group=None): - self._ensure_connection() - for topic in topics: - self.kafka_client.ensure_topic_exists(topic) - self.consumer = kafka.KafkaConsumer( - *topics, group_id=group, - bootstrap_servers=self.hostaddrs, - fetch_message_max_bytes=self.fetch_messages_max_bytes) - self._consume_loop_stopped = False - - -class OsloKafkaMessage(base.RpcIncomingMessage): - - def __init__(self, ctxt, message): - super(OsloKafkaMessage, self).__init__(ctxt, message) - - def requeue(self): - LOG.warning(_LW("requeue is not supported")) - - def reply(self, reply=None, failure=None): - LOG.warning(_LW("reply is not supported")) - - -class KafkaListener(base.PollStyleListener): - - def __init__(self, conn): - super(KafkaListener, self).__init__() - self._stopped = threading.Event() - self.conn = conn - self.incoming_queue = [] - - @base.batch_poll_helper - def poll(self, timeout=None): - while not self._stopped.is_set(): - if self.incoming_queue: - return self.incoming_queue.pop(0) - try: - messages = self.conn.consume(timeout=timeout) - for msg in messages: - message = msg.value - LOG.debug('poll got message : %s', message) - message = jsonutils.loads(message) - self.incoming_queue.append(OsloKafkaMessage( - ctxt=message['context'], message=message['message'])) - except driver_common.Timeout: - return None - - def stop(self): - self._stopped.set() - self.conn.stop_consuming() - - def cleanup(self): - self.conn.close() - - def commit(self): - # TODO(Support for manually/auto commit functionality) - # It's better to allow users to commit manually and support for - # self.auto_commit = False option. For now, this commit function - # is meaningless since user couldn't call this function and - # auto_commit option is always True. - self.conn.commit() - - -class KafkaDriver(base.BaseDriver): - """Note: Current implementation of this driver is experimental. - We will have functional and/or integrated testing enabled for this driver. 
- """ - - def __init__(self, conf, url, default_exchange=None, - allowed_remote_exmods=None): - - opt_group = cfg.OptGroup(name='oslo_messaging_kafka', - title='Kafka driver options') - conf.register_group(opt_group) - conf.register_opts(kafka_opts, group=opt_group) - - super(KafkaDriver, self).__init__( - conf, url, default_exchange, allowed_remote_exmods) - - # the pool configuration properties - max_size = self.conf.oslo_messaging_kafka.pool_size - min_size = self.conf.oslo_messaging_kafka.conn_pool_min_size - ttl = self.conf.oslo_messaging_kafka.conn_pool_ttl - - self.connection_pool = driver_pool.ConnectionPool( - self.conf, max_size, min_size, ttl, - self._url, Connection) - self.listeners = [] - - def cleanup(self): - for c in self.listeners: - c.close() - self.listeners = [] - - def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, - retry=None): - raise NotImplementedError( - 'The RPC implementation for Kafka is not implemented') - - def send_notification(self, target, ctxt, message, version, retry=None): - """Send notification to Kafka brokers - - :param target: Message destination target - :type target: oslo_messaging.Target - :param ctxt: Message context - :type ctxt: dict - :param message: Message payload to pass - :type message: dict - :param version: Messaging API version (currently not used) - :type version: str - :param retry: an optional default kafka consumer retries configuration - None means to retry forever - 0 means no retry - N means N retries - :type retry: int - """ - with self._get_connection(purpose=PURPOSE_SEND) as conn: - conn.notify_send(target_to_topic(target), ctxt, message, retry) - - def listen(self, target): - raise NotImplementedError( - 'The RPC implementation for Kafka is not implemented') - - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - """Listen to a specified list of targets on Kafka brokers - - :param targets_and_priorities: List of pairs (target, priority) - priority is not used for kafka driver - target.exchange_target.topic is used as - a kafka topic - :type targets_and_priorities: list - :param pool: consumer group of Kafka consumers - :type pool: string - """ - conn = self._get_connection(purpose=PURPOSE_LISTEN) - topics = set() - for target, priority in targets_and_priorities: - topics.add(target_to_topic(target, priority)) - - conn.declare_topic_consumer(topics, pool) - - listener = KafkaListener(conn) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def _get_connection(self, purpose): - return driver_common.ConnectionContext(self.connection_pool, purpose) diff --git a/oslo_messaging/_drivers/impl_pika.py b/oslo_messaging/_drivers/impl_pika.py deleted file mode 100644 index 7ad0744..0000000 --- a/oslo_messaging/_drivers/impl_pika.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import timeutils
-import pika_pool
-import retrying
-
-from oslo_messaging._drivers import base
-from oslo_messaging._drivers.pika_driver import (pika_connection_factory as
- pika_drv_conn_factory)
-from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns
-from oslo_messaging._drivers.pika_driver import pika_engine as pika_drv_engine
-from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc
-from oslo_messaging._drivers.pika_driver import pika_listener as pika_drv_lstnr
-from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg
-from oslo_messaging._drivers.pika_driver import pika_poller as pika_drv_poller
-from oslo_messaging import exceptions
-
-LOG = logging.getLogger(__name__)
-
-pika_pool_opts = [
- cfg.IntOpt('pool_max_size', default=30,
- help="Maximum number of connections to keep queued."),
- cfg.IntOpt('pool_max_overflow', default=0,
- help="Maximum number of connections to create above "
- "`pool_max_size`."),
- cfg.IntOpt('pool_timeout', default=30,
- help="Default number of seconds to wait for a connection to "
- "become available."),
- cfg.IntOpt('pool_recycle', default=600,
- help="Lifetime of a connection (since creation) in seconds "
- "or None for no recycling. Expired connections are "
- "closed on acquire."),
- cfg.IntOpt('pool_stale', default=60,
- help="Threshold in seconds at which inactive (since release) "
- "connections are considered stale, or None for no "
- "staleness. Stale connections are closed on acquire.")
-]
-
-notification_opts = [
- cfg.BoolOpt('notification_persistence', default=False,
- help="Persist notification messages."),
- cfg.StrOpt('default_notification_exchange',
- default="${control_exchange}_notification",
- help="Exchange name for sending notifications."),
- cfg.IntOpt(
- 'notification_listener_prefetch_count', default=100,
- help="Maximum number of unacknowledged messages that RabbitMQ "
- "can send to the notification listener."
- ),
- cfg.IntOpt(
- 'default_notification_retry_attempts', default=-1,
- help="Reconnecting retry count in case of connectivity problems "
- "while sending a notification; -1 means retry forever."
- ),
- cfg.FloatOpt(
- 'notification_retry_delay', default=0.25,
- help="Reconnecting retry delay in case of connectivity problems "
- "while sending a notification message."
- )
-]
-
-rpc_opts = [
- cfg.IntOpt('rpc_queue_expiration', default=60,
- help="Time to live in seconds for RPC queues without "
- "consumers."),
- cfg.StrOpt('default_rpc_exchange', default="${control_exchange}_rpc",
- help="Exchange name for sending RPC messages."),
- cfg.StrOpt('rpc_reply_exchange', default="${control_exchange}_rpc_reply",
- help="Exchange name for receiving RPC replies."),
- cfg.IntOpt(
- 'rpc_listener_prefetch_count', default=100,
- help="Maximum number of unacknowledged messages that RabbitMQ "
- "can send to the RPC listener."
- ),
- cfg.IntOpt(
- 'rpc_reply_listener_prefetch_count', default=100,
- help="Maximum number of unacknowledged messages that RabbitMQ "
- "can send to the RPC reply listener."
- ),
- cfg.IntOpt(
- 'rpc_reply_retry_attempts', default=-1,
- help="Reconnecting retry count in case of connectivity problems "
- "while sending a reply; -1 means retry forever, bounded by "
- "rpc_timeout."
- ),
- cfg.FloatOpt(
- 'rpc_reply_retry_delay', default=0.25,
- help="Reconnecting retry delay in case of connectivity problems "
- "while sending a reply."
- ),
- cfg.IntOpt(
- 'default_rpc_retry_attempts', default=-1,
- help="Reconnecting retry count in case of connectivity problems "
- "while sending an RPC message; -1 means retry forever. If the "
- "actual number of retry attempts is not 0, the RPC request "
- "could be processed more than once."
- ),
- cfg.FloatOpt(
- 'rpc_retry_delay', default=0.25,
- help="Reconnecting retry delay in case of connectivity problems "
- "while sending an RPC message."
- )
-]
-
-
-class PikaDriver(base.BaseDriver):
- def __init__(self, conf, url, default_exchange=None,
- allowed_remote_exmods=None):
- opt_group = cfg.OptGroup(name='oslo_messaging_pika',
- title='Pika driver options')
- conf.register_group(opt_group)
- conf.register_opts(pika_drv_conn_factory.pika_opts, group=opt_group)
- conf.register_opts(pika_pool_opts, group=opt_group)
- conf.register_opts(rpc_opts, group=opt_group)
- conf.register_opts(notification_opts, group=opt_group)
-
- self._pika_engine = pika_drv_engine.PikaEngine(
- conf, url, default_exchange, allowed_remote_exmods
- )
- self._reply_listener = pika_drv_lstnr.RpcReplyPikaListener(
- self._pika_engine
- )
- super(PikaDriver, self).__init__(conf, url, default_exchange,
- allowed_remote_exmods)
-
- def require_features(self, requeue=False):
- pass
-
- def _declare_rpc_exchange(self, exchange, stopwatch):
- timeout = stopwatch.leftover(return_none=True)
- with (self._pika_engine.connection_without_confirmation_pool
- .acquire(timeout=timeout)) as conn:
- try:
- self._pika_engine.declare_exchange_by_channel(
- conn.channel,
- self._pika_engine.get_rpc_exchange_name(
- exchange
- ), "direct", False
- )
- except pika_pool.Timeout as e:
- raise exceptions.MessagingTimeout(
- "Timeout for the current operation expired. {}.".format(
- str(e)
- )
- )
-
- def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
- retry=None):
- with timeutils.StopWatch(duration=timeout) as stopwatch:
- if retry is None:
- retry = self._pika_engine.default_rpc_retry_attempts
-
- exchange = self._pika_engine.get_rpc_exchange_name(
- target.exchange
- )
-
- def on_exception(ex):
- if isinstance(ex, pika_drv_exc.ExchangeNotFoundException):
- # We want to declare the exchange here: if we send to an
- # exchange which does not exist, we get a ChannelClosed
- # exception and need to reconnect.
- try:
- self._declare_rpc_exchange(exchange, stopwatch)
- except pika_drv_exc.ConnectionException as e:
- LOG.warning("Problem during declaring exchange. %s", e)
- return True
- elif isinstance(ex, (pika_drv_exc.ConnectionException,
- exceptions.MessageDeliveryFailure)):
- LOG.warning("Problem during message sending. %s", ex)
- return True
- else:
- return False
-
- retrier = (
- None if retry == 0 else
- retrying.retry(
- stop_max_attempt_number=(None if retry == -1 else retry),
- retry_on_exception=on_exception,
- wait_fixed=self._pika_engine.rpc_retry_delay * 1000,
- )
- )
-
- if target.fanout:
- return self.cast_all_workers(
- exchange, target.topic, ctxt, message, stopwatch, retrier
- )
-
- routing_key = self._pika_engine.get_rpc_queue_name(
- target.topic, target.server, retrier is None
- )
-
- msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine,
- message, ctxt)
- try:
- reply = msg.send(
- exchange=exchange,
- routing_key=routing_key,
- reply_listener=(
- self._reply_listener if wait_for_reply else None
- ),
- stopwatch=stopwatch,
- retrier=retrier
- )
- except pika_drv_exc.ExchangeNotFoundException as ex:
- try:
- self._declare_rpc_exchange(exchange, stopwatch)
- except pika_drv_exc.ConnectionException as e:
- LOG.warning("Problem during declaring exchange. %s", e)
- raise ex
-
- if reply is not None:
- if reply.failure is not None:
- raise reply.failure
-
- return reply.result
-
- def cast_all_workers(self, exchange, topic, ctxt, message, stopwatch,
- retrier=None):
- msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message,
- ctxt)
- try:
- msg.send(
- exchange=exchange,
- routing_key=self._pika_engine.get_rpc_queue_name(
- topic, "all_workers", retrier is None
- ),
- mandatory=False,
- stopwatch=stopwatch,
- retrier=retrier
- )
- except pika_drv_exc.ExchangeNotFoundException:
- try:
- self._declare_rpc_exchange(exchange, stopwatch)
- except pika_drv_exc.ConnectionException as e:
- LOG.warning("Problem during declaring exchange. %s", e)
-
- def _declare_notification_queue_binding(
- self, target, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH):
- if stopwatch.expired():
- raise exceptions.MessagingTimeout(
- "Timeout for the current operation expired."
- )
- try:
- timeout = stopwatch.leftover(return_none=True)
- with (self._pika_engine.connection_without_confirmation_pool
- .acquire)(timeout=timeout) as conn:
- self._pika_engine.declare_queue_binding_by_channel(
- conn.channel,
- exchange=(
- target.exchange or
- self._pika_engine.default_notification_exchange
- ),
- queue=target.topic,
- routing_key=target.topic,
- exchange_type='direct',
- queue_expiration=None,
- durable=self._pika_engine.notification_persistence,
- )
- except pika_pool.Timeout as e:
- raise exceptions.MessagingTimeout(
- "Timeout for the current operation expired. {}.".format(str(e))
- )
-
- def send_notification(self, target, ctxt, message, version, retry=None):
- if retry is None:
- retry = self._pika_engine.default_notification_retry_attempts
-
- def on_exception(ex):
- if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException,
- pika_drv_exc.RoutingException)):
- LOG.warning("Problem during sending notification. %s", ex)
- try:
- self._declare_notification_queue_binding(target)
- except pika_drv_exc.ConnectionException as e:
- LOG.warning("Problem during declaring notification queue "
- "binding. %s", e)
- return True
- elif isinstance(ex, (pika_drv_exc.ConnectionException,
- pika_drv_exc.MessageRejectedException)):
- LOG.warning("Problem during sending notification. 
%s", ex) - return True - else: - return False - - retrier = retrying.retry( - stop_max_attempt_number=(None if retry == -1 else retry), - retry_on_exception=on_exception, - wait_fixed=self._pika_engine.notification_retry_delay * 1000, - ) - - msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message, - ctxt) - return msg.send( - exchange=( - target.exchange or - self._pika_engine.default_notification_exchange - ), - routing_key=target.topic, - confirm=True, - mandatory=True, - persistent=self._pika_engine.notification_persistence, - retrier=retrier - ) - - def listen(self, target, batch_size, batch_timeout): - return pika_drv_poller.RpcServicePikaPoller( - self._pika_engine, target, batch_size, batch_timeout, - self._pika_engine.rpc_listener_prefetch_count - ) - - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - return pika_drv_poller.NotificationPikaPoller( - self._pika_engine, targets_and_priorities, batch_size, - batch_timeout, - self._pika_engine.notification_listener_prefetch_count, pool - ) - - def cleanup(self): - self._reply_listener.cleanup() - self._pika_engine.cleanup() diff --git a/oslo_messaging/_drivers/impl_rabbit.py b/oslo_messaging/_drivers/impl_rabbit.py deleted file mode 100644 index ffe8901..0000000 --- a/oslo_messaging/_drivers/impl_rabbit.py +++ /dev/null @@ -1,1345 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import errno -import functools -import itertools -import math -import os -import random -import socket -import ssl -import sys -import threading -import time -import uuid - -import kombu -import kombu.connection -import kombu.entity -import kombu.messaging -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import netutils -import six -from six.moves.urllib import parse - -from oslo_messaging._drivers import amqp as rpc_amqp -from oslo_messaging._drivers import amqpdriver -from oslo_messaging._drivers import base -from oslo_messaging._drivers import common as rpc_common -from oslo_messaging._drivers import pool -from oslo_messaging._i18n import _ -from oslo_messaging._i18n import _LE -from oslo_messaging._i18n import _LI -from oslo_messaging._i18n import _LW -from oslo_messaging import _utils -from oslo_messaging import exceptions - -# NOTE(sileht): don't exists in py2 socket module -TCP_USER_TIMEOUT = 18 - - -rabbit_opts = [ - cfg.StrOpt('kombu_ssl_version', - default='', - deprecated_group='DEFAULT', - help='SSL version to use (valid only if SSL enabled). ' - 'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, ' - 'TLSv1_1, and TLSv1_2 may be available on some ' - 'distributions.' 
- ), - cfg.StrOpt('kombu_ssl_keyfile', - default='', - deprecated_group='DEFAULT', - help='SSL key file (valid only if SSL enabled).'), - cfg.StrOpt('kombu_ssl_certfile', - default='', - deprecated_group='DEFAULT', - help='SSL cert file (valid only if SSL enabled).'), - cfg.StrOpt('kombu_ssl_ca_certs', - default='', - deprecated_group='DEFAULT', - help='SSL certification authority file ' - '(valid only if SSL enabled).'), - cfg.FloatOpt('kombu_reconnect_delay', - default=1.0, - deprecated_group='DEFAULT', - help='How long to wait before reconnecting in response to an ' - 'AMQP consumer cancel notification.'), - cfg.StrOpt('kombu_compression', - help="EXPERIMENTAL: Possible values are: gzip, bz2. If not " - "set compression will not be used. This option may not" - "be available in future versions."), - cfg.IntOpt('kombu_missing_consumer_retry_timeout', - deprecated_name="kombu_reconnect_timeout", - default=60, - help='How long to wait a missing client before abandoning to ' - 'send it its replies. This value should not be longer ' - 'than rpc_response_timeout.'), - cfg.StrOpt('kombu_failover_strategy', - choices=('round-robin', 'shuffle'), - default='round-robin', - help='Determines how the next RabbitMQ node is chosen in case ' - 'the one we are currently connected to becomes ' - 'unavailable. Takes effect only if more than one ' - 'RabbitMQ node is provided in config.'), - cfg.StrOpt('rabbit_host', - default='localhost', - deprecated_group='DEFAULT', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='The RabbitMQ broker address where a single node is ' - 'used.'), - cfg.PortOpt('rabbit_port', - default=5672, - deprecated_group='DEFAULT', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='The RabbitMQ broker port where a single node is used.'), - cfg.ListOpt('rabbit_hosts', - default=['$rabbit_host:$rabbit_port'], - deprecated_group='DEFAULT', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='RabbitMQ HA cluster host:port pairs.'), - cfg.BoolOpt('rabbit_use_ssl', - default=False, - deprecated_group='DEFAULT', - help='Connect over SSL for RabbitMQ.'), - cfg.StrOpt('rabbit_userid', - default='guest', - deprecated_group='DEFAULT', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='The RabbitMQ userid.'), - cfg.StrOpt('rabbit_password', - default='guest', - deprecated_group='DEFAULT', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='The RabbitMQ password.', - secret=True), - cfg.StrOpt('rabbit_login_method', - default='AMQPLAIN', - deprecated_group='DEFAULT', - help='The RabbitMQ login method.'), - cfg.StrOpt('rabbit_virtual_host', - default='/', - deprecated_group='DEFAULT', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='The RabbitMQ virtual host.'), - cfg.IntOpt('rabbit_retry_interval', - default=1, - help='How frequently to retry connecting with RabbitMQ.'), - cfg.IntOpt('rabbit_retry_backoff', - default=2, - deprecated_group='DEFAULT', - help='How long to backoff for between retries when connecting ' - 'to RabbitMQ.'), - cfg.IntOpt('rabbit_interval_max', - default=30, - help='Maximum interval of RabbitMQ connection retries. 
' - 'Default is 30 seconds.'), - cfg.IntOpt('rabbit_max_retries', - default=0, - deprecated_for_removal=True, - deprecated_group='DEFAULT', - help='Maximum number of RabbitMQ connection retries. ' - 'Default is 0 (infinite retry count).'), - cfg.BoolOpt('rabbit_ha_queues', - default=False, - deprecated_group='DEFAULT', - help='Try to use HA queues in RabbitMQ (x-ha-policy: all). ' - 'If you change this option, you must wipe the RabbitMQ ' - 'database. In RabbitMQ 3.0, queue mirroring is no longer ' - 'controlled by the x-ha-policy argument when declaring a ' - 'queue. If you just want to make sure that all queues (except ' - ' those with auto-generated names) are mirrored across all ' - 'nodes, run: ' - """\"rabbitmqctl set_policy HA '^(?!amq\.).*' """ - """'{"ha-mode": "all"}' \""""), - cfg.IntOpt('rabbit_transient_queues_ttl', - min=1, - default=1800, - help='Positive integer representing duration in seconds for ' - 'queue TTL (x-expires). Queues which are unused for the ' - 'duration of the TTL are automatically deleted. The ' - 'parameter affects only reply and fanout queues.'), - cfg.IntOpt('rabbit_qos_prefetch_count', - default=0, - help='Specifies the number of messages to prefetch. Setting to ' - 'zero allows unlimited messages.'), - cfg.IntOpt('heartbeat_timeout_threshold', - default=60, - help="Number of seconds after which the Rabbit broker is " - "considered down if heartbeat's keep-alive fails " - "(0 disable the heartbeat). EXPERIMENTAL"), - cfg.IntOpt('heartbeat_rate', - default=2, - help='How often times during the heartbeat_timeout_threshold ' - 'we check the heartbeat.'), - - # NOTE(sileht): deprecated option since oslo_messaging 1.5.0, - cfg.BoolOpt('fake_rabbit', - default=False, - deprecated_group='DEFAULT', - help='Deprecated, use rpc_backend=kombu+memory or ' - 'rpc_backend=fake'), -] - -LOG = logging.getLogger(__name__) - - -def _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl): - """Construct the arguments for declaring a queue. - - If the rabbit_ha_queues option is set, we try to declare a mirrored queue - as described here: - - http://www.rabbitmq.com/ha.html - - Setting x-ha-policy to all means that the queue will be mirrored - to all nodes in the cluster. In RabbitMQ 3.0, queue mirroring is - no longer controlled by the x-ha-policy argument when declaring a - queue. If you just want to make sure that all queues (except those - with auto-generated names) are mirrored across all nodes, run: - rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-mode": "all"}' - - If the rabbit_queue_ttl option is > 0, then the queue is - declared with the "Queue TTL" value as described here: - - https://www.rabbitmq.com/ttl.html - - Setting a queue TTL causes the queue to be automatically deleted - if it is unused for the TTL duration. This is a helpful safeguard - to prevent queues with zero consumers from growing without bound. 
- """ - args = {} - - if rabbit_ha_queues: - args['x-ha-policy'] = 'all' - - if rabbit_queue_ttl > 0: - args['x-expires'] = rabbit_queue_ttl * 1000 - - return args - - -class RabbitMessage(dict): - def __init__(self, raw_message): - super(RabbitMessage, self).__init__( - rpc_common.deserialize_msg(raw_message.payload)) - LOG.trace('RabbitMessage.Init: message %s', self) - self._raw_message = raw_message - - def acknowledge(self): - LOG.trace('RabbitMessage.acknowledge: message %s', self) - self._raw_message.ack() - - def requeue(self): - LOG.trace('RabbitMessage.requeue: message %s', self) - self._raw_message.requeue() - - -class Consumer(object): - """Consumer class.""" - - def __init__(self, exchange_name, queue_name, routing_key, type, durable, - exchange_auto_delete, queue_auto_delete, callback, - nowait=False, rabbit_ha_queues=None, rabbit_queue_ttl=0): - """Init the Publisher class with the exchange_name, routing_key, - type, durable auto_delete - """ - self.queue_name = queue_name - self.exchange_name = exchange_name - self.routing_key = routing_key - self.exchange_auto_delete = exchange_auto_delete - self.queue_auto_delete = queue_auto_delete - self.durable = durable - self.callback = callback - self.type = type - self.nowait = nowait - self.queue_arguments = _get_queue_arguments(rabbit_ha_queues, - rabbit_queue_ttl) - - self.queue = None - self.exchange = kombu.entity.Exchange( - name=exchange_name, - type=type, - durable=self.durable, - auto_delete=self.exchange_auto_delete) - - def declare(self, conn): - """Re-declare the queue after a rabbit (re)connect.""" - self.queue = kombu.entity.Queue( - name=self.queue_name, - channel=conn.channel, - exchange=self.exchange, - durable=self.durable, - auto_delete=self.queue_auto_delete, - routing_key=self.routing_key, - queue_arguments=self.queue_arguments) - - try: - LOG.debug('[%s] Queue.declare: %s', - conn.connection_id, self.queue_name) - self.queue.declare() - except conn.connection.channel_errors as exc: - # NOTE(jrosenboom): This exception may be triggered by a race - # condition. Simply retrying will solve the error most of the time - # and should work well enough as a workaround until the race - # condition itself can be fixed. - # See https://bugs.launchpad.net/neutron/+bug/1318721 for details. - if exc.code == 404: - self.queue.declare() - else: - raise - - def consume(self, tag): - """Actually declare the consumer on the amqp channel. This will - start the flow of messages from the queue. Using the - Connection.consume() will process the messages, - calling the appropriate callback. - """ - - self.queue.consume(callback=self._callback, - consumer_tag=six.text_type(tag), - nowait=self.nowait) - - def cancel(self, tag): - LOG.trace('ConsumerBase.cancel: canceling %s', tag) - self.queue.cancel(six.text_type(tag)) - - def _callback(self, message): - """Call callback with deserialized message. - - Messages that are processed and ack'ed. - """ - - m2p = getattr(self.queue.channel, 'message_to_python', None) - if m2p: - message = m2p(message) - try: - self.callback(RabbitMessage(message)) - except Exception: - LOG.exception(_LE("Failed to process message" - " ... skipping it.")) - message.ack() - - -class DummyConnectionLock(_utils.DummyLock): - def heartbeat_acquire(self): - pass - - -class ConnectionLock(DummyConnectionLock): - """Lock object to protect access the the kombu connection - - This is a lock object to protect access the the kombu connection - object between the heartbeat thread and the driver thread. 
- - They are two way to acquire this lock: - * lock.acquire() - * lock.heartbeat_acquire() - - In both case lock.release(), release the lock. - - The goal is that the heartbeat thread always have the priority - for acquiring the lock. This ensures we have no heartbeat - starvation when the driver sends a lot of messages. - - So when lock.heartbeat_acquire() is called next time the lock - is released(), the caller unconditionally acquires - the lock, even someone else have asked for the lock before it. - """ - - def __init__(self): - self._workers_waiting = 0 - self._heartbeat_waiting = False - self._lock_acquired = None - self._monitor = threading.Lock() - self._workers_locks = threading.Condition(self._monitor) - self._heartbeat_lock = threading.Condition(self._monitor) - self._get_thread_id = _utils.fetch_current_thread_functor() - - def acquire(self): - with self._monitor: - while self._lock_acquired: - self._workers_waiting += 1 - self._workers_locks.wait() - self._workers_waiting -= 1 - self._lock_acquired = self._get_thread_id() - - def heartbeat_acquire(self): - # NOTE(sileht): must be called only one time - with self._monitor: - while self._lock_acquired is not None: - self._heartbeat_waiting = True - self._heartbeat_lock.wait() - self._heartbeat_waiting = False - self._lock_acquired = self._get_thread_id() - - def release(self): - with self._monitor: - if self._lock_acquired is None: - raise RuntimeError("We can't release a not acquired lock") - thread_id = self._get_thread_id() - if self._lock_acquired != thread_id: - raise RuntimeError("We can't release lock acquired by another " - "thread/greenthread; %s vs %s" % - (self._lock_acquired, thread_id)) - self._lock_acquired = None - if self._heartbeat_waiting: - self._heartbeat_lock.notify() - elif self._workers_waiting > 0: - self._workers_locks.notify() - - @contextlib.contextmanager - def for_heartbeat(self): - self.heartbeat_acquire() - try: - yield - finally: - self.release() - - -class Connection(object): - """Connection object.""" - - pools = {} - - def __init__(self, conf, url, purpose): - # NOTE(viktors): Parse config options - driver_conf = conf.oslo_messaging_rabbit - - self.max_retries = driver_conf.rabbit_max_retries - self.interval_start = driver_conf.rabbit_retry_interval - self.interval_stepping = driver_conf.rabbit_retry_backoff - self.interval_max = driver_conf.rabbit_interval_max - - self.login_method = driver_conf.rabbit_login_method - self.fake_rabbit = driver_conf.fake_rabbit - self.virtual_host = driver_conf.rabbit_virtual_host - self.rabbit_hosts = driver_conf.rabbit_hosts - self.rabbit_port = driver_conf.rabbit_port - self.rabbit_userid = driver_conf.rabbit_userid - self.rabbit_password = driver_conf.rabbit_password - self.rabbit_ha_queues = driver_conf.rabbit_ha_queues - self.rabbit_transient_queues_ttl = \ - driver_conf.rabbit_transient_queues_ttl - self.rabbit_qos_prefetch_count = driver_conf.rabbit_qos_prefetch_count - self.heartbeat_timeout_threshold = \ - driver_conf.heartbeat_timeout_threshold - self.heartbeat_rate = driver_conf.heartbeat_rate - self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay - self.amqp_durable_queues = driver_conf.amqp_durable_queues - self.amqp_auto_delete = driver_conf.amqp_auto_delete - self.rabbit_use_ssl = driver_conf.rabbit_use_ssl - self.kombu_missing_consumer_retry_timeout = \ - driver_conf.kombu_missing_consumer_retry_timeout - self.kombu_failover_strategy = driver_conf.kombu_failover_strategy - self.kombu_compression = driver_conf.kombu_compression - - if 
self.rabbit_use_ssl: - self.kombu_ssl_version = driver_conf.kombu_ssl_version - self.kombu_ssl_keyfile = driver_conf.kombu_ssl_keyfile - self.kombu_ssl_certfile = driver_conf.kombu_ssl_certfile - self.kombu_ssl_ca_certs = driver_conf.kombu_ssl_ca_certs - - # Try forever? - if self.max_retries <= 0: - self.max_retries = None - - if url.virtual_host is not None: - virtual_host = url.virtual_host - else: - virtual_host = self.virtual_host - - self._url = '' - if self.fake_rabbit: - LOG.warning(_LW("Deprecated: fake_rabbit option is deprecated, " - "set rpc_backend to kombu+memory or use the fake " - "driver instead.")) - self._url = 'memory://%s/' % virtual_host - elif url.hosts: - if url.transport.startswith('kombu+'): - LOG.warning(_LW('Selecting the kombu transport through the ' - 'transport url (%s) is a experimental feature ' - 'and this is not yet supported.'), - url.transport) - if len(url.hosts) > 1: - random.shuffle(url.hosts) - for host in url.hosts: - transport = url.transport.replace('kombu+', '') - transport = transport.replace('rabbit', 'amqp') - self._url += '%s%s://%s:%s@%s:%s/%s' % ( - ";" if self._url else '', - transport, - parse.quote(host.username or ''), - parse.quote(host.password or ''), - self._parse_url_hostname(host.hostname) or '', - str(host.port or 5672), - virtual_host) - elif url.transport.startswith('kombu+'): - # NOTE(sileht): url have a + but no hosts - # (like kombu+memory:///), pass it to kombu as-is - transport = url.transport.replace('kombu+', '') - self._url = "%s://%s" % (transport, virtual_host) - else: - if len(self.rabbit_hosts) > 1: - random.shuffle(self.rabbit_hosts) - for adr in self.rabbit_hosts: - hostname, port = netutils.parse_host_port( - adr, default_port=self.rabbit_port) - self._url += '%samqp://%s:%s@%s:%s/%s' % ( - ";" if self._url else '', - parse.quote(self.rabbit_userid, ''), - parse.quote(self.rabbit_password, ''), - self._parse_url_hostname(hostname), port, - virtual_host) - - self._initial_pid = os.getpid() - - self._consumers = {} - self._producer = None - self._new_tags = set() - self._active_tags = {} - self._tags = itertools.count(1) - - # Set of exchanges and queues declared on the channel to avoid - # unnecessary redeclaration. 
This set is resetted each time - # the connection is resetted in Connection._set_current_channel - self._declared_exchanges = set() - self._declared_queues = set() - - self._consume_loop_stopped = False - self.channel = None - self.purpose = purpose - - # NOTE(sileht): if purpose is PURPOSE_LISTEN - # we don't need the lock because we don't - # have a heartbeat thread - if purpose == rpc_common.PURPOSE_SEND: - self._connection_lock = ConnectionLock() - else: - self._connection_lock = DummyConnectionLock() - - self.connection_id = str(uuid.uuid4()) - self.name = "%s:%d:%s" % (os.path.basename(sys.argv[0]), - os.getpid(), - self.connection_id) - self.connection = kombu.connection.Connection( - self._url, ssl=self._fetch_ssl_params(), - login_method=self.login_method, - heartbeat=self.heartbeat_timeout_threshold, - failover_strategy=self.kombu_failover_strategy, - transport_options={ - 'confirm_publish': True, - 'client_properties': { - 'capabilities': { - 'authentication_failure_close': True, - 'connection.blocked': True, - 'consumer_cancel_notify': True - }, - 'connection_name': self.name}, - 'on_blocked': self._on_connection_blocked, - 'on_unblocked': self._on_connection_unblocked, - }, - ) - - LOG.debug('[%(connection_id)s] Connecting to AMQP server on' - ' %(hostname)s:%(port)s', - self._get_connection_info()) - - # NOTE(sileht): kombu recommend to run heartbeat_check every - # seconds, but we use a lock around the kombu connection - # so, to not lock to much this lock to most of the time do nothing - # expected waiting the events drain, we start heartbeat_check and - # retrieve the server heartbeat packet only two times more than - # the minimum required for the heartbeat works - # (heatbeat_timeout/heartbeat_rate/2.0, default kombu - # heartbeat_rate is 2) - self._heartbeat_wait_timeout = ( - float(self.heartbeat_timeout_threshold) / - float(self.heartbeat_rate) / 2.0) - self._heartbeat_support_log_emitted = False - - # NOTE(sileht): just ensure the connection is setuped at startup - self.ensure_connection() - - # NOTE(sileht): if purpose is PURPOSE_LISTEN - # the consume code does the heartbeat stuff - # we don't need a thread - self._heartbeat_thread = None - if purpose == rpc_common.PURPOSE_SEND: - self._heartbeat_start() - - LOG.debug('[%(connection_id)s] Connected to AMQP server on ' - '%(hostname)s:%(port)s via [%(transport)s] client with' - ' port %(client_port)s.', - self._get_connection_info()) - - # NOTE(sileht): value chosen according the best practice from kombu - # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop - # For heatbeat, we can set a bigger timeout, and check we receive the - # heartbeat packets regulary - if self._heartbeat_supported_and_enabled(): - self._poll_timeout = self._heartbeat_wait_timeout - else: - self._poll_timeout = 1 - - if self._url.startswith('memory://'): - # Kludge to speed up tests. 
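
The kombu in-memory transport that this kludge targets can be exercised on its own; a minimal sketch, assuming kombu's SimpleQueue interface (the queue name is illustrative):

import kombu

# memory:// keeps all queues in-process, so a publish/consume
# round-trip completes without a broker; that is why the driver can
# shrink polling_interval and the poll timeout when it sees this URL.
conn = kombu.Connection('memory:///')
queue = conn.SimpleQueue('demo_queue')
queue.put({'ping': 1})
msg = queue.get(timeout=1)
msg.ack()
assert msg.payload == {'ping': 1}
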
- self.connection.transport.polling_interval = 0.0 - # Fixup logging - self.connection.hostname = "memory_driver" - self.connection.port = 1234 - self._poll_timeout = 0.05 - - # FIXME(markmc): use oslo sslutils when it is available as a library - _SSL_PROTOCOLS = { - "tlsv1": ssl.PROTOCOL_TLSv1, - "sslv23": ssl.PROTOCOL_SSLv23 - } - - _OPTIONAL_PROTOCOLS = { - 'sslv2': 'PROTOCOL_SSLv2', - 'sslv3': 'PROTOCOL_SSLv3', - 'tlsv1_1': 'PROTOCOL_TLSv1_1', - 'tlsv1_2': 'PROTOCOL_TLSv1_2', - } - for protocol in _OPTIONAL_PROTOCOLS: - try: - _SSL_PROTOCOLS[protocol] = getattr(ssl, - _OPTIONAL_PROTOCOLS[protocol]) - except AttributeError: - pass - - @classmethod - def validate_ssl_version(cls, version): - key = version.lower() - try: - return cls._SSL_PROTOCOLS[key] - except KeyError: - raise RuntimeError(_("Invalid SSL version : %s") % version) - - def _parse_url_hostname(self, hostname): - """Handles hostname returned from urlparse and checks whether it's - ipaddress. If it's ipaddress it ensures that it has brackets for IPv6. - """ - return '[%s]' % hostname if ':' in hostname else hostname - - def _fetch_ssl_params(self): - """Handles fetching what ssl params should be used for the connection - (if any). - """ - if self.rabbit_use_ssl: - ssl_params = dict() - - # http://docs.python.org/library/ssl.html - ssl.wrap_socket - if self.kombu_ssl_version: - ssl_params['ssl_version'] = self.validate_ssl_version( - self.kombu_ssl_version) - if self.kombu_ssl_keyfile: - ssl_params['keyfile'] = self.kombu_ssl_keyfile - if self.kombu_ssl_certfile: - ssl_params['certfile'] = self.kombu_ssl_certfile - if self.kombu_ssl_ca_certs: - ssl_params['ca_certs'] = self.kombu_ssl_ca_certs - # We might want to allow variations in the - # future with this? - ssl_params['cert_reqs'] = ssl.CERT_REQUIRED - return ssl_params or True - return False - - @staticmethod - def _on_connection_blocked(reason): - LOG.error(_LE("The broker has blocked the connection: %s"), reason) - - @staticmethod - def _on_connection_unblocked(): - LOG.info(_LI("The broker has unblocked the connection")) - - def ensure_connection(self): - # NOTE(sileht): we reset the channel and ensure - # the kombu underlying connection works - self._set_current_channel(None) - self.ensure(method=lambda: self.connection.connection) - self.set_transport_socket_timeout() - - def ensure(self, method, retry=None, - recoverable_error_callback=None, error_callback=None, - timeout_is_error=True): - """Will retry up to retry number of times. - retry = None means use the value of rabbit_max_retries - retry = -1 means to retry forever - retry = 0 means no retry - retry = N means N retries - - NOTE(sileht): Must be called within the connection lock - """ - - current_pid = os.getpid() - if self._initial_pid != current_pid: - LOG.warning(_LW("Process forked after connection established! " - "This can result in unpredictable behavior. 
" - "See: http://docs.openstack.org/developer/" - "oslo.messaging/transport.html")) - self._initial_pid = current_pid - - if retry is None: - retry = self.max_retries - if retry is None or retry < 0: - retry = None - - def on_error(exc, interval): - LOG.debug("[%s] Received recoverable error from kombu:" - % self.connection_id, - exc_info=True) - - recoverable_error_callback and recoverable_error_callback(exc) - - interval = (self.kombu_reconnect_delay + interval - if self.kombu_reconnect_delay > 0 - else interval) - - info = {'err_str': exc, 'sleep_time': interval} - info.update(self._get_connection_info()) - - if 'Socket closed' in six.text_type(exc): - LOG.error(_LE('[%(connection_id)s] AMQP server' - ' %(hostname)s:%(port)s closed' - ' the connection. Check login credentials:' - ' %(err_str)s'), info) - else: - LOG.error(_LE('[%(connection_id)s] AMQP server on ' - '%(hostname)s:%(port)s is unreachable: ' - '%(err_str)s. Trying again in ' - '%(sleep_time)d seconds. Client port: ' - '%(client_port)s'), info) - - # XXX(nic): when reconnecting to a RabbitMQ cluster - # with mirrored queues in use, the attempt to release the - # connection can hang "indefinitely" somewhere deep down - # in Kombu. Blocking the thread for a bit prior to - # release seems to kludge around the problem where it is - # otherwise reproduceable. - # TODO(sileht): Check if this is useful since we - # use kombu for HA connection, the interval_step - # should sufficient, because the underlying kombu transport - # connection object freed. - if self.kombu_reconnect_delay > 0: - LOG.trace('Delaying reconnect for %1.1f seconds ...', - self.kombu_reconnect_delay) - time.sleep(self.kombu_reconnect_delay) - - def on_reconnection(new_channel): - """Callback invoked when the kombu reconnects and creates - a new channel, we use it the reconfigure our consumers. - """ - - self.set_transport_socket_timeout() - self._set_current_channel(new_channel) - for consumer in self._consumers: - consumer.declare(self) - - LOG.info(_LI('[%(connection_id)s] Reconnected to AMQP server on ' - '%(hostname)s:%(port)s via [%(transport)s] client' - 'with port %(client_port)s.'), - self._get_connection_info()) - - def execute_method(channel): - self._set_current_channel(channel) - method() - - # NOTE(sileht): Some dummy driver like the in-memory one doesn't - # have notion of recoverable connection, so we must raise the original - # exception like kombu does in this case. 
- has_modern_errors = hasattr( - self.connection.transport, 'recoverable_connection_errors', - ) - if has_modern_errors: - recoverable_errors = ( - self.connection.recoverable_channel_errors + - self.connection.recoverable_connection_errors) - else: - recoverable_errors = () - - try: - autoretry_method = self.connection.autoretry( - execute_method, channel=self.channel, - max_retries=retry, - errback=on_error, - interval_start=self.interval_start or 1, - interval_step=self.interval_stepping, - interval_max=self.interval_max, - on_revive=on_reconnection) - ret, channel = autoretry_method() - self._set_current_channel(channel) - return ret - except recoverable_errors as exc: - LOG.debug("Received recoverable error from kombu:", - exc_info=True) - error_callback and error_callback(exc) - self._set_current_channel(None) - # NOTE(sileht): number of retry exceeded and the connection - # is still broken - info = {'err_str': exc, 'retry': retry} - info.update(self.connection.info()) - msg = _('Unable to connect to AMQP server on ' - '%(hostname)s:%(port)s after %(retry)s ' - 'tries: %(err_str)s') % info - LOG.error(msg) - raise exceptions.MessageDeliveryFailure(msg) - except rpc_amqp.AMQPDestinationNotFound: - # NOTE(sileht): we must reraise this without - # trigger error_callback - raise - except Exception as exc: - error_callback and error_callback(exc) - raise - - def _set_current_channel(self, new_channel): - """Change the channel to use. - - NOTE(sileht): Must be called within the connection lock - """ - if new_channel == self.channel: - return - - if self.channel is not None: - self._declared_queues.clear() - self._declared_exchanges.clear() - self.connection.maybe_close_channel(self.channel) - - self.channel = new_channel - - if new_channel is not None: - if self.purpose == rpc_common.PURPOSE_LISTEN: - self._set_qos(new_channel) - self._producer = kombu.messaging.Producer(new_channel) - - def _set_qos(self, channel): - """Set QoS prefetch count on the channel""" - if self.rabbit_qos_prefetch_count > 0: - channel.basic_qos(0, - self.rabbit_qos_prefetch_count, - False) - - def close(self): - """Close/release this connection.""" - self._heartbeat_stop() - if self.connection: - for consumer, tag in self._consumers.items(): - if consumer.type == 'fanout': - LOG.debug('[connection close] Deleting fanout ' - 'queue: %s ' % consumer.queue.name) - consumer.queue.delete() - self._set_current_channel(None) - self.connection.release() - self.connection = None - - def reset(self): - """Reset a connection so it can be used again.""" - recoverable_errors = (self.connection.recoverable_channel_errors + - self.connection.recoverable_connection_errors) - with self._connection_lock: - try: - for consumer, tag in self._consumers.items(): - consumer.cancel(tag=tag) - except recoverable_errors: - self.ensure_connection() - self._consumers.clear() - self._active_tags.clear() - self._new_tags.clear() - self._tags = itertools.count(1) - - def _heartbeat_supported_and_enabled(self): - if self.heartbeat_timeout_threshold <= 0: - return False - - if self.connection.supports_heartbeats: - return True - elif not self._heartbeat_support_log_emitted: - LOG.warning(_LW("Heartbeat support requested but it is not " - "supported by the kombu driver or the broker")) - self._heartbeat_support_log_emitted = True - return False - - def set_transport_socket_timeout(self, timeout=None): - # NOTE(sileht): they are some case where the heartbeat check - # or the producer.send return only when the system socket - # timeout if reach. 
kombu doesn't allow use to customise this - # timeout so for py-amqp we tweak ourself - # NOTE(dmitryme): Current approach works with amqp==1.4.9 and - # kombu==3.0.33. Once the commit below is released, we should - # try to set the socket timeout in the constructor: - # https://github.com/celery/py-amqp/pull/64 - - heartbeat_timeout = self.heartbeat_timeout_threshold - if self._heartbeat_supported_and_enabled(): - # NOTE(sileht): we are supposed to send heartbeat every - # heartbeat_timeout, no need to wait more otherwise will - # disconnect us, so raise timeout earlier ourself - if timeout is None: - timeout = heartbeat_timeout - else: - timeout = min(heartbeat_timeout, timeout) - - try: - sock = self.channel.connection.sock - except AttributeError as e: - # Level is set to debug because otherwise we would spam the logs - LOG.debug('[%s] Failed to get socket attribute: %s' - % (self.connection_id, str(e))) - else: - sock.settimeout(timeout) - # TCP_USER_TIMEOUT is not defined on Windows and Mac OS X - if sys.platform != 'win32' and sys.platform != 'darwin': - try: - timeout = timeout * 1000 if timeout is not None else 0 - # NOTE(gdavoian): only integers and strings are allowed - # as socket options' values, and TCP_USER_TIMEOUT option - # can take only integer values, so we round-up the timeout - # to the nearest integer in order to ensure that the - # connection is not broken before the expected timeout - sock.setsockopt(socket.IPPROTO_TCP, - TCP_USER_TIMEOUT, - int(math.ceil(timeout))) - except socket.error as error: - code = error[0] - # TCP_USER_TIMEOUT not defined on kernels <2.6.37 - if code != errno.ENOPROTOOPT: - raise - - @contextlib.contextmanager - def _transport_socket_timeout(self, timeout): - self.set_transport_socket_timeout(timeout) - yield - self.set_transport_socket_timeout() - - def _heartbeat_check(self): - # NOTE(sileht): we are supposed to send at least one heartbeat - # every heartbeat_timeout_threshold, so no need to way more - self.connection.heartbeat_check(rate=self.heartbeat_rate) - - def _heartbeat_start(self): - if self._heartbeat_supported_and_enabled(): - self._heartbeat_exit_event = threading.Event() - self._heartbeat_thread = threading.Thread( - target=self._heartbeat_thread_job) - self._heartbeat_thread.daemon = True - self._heartbeat_thread.start() - else: - self._heartbeat_thread = None - - def _heartbeat_stop(self): - if self._heartbeat_thread is not None: - self._heartbeat_exit_event.set() - self._heartbeat_thread.join() - self._heartbeat_thread = None - - def _heartbeat_thread_job(self): - """Thread that maintains inactive connections - """ - while not self._heartbeat_exit_event.is_set(): - with self._connection_lock.for_heartbeat(): - - recoverable_errors = ( - self.connection.recoverable_channel_errors + - self.connection.recoverable_connection_errors) - - try: - try: - self._heartbeat_check() - # NOTE(sileht): We need to drain event to receive - # heartbeat from the broker but don't hold the - # connection too much times. 
In amqpdriver a connection - # is used exclusively for read or for write, so we have - # to do this for connection used for write drain_events - # already do that for other connection - try: - self.connection.drain_events(timeout=0.001) - except socket.timeout: - pass - except recoverable_errors as exc: - LOG.info(_LI("A recoverable connection/channel error " - "occurred, trying to reconnect: %s"), exc) - self.ensure_connection() - except Exception: - LOG.warning(_LW("Unexpected error during heartbeart " - "thread processing, retrying...")) - LOG.debug('Exception', exc_info=True) - - self._heartbeat_exit_event.wait( - timeout=self._heartbeat_wait_timeout) - self._heartbeat_exit_event.clear() - - def declare_consumer(self, consumer): - """Create a Consumer using the class that was passed in and - add it to our list of consumers - """ - - def _connect_error(exc): - log_info = {'topic': consumer.routing_key, 'err_str': exc} - LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': " - "%(err_str)s"), log_info) - - def _declare_consumer(): - consumer.declare(self) - tag = self._active_tags.get(consumer.queue_name) - if tag is None: - tag = next(self._tags) - self._active_tags[consumer.queue_name] = tag - self._new_tags.add(tag) - - self._consumers[consumer] = tag - return consumer - - with self._connection_lock: - return self.ensure(_declare_consumer, - error_callback=_connect_error) - - def consume(self, timeout=None): - """Consume from all queues/consumers.""" - - timer = rpc_common.DecayingTimer(duration=timeout) - timer.start() - - def _raise_timeout(exc): - LOG.debug('Timed out waiting for RPC response: %s', exc) - raise rpc_common.Timeout() - - def _recoverable_error_callback(exc): - if not isinstance(exc, rpc_common.Timeout): - self._new_tags = set(self._consumers.values()) - timer.check_return(_raise_timeout, exc) - - def _error_callback(exc): - _recoverable_error_callback(exc) - LOG.error(_LE('Failed to consume message from queue: %s'), - exc) - - def _consume(): - # NOTE(sileht): in case the acknowledgment or requeue of a - # message fail, the kombu transport can be disconnected - # In this case, we must redeclare our consumers, so raise - # a recoverable error to trigger the reconnection code. - if not self.connection.connected: - raise self.connection.recoverable_connection_errors[0] - - consume_max_retries = 2 - while self._new_tags and consume_max_retries: - for consumer, tag in self._consumers.items(): - if tag in self._new_tags: - try: - consumer.consume(tag=tag) - self._new_tags.remove(tag) - except self.connection.channel_errors as exc: - # NOTE(kbespalov): during the interval between - # a queue declaration and consumer declaration - # the queue can disappear. In this case - # we must redeclare queue and try to re-consume. - # More details is here: - # bugs.launchpad.net/oslo.messaging/+bug/1581148 - if exc.code == 404 and consume_max_retries: - consumer.declare(self) - # NOTE(kbespalov): the broker closes a channel - # at any channel error. The py-amqp catches - # this situation and re-open a new channel. - # So, we must re-declare all consumers again. 
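
A hedged, standalone sketch of the redeclare-on-404 pattern this comment describes, with the queue name and transport as illustrative assumptions:

import kombu

conn = kombu.Connection('memory:///')
channel = conn.channel()
queue = kombu.Queue('demo_queue', channel=channel)
try:
    # passive=True only checks existence; the broker answers NOT_FOUND
    # (404) and closes the channel if the queue has disappeared.
    queue.queue_declare(passive=True)
except conn.channel_errors:
    # Re-open a channel and redeclare before consuming again, which is
    # what Consumer.declare() does for every consumer in _consumers.
    channel = conn.channel()
    queue = kombu.Queue('demo_queue', channel=channel)
    queue.declare()
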
- self._new_tags = set(self._consumers.values()) - consume_max_retries -= 1 - break - else: - raise - - poll_timeout = (self._poll_timeout if timeout is None - else min(timeout, self._poll_timeout)) - while True: - if self._consume_loop_stopped: - return - - if self._heartbeat_supported_and_enabled(): - self._heartbeat_check() - - try: - self.connection.drain_events(timeout=poll_timeout) - return - except socket.timeout as exc: - poll_timeout = timer.check_return( - _raise_timeout, exc, maximum=self._poll_timeout) - - with self._connection_lock: - self.ensure(_consume, - recoverable_error_callback=_recoverable_error_callback, - error_callback=_error_callback) - - def stop_consuming(self): - self._consume_loop_stopped = True - - def declare_direct_consumer(self, topic, callback): - """Create a 'direct' queue. - In nova's use, this is generally a msg_id queue used for - responses for call/multicall - """ - - consumer = Consumer(exchange_name=topic, - queue_name=topic, - routing_key=topic, - type='direct', - durable=False, - exchange_auto_delete=True, - queue_auto_delete=False, - callback=callback, - rabbit_ha_queues=self.rabbit_ha_queues, - rabbit_queue_ttl=self.rabbit_transient_queues_ttl) - - self.declare_consumer(consumer) - - def declare_topic_consumer(self, exchange_name, topic, callback=None, - queue_name=None): - """Create a 'topic' consumer.""" - consumer = Consumer(exchange_name=exchange_name, - queue_name=queue_name or topic, - routing_key=topic, - type='topic', - durable=self.amqp_durable_queues, - exchange_auto_delete=self.amqp_auto_delete, - queue_auto_delete=self.amqp_auto_delete, - callback=callback, - rabbit_ha_queues=self.rabbit_ha_queues) - - self.declare_consumer(consumer) - - def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer.""" - - unique = uuid.uuid4().hex - exchange_name = '%s_fanout' % topic - queue_name = '%s_fanout_%s' % (topic, unique) - - consumer = Consumer(exchange_name=exchange_name, - queue_name=queue_name, - routing_key=topic, - type='fanout', - durable=False, - exchange_auto_delete=True, - queue_auto_delete=False, - callback=callback, - rabbit_ha_queues=self.rabbit_ha_queues, - rabbit_queue_ttl=self.rabbit_transient_queues_ttl) - - self.declare_consumer(consumer) - - def _ensure_publishing(self, method, exchange, msg, routing_key=None, - timeout=None, retry=None): - """Send to a publisher based on the publisher class.""" - - def _error_callback(exc): - log_info = {'topic': exchange.name, 'err_str': exc} - LOG.error(_LE("Failed to publish message to topic " - "'%(topic)s': %(err_str)s"), log_info) - LOG.debug('Exception', exc_info=exc) - - method = functools.partial(method, exchange, msg, routing_key, timeout) - - with self._connection_lock: - self.ensure(method, retry=retry, error_callback=_error_callback) - - def _get_connection_info(self): - info = self.connection.info() - client_port = None - if self.channel and hasattr(self.channel.connection, 'sock'): - client_port = self.channel.connection.sock.getsockname()[1] - info.update({'client_port': client_port, - 'connection_id': self.connection_id}) - return info - - def _publish(self, exchange, msg, routing_key=None, timeout=None): - """Publish a message.""" - - if not (exchange.passive or exchange.name in self._declared_exchanges): - exchange(self.channel).declare() - self._declared_exchanges.add(exchange.name) - - log_info = {'msg': msg, - 'who': exchange or 'default', - 'key': routing_key} - LOG.trace('Connection._publish: sending message %(msg)s to' - ' %(who)s with 
routing key %(key)s', log_info) - - # NOTE(sileht): no need to wait more, caller expects - # a answer before timeout is reached - with self._transport_socket_timeout(timeout): - self._producer.publish(msg, - exchange=exchange, - routing_key=routing_key, - expiration=timeout, - compression=self.kombu_compression) - - def _publish_and_creates_default_queue(self, exchange, msg, - routing_key=None, timeout=None): - """Publisher that declares a default queue - - When the exchange is missing instead of silently creates an exchange - not binded to a queue, this publisher creates a default queue - named with the routing_key - - This is mainly used to not miss notification in case of nobody consumes - them yet. If the future consumer bind the default queue it can retrieve - missing messages. - - _set_current_channel is responsible to cleanup the cache. - """ - queue_indentifier = (exchange.name, routing_key) - # NOTE(sileht): We only do it once per reconnection - # the Connection._set_current_channel() is responsible to clear - # this cache - if queue_indentifier not in self._declared_queues: - queue = kombu.entity.Queue( - channel=self.channel, - exchange=exchange, - durable=exchange.durable, - auto_delete=exchange.auto_delete, - name=routing_key, - routing_key=routing_key, - queue_arguments=_get_queue_arguments(self.rabbit_ha_queues, 0)) - log_info = {'key': routing_key, 'exchange': exchange} - LOG.trace( - 'Connection._publish_and_creates_default_queue: ' - 'declare queue %(key)s on %(exchange)s exchange', log_info) - queue.declare() - self._declared_queues.add(queue_indentifier) - - self._publish(exchange, msg, routing_key=routing_key, timeout=timeout) - - def _publish_and_raises_on_missing_exchange(self, exchange, msg, - routing_key=None, - timeout=None): - """Publisher that raises exception if exchange is missing.""" - if not exchange.passive: - raise RuntimeError("_publish_and_retry_on_missing_exchange() must " - "be called with an passive exchange.") - - try: - self._publish(exchange, msg, routing_key=routing_key, - timeout=timeout) - return - except self.connection.channel_errors as exc: - if exc.code == 404: - # NOTE(noelbk/sileht): - # If rabbit dies, the consumer can be disconnected before the - # publisher sends, and if the consumer hasn't declared the - # queue, the publisher's will send a message to an exchange - # that's not bound to a queue, and the message wll be lost. 
- # So we set passive=True to the publisher exchange and catch - # the 404 kombu ChannelError and retry until the exchange - # appears - raise rpc_amqp.AMQPDestinationNotFound( - "exchange %s doesn't exists" % exchange.name) - raise - - def direct_send(self, msg_id, msg): - """Send a 'direct' message.""" - exchange = kombu.entity.Exchange(name=msg_id, - type='direct', - durable=False, - auto_delete=True, - passive=True) - - self._ensure_publishing(self._publish_and_raises_on_missing_exchange, - exchange, msg, routing_key=msg_id) - - def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None): - """Send a 'topic' message.""" - exchange = kombu.entity.Exchange( - name=exchange_name, - type='topic', - durable=self.amqp_durable_queues, - auto_delete=self.amqp_auto_delete) - - self._ensure_publishing(self._publish, exchange, msg, - routing_key=topic, timeout=timeout, - retry=retry) - - def fanout_send(self, topic, msg, retry=None): - """Send a 'fanout' message.""" - exchange = kombu.entity.Exchange(name='%s_fanout' % topic, - type='fanout', - durable=False, - auto_delete=True) - - self._ensure_publishing(self._publish, exchange, msg, retry=retry) - - def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs): - """Send a notify message on a topic.""" - exchange = kombu.entity.Exchange( - name=exchange_name, - type='topic', - durable=self.amqp_durable_queues, - auto_delete=self.amqp_auto_delete) - - self._ensure_publishing(self._publish_and_creates_default_queue, - exchange, msg, routing_key=topic, retry=retry) - - -class RabbitDriver(amqpdriver.AMQPDriverBase): - """RabbitMQ Driver - - The ``rabbit`` driver is the default driver used in OpenStack's - integration tests. - - The driver is aliased as ``kombu`` to support upgrading existing - installations with older settings. - - """ - - def __init__(self, conf, url, - default_exchange=None, - allowed_remote_exmods=None): - opt_group = cfg.OptGroup(name='oslo_messaging_rabbit', - title='RabbitMQ driver options') - conf.register_group(opt_group) - conf.register_opts(rabbit_opts, group=opt_group) - conf.register_opts(rpc_amqp.amqp_opts, group=opt_group) - conf.register_opts(base.base_opts, group=opt_group) - - self.missing_destination_retry_timeout = ( - conf.oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout) - - self.prefetch_size = ( - conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count) - - # the pool configuration properties - max_size = conf.oslo_messaging_rabbit.rpc_conn_pool_size - min_size = conf.oslo_messaging_rabbit.conn_pool_min_size - ttl = conf.oslo_messaging_rabbit.conn_pool_ttl - - connection_pool = pool.ConnectionPool( - conf, max_size, min_size, ttl, - url, Connection) - - super(RabbitDriver, self).__init__( - conf, url, - connection_pool, - default_exchange, - allowed_remote_exmods - ) - - def require_features(self, requeue=True): - pass diff --git a/oslo_messaging/_drivers/impl_zmq.py b/oslo_messaging/_drivers/impl_zmq.py deleted file mode 100644 index 90c2c20..0000000 --- a/oslo_messaging/_drivers/impl_zmq.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2011 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os -import threading - -from stevedore import driver - -from oslo_messaging._drivers import base -from oslo_messaging._drivers import common as rpc_common -from oslo_messaging._drivers.zmq_driver.client import zmq_client -from oslo_messaging._drivers.zmq_driver.server import zmq_server -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_options -from oslo_messaging._i18n import _LE - - -RPCException = rpc_common.RPCException -LOG = logging.getLogger(__name__) - - -class LazyDriverItem(object): - - def __init__(self, item_cls, *args, **kwargs): - self._lock = threading.Lock() - self.item = None - self.item_class = item_cls - self.args = args - self.kwargs = kwargs - self.process_id = os.getpid() - - def get(self): - # NOTE(ozamiatin): Lazy initialization. - # All init stuff moved closer to usage point - lazy init. - # Better design approach is to initialize in the driver's - # __init__, but 'fork' extensively used by services - # breaks all things. - - if self.item is not None and os.getpid() == self.process_id: - return self.item - - with self._lock: - if self.item is None or os.getpid() != self.process_id: - self.process_id = os.getpid() - self.item = self.item_class(*self.args, **self.kwargs) - return self.item - - def cleanup(self): - if self.item: - self.item.cleanup() - - -class ZmqDriver(base.BaseDriver): - - """ZeroMQ Driver implementation. - - Provides implementation of RPC and Notifier APIs by means - of ZeroMQ library. - - See :doc:`zmq_driver` for details. - """ - - def __init__(self, conf, url, default_exchange=None, - allowed_remote_exmods=None): - """Construct ZeroMQ driver. - - Initialize driver options. 
- - Construct matchmaker - a pluggable interface to target management - Name Service - - Construct client and server controllers - - :param conf: oslo.messaging configuration object - :type conf: oslo_config.CONF - :param url: transport URL - :type url: TransportUrl - :param default_exchange: Not used in zmq implementation - :type default_exchange: None - :param allowed_remote_exmods: remote exception passing options - :type allowed_remote_exmods: list - """ - zmq = zmq_async.import_zmq() - if zmq is None: - raise ImportError(_LE("ZeroMQ is not available!")) - - zmq_options.register_opts(conf) - self.conf = conf - self.allowed_remote_exmods = allowed_remote_exmods - - self.matchmaker = driver.DriverManager( - 'oslo.messaging.zmq.matchmaker', - self.get_matchmaker_backend(url), - ).driver(self.conf, url=url) - - client_cls = zmq_client.ZmqClientProxy - if conf.oslo_messaging_zmq.use_pub_sub and not \ - conf.oslo_messaging_zmq.use_router_proxy: - client_cls = zmq_client.ZmqClientMixDirectPubSub - elif not conf.oslo_messaging_zmq.use_pub_sub and not \ - conf.oslo_messaging_zmq.use_router_proxy: - client_cls = zmq_client.ZmqClientDirect - - self.client = LazyDriverItem( - client_cls, self.conf, self.matchmaker, - self.allowed_remote_exmods) - - self.notifier = LazyDriverItem( - client_cls, self.conf, self.matchmaker, - self.allowed_remote_exmods) - - super(ZmqDriver, self).__init__(conf, url, default_exchange, - allowed_remote_exmods) - - def get_matchmaker_backend(self, url): - zmq_transport, p, matchmaker_backend = url.transport.partition('+') - assert zmq_transport == 'zmq', "Needs to be zmq for this transport!" - if not matchmaker_backend: - return self.conf.oslo_messaging_zmq.rpc_zmq_matchmaker - elif matchmaker_backend not in zmq_options.MATCHMAKER_BACKENDS: - raise rpc_common.RPCException( - _LE("Incorrect matchmaker backend name %(backend_name)s! "
- "Available names are: %(available_names)s") % - {"backend_name": matchmaker_backend, - "available_names": zmq_options.MATCHMAKER_BACKENDS}) - return matchmaker_backend - - def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, - retry=None): - """Send RPC message to server - - :param target: Message destination target - :type target: oslo_messaging.Target - :param ctxt: Message context - :type ctxt: dict - :param message: Message payload to pass - :type message: dict - :param wait_for_reply: Waiting for reply flag - :type wait_for_reply: bool - :param timeout: Reply waiting timeout in seconds - :type timeout: int - :param retry: an optional default connection retries configuration - None or -1 means to retry forever - 0 means no retry - N means N retries - :type retry: int - """ - client = self.client.get() - if wait_for_reply: - return client.send_call(target, ctxt, message, timeout, retry) - elif target.fanout: - client.send_fanout(target, ctxt, message, retry) - else: - client.send_cast(target, ctxt, message, retry) - - def send_notification(self, target, ctxt, message, version, retry=None): - """Send notification to server - - :param target: Message destination target - :type target: oslo_messaging.Target - :param ctxt: Message context - :type ctxt: dict - :param message: Message payload to pass - :type message: dict - :param version: Messaging API version - :type version: str - :param retry: an optional default connection retries configuration - None or -1 means to retry forever - 0 means no retry - N means N retries - :type retry: int - """ - client = self.notifier.get() - client.send_notify(target, ctxt, message, version, retry) - - def listen(self, target, batch_size, batch_timeout): - """Listen to a specified target on a server side - - :param target: Message destination target - :type target: oslo_messaging.Target - """ - listener = zmq_server.ZmqServer(self, self.conf, self.matchmaker, - target) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - """Listen to a specified list of targets on a server side - - :param targets_and_priorities: List of pairs (target, priority) - :type targets_and_priorities: list - :param pool: Not used for zmq implementation - :type pool: object - """ - listener = zmq_server.ZmqNotificationServer( - self, self.conf, self.matchmaker, targets_and_priorities) - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def cleanup(self): - """Cleanup all driver's connections finally - """ - self.client.cleanup() - self.notifier.cleanup() diff --git a/oslo_messaging/_drivers/pika_driver/__init__.py b/oslo_messaging/_drivers/pika_driver/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/pika_driver/pika_commons.py b/oslo_messaging/_drivers/pika_driver/pika_commons.py deleted file mode 100644 index 0737043..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_commons.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import select -import socket - -from oslo_utils import timeutils -from pika import exceptions as pika_exceptions -import six - - -PIKA_CONNECTIVITY_ERRORS = ( - pika_exceptions.AMQPConnectionError, - pika_exceptions.ConnectionClosed, - pika_exceptions.ChannelClosed, - socket.timeout, - select.error -) - -EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' - -INFINITE_STOP_WATCH = timeutils.StopWatch(duration=None).start() diff --git a/oslo_messaging/_drivers/pika_driver/pika_connection.py b/oslo_messaging/_drivers/pika_driver/pika_connection.py deleted file mode 100644 index f0dca5a..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_connection.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import logging -import os -import threading - -import futurist -from pika.adapters import select_connection -from pika import exceptions as pika_exceptions -from pika import spec as pika_spec - -from oslo_utils import eventletutils - -current_thread = eventletutils.fetch_current_thread_functor() - -LOG = logging.getLogger(__name__) - - -class ThreadSafePikaConnection(object): - def __init__(self, parameters=None, - _impl_class=select_connection.SelectConnection): - self.params = parameters - self._connection_lock = threading.Lock() - self._evt_closed = threading.Event() - self._task_queue = collections.deque() - self._pending_connection_futures = set() - - create_connection_future = self._register_pending_future() - - def on_open_error(conn, err): - create_connection_future.set_exception( - pika_exceptions.AMQPConnectionError(err) - ) - - self._impl = _impl_class( - parameters=parameters, - on_open_callback=create_connection_future.set_result, - on_open_error_callback=on_open_error, - on_close_callback=self._on_connection_close, - stop_ioloop_on_close=False, - ) - self._interrupt_pipein, self._interrupt_pipeout = os.pipe() - self._impl.ioloop.add_handler(self._interrupt_pipein, - self._impl.ioloop.read_interrupt, - select_connection.READ) - - self._thread = threading.Thread(target=self._process_io) - self._thread.daemon = True - self._thread_id = None - self._thread.start() - - create_connection_future.result() - - def _check_called_not_from_event_loop(self): - if current_thread() == self._thread_id: - raise RuntimeError("This call is not allowed from ioloop thread") - - def _execute_task(self, func, *args, **kwargs): - if current_thread() == self._thread_id: - return func(*args, **kwargs) - - future = futurist.Future() - self._task_queue.append((func, args, kwargs, future)) - - if 
self._evt_closed.is_set(): - self._notify_all_futures_connection_close() - elif self._interrupt_pipeout is not None: - os.write(self._interrupt_pipeout, b'X') - - return future.result() - - def _register_pending_future(self): - future = futurist.Future() - self._pending_connection_futures.add(future) - - def on_done_callback(fut): - try: - self._pending_connection_futures.remove(fut) - except KeyError: - pass - - future.add_done_callback(on_done_callback) - - if self._evt_closed.is_set(): - self._notify_all_futures_connection_close() - return future - - def _notify_all_futures_connection_close(self): - while self._task_queue: - # deque.pop() raises IndexError (not KeyError) when empty - try: - method_res_future = self._task_queue.pop()[3] - except IndexError: - break - else: - method_res_future.set_exception( - pika_exceptions.ConnectionClosed() - ) - - while self._pending_connection_futures: - try: - pending_connection_future = ( - self._pending_connection_futures.pop() - ) - except KeyError: - break - else: - pending_connection_future.set_exception( - pika_exceptions.ConnectionClosed() - ) - - def _on_connection_close(self, conn, reply_code, reply_text): - self._evt_closed.set() - self._notify_all_futures_connection_close() - if self._interrupt_pipeout: - os.close(self._interrupt_pipeout) - os.close(self._interrupt_pipein) - - def add_on_close_callback(self, callback): - return self._execute_task(self._impl.add_on_close_callback, callback) - - def _do_process_io(self): - while self._task_queue: - func, args, kwargs, future = self._task_queue.pop() - try: - res = func(*args, **kwargs) - except BaseException as e: - LOG.exception(e) - future.set_exception(e) - else: - future.set_result(res) - - self._impl.ioloop.poll() - self._impl.ioloop.process_timeouts() - - def _process_io(self): - self._thread_id = current_thread() - while not self._evt_closed.is_set(): - try: - self._do_process_io() - except BaseException: - LOG.exception("Error during processing connection's IO") - - def close(self, *args, **kwargs): - self._check_called_not_from_event_loop() - - res = self._execute_task(self._impl.close, *args, **kwargs) - - self._evt_closed.wait() - self._thread.join() - return res - - def channel(self, channel_number=None): - self._check_called_not_from_event_loop() - - channel_opened_future = self._register_pending_future() - - impl_channel = self._execute_task( - self._impl.channel, - on_open_callback=channel_opened_future.set_result, - channel_number=channel_number - ) - - # Create our proxy channel - channel = ThreadSafePikaChannel(impl_channel, self) - - # Link implementation channel with our proxy channel - impl_channel._set_cookie(channel) - - channel_opened_future.result() - return channel - - def add_timeout(self, timeout, callback): - return self._execute_task(self._impl.add_timeout, timeout, callback) - - def remove_timeout(self, timeout_id): - return self._execute_task(self._impl.remove_timeout, timeout_id) - - @property - def is_closed(self): - return self._impl.is_closed - - @property - def is_closing(self): - return self._impl.is_closing - - @property - def is_open(self): - return self._impl.is_open - - -class ThreadSafePikaChannel(object): # pylint: disable=R0904,R0902 - - def __init__(self, channel_impl, connection): - self._impl = channel_impl - self._connection = connection - - self._delivery_confirmation = False - - self._message_returned = False - self._current_future = None - - self._evt_closed = threading.Event() - - self.add_on_close_callback(self._on_channel_close) - - def _execute_task(self, func, *args, **kwargs): - return
self._connection._execute_task(func, *args, **kwargs) - - def _on_channel_close(self, channel, reply_code, reply_text): - self._evt_closed.set() - - if self._current_future: - self._current_future.set_exception( - pika_exceptions.ChannelClosed(reply_code, reply_text)) - - def _on_message_confirmation(self, frame): - self._current_future.set_result(frame) - - def add_on_close_callback(self, callback): - self._execute_task(self._impl.add_on_close_callback, callback) - - def add_on_cancel_callback(self, callback): - self._execute_task(self._impl.add_on_cancel_callback, callback) - - def __int__(self): - return self.channel_number - - @property - def channel_number(self): - return self._impl.channel_number - - @property - def is_closed(self): - return self._impl.is_closed - - @property - def is_closing(self): - return self._impl.is_closing - - @property - def is_open(self): - return self._impl.is_open - - def close(self, reply_code=0, reply_text="Normal Shutdown"): - self._impl.close(reply_code=reply_code, reply_text=reply_text) - self._evt_closed.wait() - - def _check_called_not_from_event_loop(self): - self._connection._check_called_not_from_event_loop() - - def flow(self, active): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task( - self._impl.flow, callback=self._current_future.set_result, - active=active - ) - - return self._current_future.result() - - def basic_consume(self, # pylint: disable=R0913 - consumer_callback, - queue, - no_ack=False, - exclusive=False, - consumer_tag=None, - arguments=None): - - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task( - self._impl.add_callback, self._current_future.set_result, - replies=[pika_spec.Basic.ConsumeOk], one_shot=True - ) - - self._impl.add_callback(self._current_future.set_result, - replies=[pika_spec.Basic.ConsumeOk], - one_shot=True) - tag = self._execute_task( - self._impl.basic_consume, - consumer_callback=consumer_callback, - queue=queue, - no_ack=no_ack, - exclusive=exclusive, - consumer_tag=consumer_tag, - arguments=arguments - ) - - self._current_future.result() - return tag - - def basic_cancel(self, consumer_tag): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task( - self._impl.basic_cancel, - callback=self._current_future.set_result, - consumer_tag=consumer_tag, - nowait=False) - self._current_future.result() - - def basic_ack(self, delivery_tag=0, multiple=False): - return self._execute_task( - self._impl.basic_ack, delivery_tag=delivery_tag, multiple=multiple) - - def basic_nack(self, delivery_tag=None, multiple=False, requeue=True): - return self._execute_task( - self._impl.basic_nack, delivery_tag=delivery_tag, - multiple=multiple, requeue=requeue - ) - - def publish(self, exchange, routing_key, body, # pylint: disable=R0913 - properties=None, mandatory=False, immediate=False): - - if self._delivery_confirmation: - self._check_called_not_from_event_loop() - - # In publisher-acknowledgments mode - self._message_returned = False - self._current_future = futurist.Future() - - self._execute_task(self._impl.basic_publish, - exchange=exchange, - routing_key=routing_key, - body=body, - properties=properties, - mandatory=mandatory, - immediate=immediate) - - conf_method = self._current_future.result().method - - if isinstance(conf_method, pika_spec.Basic.Nack): - raise pika_exceptions.NackError((None,)) - else: - assert isinstance(conf_method, 
pika_spec.Basic.Ack), ( - conf_method) - - if self._message_returned: - raise pika_exceptions.UnroutableError((None,)) - else: - # In non-publisher-acknowledgments mode - self._execute_task(self._impl.basic_publish, - exchange=exchange, - routing_key=routing_key, - body=body, - properties=properties, - mandatory=mandatory, - immediate=immediate) - - def basic_qos(self, prefetch_size=0, prefetch_count=0, all_channels=False): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.basic_qos, - callback=self._current_future.set_result, - prefetch_size=prefetch_size, - prefetch_count=prefetch_count, - all_channels=all_channels) - self._current_future.result() - - def basic_recover(self, requeue=False): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task( - self._impl.basic_recover, - callback=lambda: self._current_future.set_result(None), - requeue=requeue - ) - self._current_future.result() - - def basic_reject(self, delivery_tag=None, requeue=True): - self._execute_task(self._impl.basic_reject, - delivery_tag=delivery_tag, - requeue=requeue) - - def _on_message_returned(self, *args, **kwargs): - self._message_returned = True - - def confirm_delivery(self): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.add_callback, - callback=self._current_future.set_result, - replies=[pika_spec.Confirm.SelectOk], - one_shot=True) - self._execute_task(self._impl.confirm_delivery, - callback=self._on_message_confirmation, - nowait=False) - self._current_future.result() - - self._delivery_confirmation = True - self._execute_task(self._impl.add_on_return_callback, - self._on_message_returned) - - def exchange_declare(self, exchange=None, # pylint: disable=R0913 - exchange_type='direct', passive=False, durable=False, - auto_delete=False, internal=False, - arguments=None, **kwargs): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.exchange_declare, - callback=self._current_future.set_result, - exchange=exchange, - exchange_type=exchange_type, - passive=passive, - durable=durable, - auto_delete=auto_delete, - internal=internal, - nowait=False, - arguments=arguments, - type=kwargs["type"] if kwargs else None) - - return self._current_future.result() - - def exchange_delete(self, exchange=None, if_unused=False): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.exchange_delete, - callback=self._current_future.set_result, - exchange=exchange, - if_unused=if_unused, - nowait=False) - - return self._current_future.result() - - def exchange_bind(self, destination=None, source=None, routing_key='', - arguments=None): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.exchange_bind, - callback=self._current_future.set_result, - destination=destination, - source=source, - routing_key=routing_key, - nowait=False, - arguments=arguments) - - return self._current_future.result() - - def exchange_unbind(self, destination=None, source=None, routing_key='', - arguments=None): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.exchange_unbind, - callback=self._current_future.set_result, - destination=destination, - source=source, - routing_key=routing_key, - 
nowait=False, - arguments=arguments) - - return self._current_future.result() - - def queue_declare(self, queue='', passive=False, durable=False, - exclusive=False, auto_delete=False, - arguments=None): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.queue_declare, - callback=self._current_future.set_result, - queue=queue, - passive=passive, - durable=durable, - exclusive=exclusive, - auto_delete=auto_delete, - nowait=False, - arguments=arguments) - - return self._current_future.result() - - def queue_delete(self, queue='', if_unused=False, if_empty=False): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.queue_delete, - callback=self._current_future.set_result, - queue=queue, - if_unused=if_unused, - if_empty=if_empty, - nowait=False) - - return self._current_future.result() - - def queue_purge(self, queue=''): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.queue_purge, - callback=self._current_future.set_result, - queue=queue, - nowait=False) - return self._current_future.result() - - def queue_bind(self, queue, exchange, routing_key=None, - arguments=None): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.queue_bind, - callback=self._current_future.set_result, - queue=queue, - exchange=exchange, - routing_key=routing_key, - nowait=False, - arguments=arguments) - return self._current_future.result() - - def queue_unbind(self, queue='', exchange=None, routing_key=None, - arguments=None): - self._check_called_not_from_event_loop() - - self._current_future = futurist.Future() - self._execute_task(self._impl.queue_unbind, - callback=self._current_future.set_result, - queue=queue, - exchange=exchange, - routing_key=routing_key, - arguments=arguments) - return self._current_future.result() diff --git a/oslo_messaging/_drivers/pika_driver/pika_connection_factory.py b/oslo_messaging/_drivers/pika_driver/pika_connection_factory.py deleted file mode 100644 index a78c55e..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_connection_factory.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
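ThreadSafePikaConnection and ThreadSafePikaChannel above marshal every pika call onto a single ioloop thread and hand the caller a future to block on. The following is a minimal, self-contained sketch of that marshalling pattern, assuming nothing beyond the standard library plus concurrent.futures; IoThreadProxy, execute and _loop are illustrative names, not part of the driver.

import collections
import os
import threading
from concurrent import futures


class IoThreadProxy(object):
    """Sketch: run callables on a dedicated IO thread, wait via futures."""

    def __init__(self):
        self._tasks = collections.deque()
        self._rd, self._wr = os.pipe()  # wakeup pipe, as in the driver above
        self._closed = threading.Event()
        self._thread = threading.Thread(target=self._loop)
        self._thread.daemon = True
        self._thread.start()

    def execute(self, func, *args, **kwargs):
        # Enqueue the call, wake the IO thread, then block on the result.
        future = futures.Future()
        self._tasks.append((func, args, kwargs, future))
        os.write(self._wr, b'X')
        return future.result()

    def _loop(self):
        while not self._closed.is_set():
            os.read(self._rd, 1)  # sleep until a task (or close) arrives
            while self._tasks:
                func, args, kwargs, future = self._tasks.popleft()
                try:
                    future.set_result(func(*args, **kwargs))
                except BaseException as e:
                    future.set_exception(e)

    def close(self):
        self._closed.set()
        os.write(self._wr, b'X')  # wake the loop so it can exit
        self._thread.join()

The real connection additionally tracks pending futures so that a dropped connection can fail them all at once (see _notify_all_futures_connection_close above); that bookkeeping is elided here.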
-import logging -import random -import socket -import threading -import time - -from oslo_config import cfg -import pika -from pika import credentials as pika_credentials - -from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns -from oslo_messaging._drivers.pika_driver import pika_connection -from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc - -LOG = logging.getLogger(__name__) - -# constant for setting tcp_user_timeout socket option -# (it should be defined in 'select' module of standard library in future) -TCP_USER_TIMEOUT = 18 - -# constants for creating connection statistics -HOST_CONNECTION_LAST_TRY_TIME = "last_try_time" -HOST_CONNECTION_LAST_SUCCESS_TRY_TIME = "last_success_try_time" - -pika_opts = [ - cfg.IntOpt('channel_max', - help='Maximum number of channels to allow'), - cfg.IntOpt('frame_max', - help='The maximum byte size for an AMQP frame'), - cfg.IntOpt('heartbeat_interval', default=3, - help="How often to send heartbeats for consumer's connections"), - cfg.BoolOpt('ssl', - help='Enable SSL'), - cfg.DictOpt('ssl_options', - help='Arguments passed to ssl.wrap_socket'), - cfg.FloatOpt('socket_timeout', default=0.25, - help="Set socket timeout in seconds for connection's socket"), - cfg.FloatOpt('tcp_user_timeout', default=0.25, - help="Set TCP_USER_TIMEOUT in seconds for connection's " - "socket"), - cfg.FloatOpt('host_connection_reconnect_delay', default=0.25, - help="Set delay for reconnection to some host which has " - "connection error"), - cfg.StrOpt('connection_factory', default="single", - choices=["new", "single", "read_write"], - help='Connection factory implementation') -] - - -class PikaConnectionFactory(object): - - def __init__(self, url, conf): - self._url = url - self._conf = conf - - self._connection_lock = threading.RLock() - - if not url.hosts: - raise ValueError("You should provide at least one RabbitMQ host") - - # initializing connection parameters for configured RabbitMQ hosts - self._common_pika_params = { - 'virtual_host': url.virtual_host, - 'channel_max': conf.oslo_messaging_pika.channel_max, - 'frame_max': conf.oslo_messaging_pika.frame_max, - 'ssl': conf.oslo_messaging_pika.ssl, - 'ssl_options': conf.oslo_messaging_pika.ssl_options, - 'socket_timeout': conf.oslo_messaging_pika.socket_timeout - } - - self._host_list = url.hosts - self._heartbeat_interval = conf.oslo_messaging_pika.heartbeat_interval - self._host_connection_reconnect_delay = ( - conf.oslo_messaging_pika.host_connection_reconnect_delay - ) - self._tcp_user_timeout = conf.oslo_messaging_pika.tcp_user_timeout - - self._connection_host_status = {} - - self._cur_connection_host_num = random.randint( - 0, len(url.hosts) - 1 - ) - - def cleanup(self): - pass - - def create_connection(self, for_listening=False): - """Create and return connection to any available host. - - :return: created connection - :raise: ConnectionException if all hosts are not reachable - """ - - with self._connection_lock: - - host_count = len(self._host_list) - connection_attempts = host_count - - while connection_attempts > 0: - self._cur_connection_host_num += 1 - self._cur_connection_host_num %= host_count - try: - return self._create_host_connection( - self._cur_connection_host_num, for_listening - ) - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: - LOG.warning("Can't establish connection to host. %s", e) - except pika_drv_exc.HostConnectionNotAllowedException as e: - LOG.warning("Connection to host is not allowed. 
%s", e) - - connection_attempts -= 1 - - raise pika_drv_exc.EstablishConnectionException( - "Can not establish connection to any configured RabbitMQ " - "host: " + str(self._host_list) - ) - - def _set_tcp_user_timeout(self, s): - if not self._tcp_user_timeout: - return - try: - s.setsockopt( - socket.IPPROTO_TCP, TCP_USER_TIMEOUT, - int(self._tcp_user_timeout * 1000) - ) - except socket.error: - LOG.warning( - "Whoops, this kernel doesn't seem to support TCP_USER_TIMEOUT." - ) - - def _create_host_connection(self, host_index, for_listening): - """Create new connection to host #host_index - - :param host_index: Integer, number of host for connection establishing - :param for_listening: Boolean, creates connection for listening - if True - :return: New connection - """ - host = self._host_list[host_index] - - cur_time = time.time() - - host_connection_status = self._connection_host_status.get(host) - - if host_connection_status is None: - host_connection_status = { - HOST_CONNECTION_LAST_SUCCESS_TRY_TIME: 0, - HOST_CONNECTION_LAST_TRY_TIME: 0 - } - self._connection_host_status[host] = host_connection_status - - last_success_time = host_connection_status[ - HOST_CONNECTION_LAST_SUCCESS_TRY_TIME - ] - last_time = host_connection_status[ - HOST_CONNECTION_LAST_TRY_TIME - ] - - # raise HostConnectionNotAllowedException if we tried to establish - # connection in last 'host_connection_reconnect_delay' and got - # failure - if (last_time != last_success_time and - cur_time - last_time < - self._host_connection_reconnect_delay): - raise pika_drv_exc.HostConnectionNotAllowedException( - "Connection to host #{} is not allowed now because of " - "previous failure".format(host_index) - ) - - try: - connection = self._do_create_host_connection( - host, for_listening - ) - self._connection_host_status[host][ - HOST_CONNECTION_LAST_SUCCESS_TRY_TIME - ] = cur_time - - return connection - finally: - self._connection_host_status[host][ - HOST_CONNECTION_LAST_TRY_TIME - ] = cur_time - - def _do_create_host_connection(self, host, for_listening): - connection_params = pika.ConnectionParameters( - host=host.hostname, - port=host.port, - credentials=pika_credentials.PlainCredentials( - host.username, host.password - ), - heartbeat_interval=( - self._heartbeat_interval if for_listening else None - ), - **self._common_pika_params - ) - if for_listening: - connection = pika_connection.ThreadSafePikaConnection( - parameters=connection_params - ) - else: - connection = pika.BlockingConnection( - parameters=connection_params - ) - connection.params = connection_params - - self._set_tcp_user_timeout(connection._impl.socket) - return connection - - -class NotClosableConnection(object): - def __init__(self, connection): - self._connection = connection - - def __getattr__(self, item): - return getattr(self._connection, item) - - def close(self): - pass - - -class SinglePikaConnectionFactory(PikaConnectionFactory): - def __init__(self, url, conf): - super(SinglePikaConnectionFactory, self).__init__(url, conf) - self._connection = None - - def create_connection(self, for_listening=False): - with self._connection_lock: - if self._connection is None or not self._connection.is_open: - self._connection = ( - super(SinglePikaConnectionFactory, self).create_connection( - True - ) - ) - return NotClosableConnection(self._connection) - - def cleanup(self): - with self._connection_lock: - if self._connection is not None and self._connection.is_open: - try: - self._connection.close() - except Exception: - LOG.warning( - "Unexpected 
exception during connection closing", - exc_info=True - ) - self._connection = None - - -class ReadWritePikaConnectionFactory(PikaConnectionFactory): - def __init__(self, url, conf): - super(ReadWritePikaConnectionFactory, self).__init__(url, conf) - self._read_connection = None - self._write_connection = None - - def create_connection(self, for_listening=False): - with self._connection_lock: - if for_listening: - if (self._read_connection is None or - not self._read_connection.is_open): - self._read_connection = super( - ReadWritePikaConnectionFactory, self - ).create_connection(True) - return NotClosableConnection(self._read_connection) - else: - if (self._write_connection is None or - not self._write_connection.is_open): - self._write_connection = super( - ReadWritePikaConnectionFactory, self - ).create_connection(True) - return NotClosableConnection(self._write_connection) - - def cleanup(self): - with self._connection_lock: - if (self._read_connection is not None and - self._read_connection.is_open): - try: - self._read_connection.close() - except Exception: - LOG.warning( - "Unexpected exception during connection closing", - exc_info=True - ) - self._read_connection = None - - if (self._write_connection is not None and - self._write_connection.is_open): - try: - self._write_connection.close() - except Exception: - LOG.warning( - "Unexpected exception during connection closing", - exc_info=True - ) - self._write_connection = None diff --git a/oslo_messaging/_drivers/pika_driver/pika_engine.py b/oslo_messaging/_drivers/pika_driver/pika_engine.py deleted file mode 100644 index 97b6792..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_engine.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging -import os -import threading -import uuid - -from oslo_utils import eventletutils -import pika_pool -from stevedore import driver - -from oslo_messaging._drivers import common as drv_cmn -from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns -from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc - -LOG = logging.getLogger(__name__) - - -class _PooledConnectionWithConfirmations(pika_pool.Connection): - """Derived from 'pika_pool.Connection' and extends its logic - adds - 'confirm_delivery' call after channel creation to enable delivery - confirmation for channel - """ - @property - def channel(self): - if self.fairy.channel is None: - self.fairy.channel = self.fairy.cxn.channel() - self.fairy.channel.confirm_delivery() - return self.fairy.channel - - -class PikaEngine(object): - """Used for shared functionality between other pika driver modules, like - connection factory, connection pools, processing and holding configuration, - etc. 
- """ - - def __init__(self, conf, url, default_exchange=None, - allowed_remote_exmods=None): - conf = drv_cmn.ConfigOptsProxy(conf, url) - self.conf = conf - self.url = url - - self._connection_factory_type = ( - self.conf.oslo_messaging_pika.connection_factory - ) - - self._connection_factory = None - self._connection_without_confirmation_pool = None - self._connection_with_confirmation_pool = None - self._pid = None - self._init_lock = threading.Lock() - - self.host_connection_reconnect_delay = ( - conf.oslo_messaging_pika.host_connection_reconnect_delay - ) - - # processing rpc options - self.default_rpc_exchange = ( - conf.oslo_messaging_pika.default_rpc_exchange - ) - self.rpc_reply_exchange = ( - conf.oslo_messaging_pika.rpc_reply_exchange - ) - - self.allowed_remote_exmods = [pika_drv_cmns.EXCEPTIONS_MODULE] - if allowed_remote_exmods: - self.allowed_remote_exmods.extend(allowed_remote_exmods) - - self.rpc_listener_prefetch_count = ( - conf.oslo_messaging_pika.rpc_listener_prefetch_count - ) - - self.default_rpc_retry_attempts = ( - conf.oslo_messaging_pika.default_rpc_retry_attempts - ) - - self.rpc_retry_delay = ( - conf.oslo_messaging_pika.rpc_retry_delay - ) - if self.rpc_retry_delay < 0: - raise ValueError("rpc_retry_delay should be non-negative integer") - - self.rpc_reply_listener_prefetch_count = ( - conf.oslo_messaging_pika.rpc_listener_prefetch_count - ) - - self.rpc_reply_retry_attempts = ( - conf.oslo_messaging_pika.rpc_reply_retry_attempts - ) - self.rpc_reply_retry_delay = ( - conf.oslo_messaging_pika.rpc_reply_retry_delay - ) - if self.rpc_reply_retry_delay < 0: - raise ValueError("rpc_reply_retry_delay should be non-negative " - "integer") - - self.rpc_queue_expiration = ( - self.conf.oslo_messaging_pika.rpc_queue_expiration - ) - - # processing notification options - self.default_notification_exchange = ( - conf.oslo_messaging_pika.default_notification_exchange - ) - - self.notification_persistence = ( - conf.oslo_messaging_pika.notification_persistence - ) - - self.notification_listener_prefetch_count = ( - conf.oslo_messaging_pika.notification_listener_prefetch_count - ) - - self.default_notification_retry_attempts = ( - conf.oslo_messaging_pika.default_notification_retry_attempts - ) - if self.default_notification_retry_attempts is None: - raise ValueError("default_notification_retry_attempts should be " - "an integer") - self.notification_retry_delay = ( - conf.oslo_messaging_pika.notification_retry_delay - ) - if (self.notification_retry_delay is None or - self.notification_retry_delay < 0): - raise ValueError("notification_retry_delay should be non-negative " - "integer") - - def _init_if_needed(self): - cur_pid = os.getpid() - - if self._pid == cur_pid: - return - - with self._init_lock: - if self._pid == cur_pid: - return - - if self._pid: - LOG.warning("New pid is detected. Old: %s, new: %s. 
" - "Cleaning up...", self._pid, cur_pid) - - # Note(dukhlov): we need to force select poller usage in case - # when 'thread' module is monkey patched becase current - # eventlet implementation does not support patching of - # poll/epoll/kqueue - if eventletutils.is_monkey_patched("thread"): - from pika.adapters import select_connection - select_connection.SELECT_TYPE = "select" - - mgr = driver.DriverManager( - 'oslo.messaging.pika.connection_factory', - self._connection_factory_type - ) - - self._connection_factory = mgr.driver(self.url, self.conf) - - # initializing 2 connection pools: 1st for connections without - # confirmations, 2nd - with confirmations - self._connection_without_confirmation_pool = pika_pool.QueuedPool( - create=self.create_connection, - max_size=self.conf.oslo_messaging_pika.pool_max_size, - max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow, - timeout=self.conf.oslo_messaging_pika.pool_timeout, - recycle=self.conf.oslo_messaging_pika.pool_recycle, - stale=self.conf.oslo_messaging_pika.pool_stale, - ) - - self._connection_with_confirmation_pool = pika_pool.QueuedPool( - create=self.create_connection, - max_size=self.conf.oslo_messaging_pika.pool_max_size, - max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow, - timeout=self.conf.oslo_messaging_pika.pool_timeout, - recycle=self.conf.oslo_messaging_pika.pool_recycle, - stale=self.conf.oslo_messaging_pika.pool_stale, - ) - - self._connection_with_confirmation_pool.Connection = ( - _PooledConnectionWithConfirmations - ) - - self._pid = cur_pid - - def create_connection(self, for_listening=False): - self._init_if_needed() - return self._connection_factory.create_connection(for_listening) - - @property - def connection_without_confirmation_pool(self): - self._init_if_needed() - return self._connection_without_confirmation_pool - - @property - def connection_with_confirmation_pool(self): - self._init_if_needed() - return self._connection_with_confirmation_pool - - def cleanup(self): - if self._connection_factory: - self._connection_factory.cleanup() - - def declare_exchange_by_channel(self, channel, exchange, exchange_type, - durable): - """Declare exchange using already created channel, if they don't exist - - :param channel: Channel for communication with RabbitMQ - :param exchange: String, RabbitMQ exchange name - :param exchange_type: String ('direct', 'topic' or 'fanout') - exchange type for exchange to be declared - :param durable: Boolean, creates durable exchange if true - """ - try: - channel.exchange_declare( - exchange, exchange_type, auto_delete=True, durable=durable - ) - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: - raise pika_drv_exc.ConnectionException( - "Connectivity problem detected during declaring exchange: " - "exchange:{}, exchange_type: {}, durable: {}. 
{}".format( - exchange, exchange_type, durable, str(e) - ) - ) - - def declare_queue_binding_by_channel(self, channel, exchange, queue, - routing_key, exchange_type, - queue_expiration, durable): - """Declare exchange, queue and bind them using already created - channel, if they don't exist - - :param channel: Channel for communication with RabbitMQ - :param exchange: String, RabbitMQ exchange name - :param queue: Sting, RabbitMQ queue name - :param routing_key: Sting, RabbitMQ routing key for queue binding - :param exchange_type: String ('direct', 'topic' or 'fanout') - exchange type for exchange to be declared - :param queue_expiration: Integer, time in seconds which queue will - remain existing in RabbitMQ when there no consumers connected - :param durable: Boolean, creates durable exchange and queue if true - """ - try: - channel.exchange_declare( - exchange, exchange_type, auto_delete=True, durable=durable - ) - arguments = {} - - if queue_expiration > 0: - arguments['x-expires'] = queue_expiration * 1000 - - channel.queue_declare(queue, durable=durable, arguments=arguments) - - channel.queue_bind(queue, exchange, routing_key) - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: - raise pika_drv_exc.ConnectionException( - "Connectivity problem detected during declaring queue " - "binding: exchange:{}, queue: {}, routing_key: {}, " - "exchange_type: {}, queue_expiration: {}, " - "durable: {}. {}".format( - exchange, queue, routing_key, exchange_type, - queue_expiration, durable, str(e) - ) - ) - - def get_rpc_exchange_name(self, exchange): - """Returns RabbitMQ exchange name for given rpc request - - :param exchange: String, oslo.messaging target's exchange - - :return: String, RabbitMQ exchange name - """ - return exchange or self.default_rpc_exchange - - @staticmethod - def get_rpc_queue_name(topic, server, no_ack, worker=False): - """Returns RabbitMQ queue name for given rpc request - - :param topic: String, oslo.messaging target's topic - :param server: String, oslo.messaging target's server - :param no_ack: Boolean, use message delivery with acknowledges or not - :param worker: Boolean, use queue by single worker only or not - - :return: String, RabbitMQ queue name - """ - queue_parts = ["no_ack" if no_ack else "with_ack", topic] - if server is not None: - queue_parts.append(server) - if worker: - queue_parts.append("worker") - queue_parts.append(uuid.uuid4().hex) - queue = '.'.join(queue_parts) - return queue diff --git a/oslo_messaging/_drivers/pika_driver/pika_exceptions.py b/oslo_messaging/_drivers/pika_driver/pika_exceptions.py deleted file mode 100644 index c32d7e4..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_exceptions.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_messaging import exceptions - - -class ExchangeNotFoundException(exceptions.MessageDeliveryFailure): - """Is raised if the specified exchange is not found in RabbitMQ.""" - pass - - -class MessageRejectedException(exceptions.MessageDeliveryFailure): - """Is raised if the message you are trying to send was nacked by RabbitMQ; - it may happen if RabbitMQ is not able to process the message - """ - pass - - -class RoutingException(exceptions.MessageDeliveryFailure): - """Is raised if the message can not be delivered to any queue. Usually it - means that no queue is bound to the given exchange with the given routing - key. Raised only if the 'mandatory' flag is specified - """ - pass - - -class ConnectionException(exceptions.MessagingException): - """Is raised if some operation can not be performed due to a connectivity - problem - """ - pass - - -class TimeoutConnectionException(ConnectionException): - """Is raised if a socket timeout expired during network interaction""" - pass - - -class EstablishConnectionException(ConnectionException): - """Is raised if some problem occurs during the connection establishment - procedure - """ - pass - - -class HostConnectionNotAllowedException(EstablishConnectionException): - """Is raised when trying to establish a connection to a temporarily - disallowed host (because of the reconnection policy, for example) - """ - pass - - -class UnsupportedDriverVersion(exceptions.MessagingException): - """Is raised when a message is received but was sent by a different, - unsupported driver version - """ - pass diff --git a/oslo_messaging/_drivers/pika_driver/pika_listener.py b/oslo_messaging/_drivers/pika_driver/pika_listener.py deleted file mode 100644 index 1942fcc..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_listener.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading -import uuid - -from concurrent import futures -from oslo_log import log as logging - -from oslo_messaging._drivers.pika_driver import pika_poller as pika_drv_poller - -LOG = logging.getLogger(__name__) - - -class RpcReplyPikaListener(object): - """Provide functionality for listening for RPC replies. Create and handle - the reply poller and the coroutine performing the polling job - """ - - def __init__(self, pika_engine): - super(RpcReplyPikaListener, self).__init__() - self._pika_engine = pika_engine - - # preparing poller for listening replies - self._reply_queue = None - - self._reply_poller = None - self._reply_waiting_futures = {} - - self._reply_consumer_initialized = False - self._reply_consumer_initialization_lock = threading.Lock() - self._shutdown = False - - def get_reply_qname(self): - """Return the reply queue name, shared for the whole process; before - that, check whether the RPC reply listener is initialized and perform - initialization if needed - - :return: String, queue name which should be used for sending replies - """ - if self._reply_consumer_initialized: - return self._reply_queue - - with self._reply_consumer_initialization_lock: - if self._reply_consumer_initialized: - return self._reply_queue - - # generate reply queue name if needed - if self._reply_queue is None: - self._reply_queue = "reply.{}.{}.{}".format( - self._pika_engine.conf.project, - self._pika_engine.conf.prog, uuid.uuid4().hex - ) - - # initialize reply poller if needed - if self._reply_poller is None: - self._reply_poller = pika_drv_poller.RpcReplyPikaPoller( - self._pika_engine, self._pika_engine.rpc_reply_exchange, - self._reply_queue, 1, None, - self._pika_engine.rpc_reply_listener_prefetch_count - ) - - self._reply_poller.start(self._on_incoming) - self._reply_consumer_initialized = True - - return self._reply_queue - - def _on_incoming(self, incoming): - """Reply polling job. Poll replies in an infinite loop and notify - registered futures - """ - for message in incoming: - try: - message.acknowledge() - future = self._reply_waiting_futures.pop( - message.msg_id, None - ) - if future is not None: - future.set_result(message) - except Exception: - LOG.exception("Unexpected exception during processing " - "reply message") - - def register_reply_waiter(self, msg_id): - """Register a reply waiter. Should be called before sending the - message to the server - :param msg_id: String, message_id of the expected reply - :return future: Future, container for the expected reply to be returned - over - """ - future = futures.Future() - self._reply_waiting_futures[msg_id] = future - return future - - def unregister_reply_waiter(self, msg_id): - """Unregister a reply waiter. Should be called if the client has not - received a reply and doesn't want to continue waiting (if the timeout - expired, for example) - :param msg_id: - """ - self._reply_waiting_futures.pop(msg_id, None) - - def cleanup(self): - """Stop consuming replies and clean up resources""" - self._shutdown = True - - if self._reply_poller: - self._reply_poller.stop() - self._reply_poller.cleanup() - self._reply_poller = None - - self._reply_queue = None diff --git a/oslo_messaging/_drivers/pika_driver/pika_message.py b/oslo_messaging/_drivers/pika_driver/pika_message.py deleted file mode 100644 index 2802bed..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_message.py +++ /dev/null @@ -1,613 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - - -import socket -import time -import traceback -import uuid - -from concurrent import futures -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import importutils -from oslo_utils import timeutils -from pika import exceptions as pika_exceptions -from pika import spec as pika_spec -import pika_pool -import retrying -import six - - -import oslo_messaging -from oslo_messaging._drivers import base -from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns -from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc -from oslo_messaging import _utils as utils -from oslo_messaging import exceptions - - -LOG = logging.getLogger(__name__) - -_VERSION_HEADER = "version" -_VERSION = "1.0" - - -class RemoteExceptionMixin(object): - """Used for constructing dynamic exception type during deserialization of - remote exception. It defines unified '__init__' method signature and - exception message format - """ - def __init__(self, module, clazz, message, trace): - """Store serialized data - :param module: String, module name for importing original exception - class of serialized remote exception - :param clazz: String, original class name of serialized remote - exception - :param message: String, original message of serialized remote - exception - :param trace: String, original trace of serialized remote exception - """ - self.module = module - self.clazz = clazz - self.message = message - self.trace = trace - - self._str_msgs = message + "\n" + "\n".join(trace) - - def __str__(self): - return self._str_msgs - - -class PikaIncomingMessage(base.IncomingMessage): - """Driver friendly adapter for received message. Extract message - information from RabbitMQ message and provide access to it - """ - - def __init__(self, pika_engine, channel, method, properties, body): - """Parse RabbitMQ message - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param channel: Channel, RabbitMQ channel which was used for - this message delivery, used for sending ack back. 
- If None - ack is not required - :param method: Method, RabbitMQ message method - :param properties: Properties, RabbitMQ message properties - :param body: Bytes, RabbitMQ message body - """ - headers = getattr(properties, "headers", {}) - version = headers.get(_VERSION_HEADER, None) - if not utils.version_is_compatible(version, _VERSION): - raise pika_drv_exc.UnsupportedDriverVersion( - "Message's version: {} is not compatible with driver version: " - "{}".format(version, _VERSION)) - - self._pika_engine = pika_engine - self._channel = channel - self._delivery_tag = method.delivery_tag - - self._version = version - - self._content_type = properties.content_type - self._content_encoding = properties.content_encoding - self.unique_id = properties.message_id - - self.expiration_time = ( - None if properties.expiration is None else - time.time() + float(properties.expiration) / 1000 - ) - - if self._content_type != "application/json": - raise NotImplementedError( - "Content-type['{}'] is not valid, " - "'application/json' only is supported.".format( - self._content_type - ) - ) - - message_dict = jsonutils.loads(body, encoding=self._content_encoding) - - context_dict = {} - - for key in list(message_dict.keys()): - key = six.text_type(key) - if key.startswith('_$_'): - value = message_dict.pop(key) - context_dict[key[3:]] = value - - super(PikaIncomingMessage, self).__init__(context_dict, message_dict) - - def need_ack(self): - return self._channel is not None - - def acknowledge(self): - """Ack the message. Should be called by message processing logic when - the message is considered consumed (meaning that redelivery of this - message is no longer needed) - """ - if self.need_ack(): - self._channel.basic_ack(delivery_tag=self._delivery_tag) - - def requeue(self): - """Roll back the message. Should be called by message processing logic - when it can not process the message right now and the message should - be redelivered later if possible - """ - if self.need_ack(): - return self._channel.basic_nack(delivery_tag=self._delivery_tag, - requeue=True) - - -class RpcPikaIncomingMessage(PikaIncomingMessage, base.RpcIncomingMessage): - """PikaIncomingMessage implementation for RPC messages. It expects - extra RPC related fields in the message body (msg_id and reply_q). A - 'reply' method is also added to allow the consumer to send an RPC reply - back to the RPC client - """ - - def __init__(self, pika_engine, channel, method, properties, body): - """Define default values of the msg_id and reply_q fields and just - call the super __init__ method - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param channel: Channel, RabbitMQ channel which was used for - this message delivery, used for sending ack back. - If None - ack is not required - :param method: Method, RabbitMQ message method - :param properties: Properties, RabbitMQ message properties - :param body: Bytes, RabbitMQ message body - """ - super(RpcPikaIncomingMessage, self).__init__( - pika_engine, channel, method, properties, body - ) - self.reply_q = properties.reply_to - self.msg_id = properties.correlation_id - - def reply(self, reply=None, failure=None): - """Send a reply back to the RPC client - :param reply: Dictionary, reply. In case of exception should be None - :param failure: Tuple, should be a sys.exc_info() tuple. - Should be None if RPC request was successfully processed.
- - :return RpcReplyPikaIncomingMessage, message with reply - """ - - if self.reply_q is None: - return - - reply_outgoing_message = RpcReplyPikaOutgoingMessage( - self._pika_engine, self.msg_id, reply=reply, failure_info=failure, - content_type=self._content_type, - content_encoding=self._content_encoding - ) - - def on_exception(ex): - if isinstance(ex, pika_drv_exc.ConnectionException): - LOG.warning( - "Connectivity related problem during reply sending. %s", - ex - ) - return True - else: - return False - - retrier = retrying.retry( - stop_max_attempt_number=( - None if self._pika_engine.rpc_reply_retry_attempts == -1 - else self._pika_engine.rpc_reply_retry_attempts - ), - retry_on_exception=on_exception, - wait_fixed=self._pika_engine.rpc_reply_retry_delay * 1000, - ) if self._pika_engine.rpc_reply_retry_attempts else None - - try: - timeout = (None if self.expiration_time is None else - max(self.expiration_time - time.time(), 0)) - with timeutils.StopWatch(duration=timeout) as stopwatch: - reply_outgoing_message.send( - reply_q=self.reply_q, - stopwatch=stopwatch, - retrier=retrier - ) - LOG.debug( - "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q - ) - except Exception: - LOG.exception( - "Message [id:'%s'] wasn't replied to : %s", self.msg_id, - self.reply_q - ) - - -class RpcReplyPikaIncomingMessage(PikaIncomingMessage): - """PikaIncomingMessage implementation for RPC reply messages. It expects - extra RPC reply related fields in message body (result and failure). - """ - def __init__(self, pika_engine, channel, method, properties, body): - """Defines default values of result and failure fields, call - super.__init__ method and then construct Exception object if failure is - not None - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param channel: Channel, RabbitMQ channel which was used for - this message delivery, used for sending ack back. - If None - ack is not required - :param method: Method, RabbitMQ message method - :param properties: Properties, RabbitMQ message properties - :param body: Bytes, RabbitMQ message body - """ - super(RpcReplyPikaIncomingMessage, self).__init__( - pika_engine, channel, method, properties, body - ) - - self.msg_id = properties.correlation_id - - self.result = self.message.get("s", None) - self.failure = self.message.get("e", None) - - if self.failure is not None: - trace = self.failure.get('t', []) - message = self.failure.get('s', "") - class_name = self.failure.get('c') - module_name = self.failure.get('m') - - res_exc = None - - if module_name in pika_engine.allowed_remote_exmods: - try: - module = importutils.import_module(module_name) - klass = getattr(module, class_name) - - ex_type = type( - klass.__name__, - (RemoteExceptionMixin, klass), - {} - ) - - res_exc = ex_type(module_name, class_name, message, trace) - except ImportError as e: - LOG.warning( - "Can not deserialize remote exception [module:%s, " - "class:%s]. %s", module_name, class_name, e - ) - - # if we have not processed failure yet, use RemoteError class - if res_exc is None: - res_exc = oslo_messaging.RemoteError( - class_name, message, trace - ) - self.failure = res_exc - - -class PikaOutgoingMessage(object): - """Driver friendly adapter for sending message. 
Construct a RabbitMQ message - and send it - """ - - def __init__(self, pika_engine, message, context, - content_type="application/json", content_encoding="utf-8"): - """Initialize an outgoing RabbitMQ message - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param message: Dictionary, user's message fields - :param context: Dictionary, request context's fields - :param content_type: String, content-type header, defines serialization - mechanism - :param content_encoding: String, defines encoding for text data - """ - - self._pika_engine = pika_engine - - self._content_type = content_type - self._content_encoding = content_encoding - - if self._content_type != "application/json": - raise NotImplementedError( - "Content-type['{}'] is not valid, " - "'application/json' only is supported.".format( - self._content_type - ) - ) - - self.message = message - self.context = context - - self.unique_id = uuid.uuid4().hex - - def _prepare_message_to_send(self): - """Combine the user's message fields and system fields (_unique_id, - context's data etc.) - """ - msg = self.message.copy() - - if self.context: - for key, value in six.iteritems(self.context): - key = six.text_type(key) - msg['_$_' + key] = value - - props = pika_spec.BasicProperties( - content_encoding=self._content_encoding, - content_type=self._content_type, - headers={_VERSION_HEADER: _VERSION}, - message_id=self.unique_id, - ) - return msg, props - - @staticmethod - def _publish(pool, exchange, routing_key, body, properties, mandatory, - stopwatch): - """Execute the pika publish method using a connection from the - connection pool. Also this method catches all pika related exceptions - and raises oslo.messaging specific exceptions - - :param pool: Pool, pika connection pool for connection choosing - :param exchange: String, RabbitMQ exchange name for message sending - :param routing_key: String, RabbitMQ routing key for message routing - :param body: Bytes, RabbitMQ message payload - :param properties: Properties, RabbitMQ message properties - :param mandatory: Boolean, RabbitMQ publish mandatory flag (raise - exception if it is not possible to deliver message to any queue) - :param stopwatch: StopWatch, stopwatch object for calculating - allowed timeouts - """ - if stopwatch.expired(): - raise exceptions.MessagingTimeout( - "Timeout for the current operation has expired." - ) - try: - timeout = stopwatch.leftover(return_none=True) - with pool.acquire(timeout=timeout) as conn: - if timeout is not None: - properties.expiration = str(int(timeout * 1000)) - conn.channel.publish( - exchange=exchange, - routing_key=routing_key, - body=body, - properties=properties, - mandatory=mandatory - ) - except pika_exceptions.NackError as e: - raise pika_drv_exc.MessageRejectedException( - "Can not send message: [body: {}, properties: {}] to " - "target [exchange: {}, routing_key: {}]. {}".format( - body, properties, exchange, routing_key, str(e) - ) - ) - except pika_exceptions.UnroutableError as e: - raise pika_drv_exc.RoutingException( - "Can not deliver message:[body:{}, properties: {}] to any " - "queue using target: [exchange:{}, " - "routing_key:{}]. {}".format( - body, properties, exchange, routing_key, str(e) - ) - ) - except pika_pool.Timeout as e: - raise exceptions.MessagingTimeout( - "Timeout for the current operation has expired. 
{}".format(str(e)) - ) - except pika_pool.Connection.connectivity_errors as e: - if (isinstance(e, pika_exceptions.ChannelClosed) - and e.args and e.args[0] == 404): - raise pika_drv_exc.ExchangeNotFoundException( - "Attempt to send message to not existing exchange " - "detected, message: [body:{}, properties: {}], target: " - "[exchange:{}, routing_key:{}]. {}".format( - body, properties, exchange, routing_key, str(e) - ) - ) - - raise pika_drv_exc.ConnectionException( - "Connectivity problem detected during sending the message: " - "[body:{}, properties: {}] to target: [exchange:{}, " - "routing_key:{}]. {}".format( - body, properties, exchange, routing_key, str(e) - ) - ) - except socket.timeout: - raise pika_drv_exc.TimeoutConnectionException( - "Socket timeout exceeded." - ) - - def _do_send(self, exchange, routing_key, msg_dict, msg_props, - confirm=True, mandatory=True, persistent=False, - stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, retrier=None): - """Send prepared message with configured retrying - - :param exchange: String, RabbitMQ exchange name for message sending - :param routing_key: String, RabbitMQ routing key for message routing - :param msg_dict: Dictionary, message payload - :param msg_props: Properties, message properties - :param confirm: Boolean, enable publisher confirmation if True - :param mandatory: Boolean, RabbitMQ publish mandatory flag (raise - exception if it is not possible to deliver message to any queue) - :param persistent: Boolean, send persistent message if True, works only - for routing into durable queues - :param stopwatch: StopWatch, stopwatch object for calculating - allowed timeouts - :param retrier: retrying.Retrier, configured retrier object for sending - message, if None no retrying is performed - """ - msg_props.delivery_mode = 2 if persistent else 1 - - pool = (self._pika_engine.connection_with_confirmation_pool - if confirm else - self._pika_engine.connection_without_confirmation_pool) - - body = jsonutils.dump_as_bytes(msg_dict, - encoding=self._content_encoding) - - LOG.debug( - "Sending message:[body:%s; properties: %s] to target: " - "[exchange:%s; routing_key:%s]", body, msg_props, exchange, - routing_key - ) - - publish = (self._publish if retrier is None else - retrier(self._publish)) - - return publish(pool, exchange, routing_key, body, msg_props, - mandatory, stopwatch) - - def send(self, exchange, routing_key='', confirm=True, mandatory=True, - persistent=False, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, - retrier=None): - """Send message with configured retrying - - :param exchange: String, RabbitMQ exchange name for message sending - :param routing_key: String, RabbitMQ routing key for message routing - :param confirm: Boolean, enable publisher confirmation if True - :param mandatory: Boolean, RabbitMQ publish mandatory flag (raise - exception if it is not possible to deliver message to any queue) - :param persistent: Boolean, send persistent message if True, works only - for routing into durable queues - :param stopwatch: StopWatch, stopwatch object for calculating - allowed timeouts - :param retrier: retrying.Retrier, configured retrier object for sending - message, if None no retrying is performed - """ - msg_dict, msg_props = self._prepare_message_to_send() - - return self._do_send(exchange, routing_key, msg_dict, msg_props, - confirm, mandatory, persistent, - stopwatch, retrier) - - -class RpcPikaOutgoingMessage(PikaOutgoingMessage): - """PikaOutgoingMessage implementation for RPC messages. 
It adds - possibility to wait and receive RPC reply - """ - def __init__(self, pika_engine, message, context, - content_type="application/json", content_encoding="utf-8"): - super(RpcPikaOutgoingMessage, self).__init__( - pika_engine, message, context, content_type, content_encoding - ) - self.msg_id = None - self.reply_q = None - - def send(self, exchange, routing_key, reply_listener=None, - stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, retrier=None): - """Send RPC message with configured retrying - - :param exchange: String, RabbitMQ exchange name for message sending - :param routing_key: String, RabbitMQ routing key for message routing - :param reply_listener: RpcReplyPikaListener, listener for waiting - reply. If None - return immediately without reply waiting - :param stopwatch: StopWatch, stopwatch object for calculating - allowed timeouts - :param retrier: retrying.Retrier, configured retrier object for sending - message, if None no retrying is performed - """ - msg_dict, msg_props = self._prepare_message_to_send() - - if reply_listener: - self.msg_id = uuid.uuid4().hex - msg_props.correlation_id = self.msg_id - LOG.debug('MSG_ID is %s', self.msg_id) - - self.reply_q = reply_listener.get_reply_qname() - msg_props.reply_to = self.reply_q - - future = reply_listener.register_reply_waiter(msg_id=self.msg_id) - - self._do_send( - exchange=exchange, routing_key=routing_key, msg_dict=msg_dict, - msg_props=msg_props, confirm=True, mandatory=True, - persistent=False, stopwatch=stopwatch, retrier=retrier - ) - - try: - return future.result(stopwatch.leftover(return_none=True)) - except BaseException as e: - reply_listener.unregister_reply_waiter(self.msg_id) - if isinstance(e, futures.TimeoutError): - e = exceptions.MessagingTimeout() - raise e - else: - self._do_send( - exchange=exchange, routing_key=routing_key, msg_dict=msg_dict, - msg_props=msg_props, confirm=True, mandatory=True, - persistent=False, stopwatch=stopwatch, retrier=retrier - ) - - -class RpcReplyPikaOutgoingMessage(PikaOutgoingMessage): - """PikaOutgoingMessage implementation for RPC reply messages. It sets - correlation_id AMQP property to link this reply with response - """ - def __init__(self, pika_engine, msg_id, reply=None, failure_info=None, - content_type="application/json", content_encoding="utf-8"): - """Initialize with reply information for sending - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param msg_id: String, msg_id of RPC request, which waits for reply - :param reply: Dictionary, reply. In case of exception should be None - :param failure_info: Tuple, should be a sys.exc_info() tuple. - Should be None if RPC request was successfully processed. 
- :param content_type: String, content-type header, defines serialization - mechanism - :param content_encoding: String, defines encoding for text data - """ - self.msg_id = msg_id - - if failure_info is not None: - ex_class = failure_info[0] - ex = failure_info[1] - tb = traceback.format_exception(*failure_info) - if issubclass(ex_class, RemoteExceptionMixin): - failure_data = { - 'c': ex.clazz, - 'm': ex.module, - 's': ex.message, - 't': tb - } - else: - failure_data = { - 'c': six.text_type(ex_class.__name__), - 'm': six.text_type(ex_class.__module__), - 's': six.text_type(ex), - 't': tb - } - - msg = {'e': failure_data} - else: - msg = {'s': reply} - - super(RpcReplyPikaOutgoingMessage, self).__init__( - pika_engine, msg, None, content_type, content_encoding - ) - - def send(self, reply_q, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, - retrier=None): - """Send the RPC reply with configured retrying - - :param reply_q: String, queue name for sending reply - :param stopwatch: StopWatch, stopwatch object for calculating - allowed timeouts - :param retrier: retrying.Retrier, configured retrier object for sending - message, if None no retrying is performed - """ - - msg_dict, msg_props = self._prepare_message_to_send() - msg_props.correlation_id = self.msg_id - - self._do_send( - exchange=self._pika_engine.rpc_reply_exchange, routing_key=reply_q, - msg_dict=msg_dict, msg_props=msg_props, confirm=True, - mandatory=True, persistent=False, stopwatch=stopwatch, - retrier=retrier - ) diff --git a/oslo_messaging/_drivers/pika_driver/pika_poller.py b/oslo_messaging/_drivers/pika_driver/pika_poller.py deleted file mode 100644 index d34ddfe..0000000 --- a/oslo_messaging/_drivers/pika_driver/pika_poller.py +++ /dev/null @@ -1,538 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
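
For orientation while reading the two reply classes above: on the wire the reply is just a dict, with 's' carrying a successful result and 'e' carrying a serialized failure that the incoming side re-imports from the same keys. A standalone sketch of that encoding follows; only the single-letter key names come from the driver code, the helper itself is illustrative.

import sys
import traceback


def make_reply_body(reply=None, failure_info=None):
    # failure_info is a sys.exc_info() tuple, as in the driver above
    if failure_info is None:
        return {'s': reply}
    ex_class, ex, _ = failure_info
    return {'e': {
        'c': ex_class.__name__,       # exception class name
        'm': ex_class.__module__,     # module the consumer may re-import
        's': str(ex),                 # message text
        't': traceback.format_exception(*failure_info),
    }}


try:
    raise ValueError("boom")
except ValueError:
    body = make_reply_body(failure_info=sys.exc_info())

assert body['e']['c'] == 'ValueError'
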
- -import threading - -from oslo_log import log as logging -from oslo_service import loopingcall - -from oslo_messaging._drivers import base -from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns -from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc -from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg - -LOG = logging.getLogger(__name__) - - -class PikaPoller(base.Listener): - """Provides user friendly functionality for RabbitMQ message consuming, - handles low level connectivity problems and restore connection if some - connectivity related problem detected - """ - - def __init__(self, pika_engine, batch_size, batch_timeout, prefetch_count, - incoming_message_class): - """Initialize required fields - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param batch_size: desired number of messages passed to - single on_incoming_callback call - :param batch_timeout: defines how long should we wait for batch_size - messages if we already have some messages waiting for processing - :param prefetch_count: Integer, maximum count of unacknowledged - messages which RabbitMQ broker sends to this consumer - :param incoming_message_class: PikaIncomingMessage, wrapper for - consumed RabbitMQ message - """ - super(PikaPoller, self).__init__(batch_size, batch_timeout, - prefetch_count) - self._pika_engine = pika_engine - self._incoming_message_class = incoming_message_class - - self._connection = None - self._channel = None - self._recover_loopingcall = None - self._lock = threading.RLock() - - self._cur_batch_buffer = None - self._cur_batch_timeout_id = None - - self._started = False - self._closing_connection_by_poller = False - - self._queues_to_consume = None - - def _on_connection_close(self, connection, reply_code, reply_text): - self._deliver_cur_batch() - if self._closing_connection_by_poller: - return - with self._lock: - self._connection = None - self._start_recover_consuming_task() - - def _on_channel_close(self, channel, reply_code, reply_text): - if self._cur_batch_buffer: - self._cur_batch_buffer = [ - message for message in self._cur_batch_buffer - if not message.need_ack() - ] - if self._closing_connection_by_poller: - return - with self._lock: - self._channel = None - self._start_recover_consuming_task() - - def _on_consumer_cancel(self, method_frame): - with self._lock: - if self._queues_to_consume: - consumer_tag = method_frame.method.consumer_tag - for queue_info in self._queues_to_consume: - if queue_info["consumer_tag"] == consumer_tag: - queue_info["consumer_tag"] = None - - self._start_recover_consuming_task() - - def _on_message_no_ack_callback(self, unused, method, properties, body): - """Is called by Pika when message was received from queue listened with - no_ack=True mode - """ - incoming_message = self._incoming_message_class( - self._pika_engine, None, method, properties, body - ) - self._on_incoming_message(incoming_message) - - def _on_message_with_ack_callback(self, unused, method, properties, body): - """Is called by Pika when message was received from queue listened with - no_ack=False mode - """ - incoming_message = self._incoming_message_class( - self._pika_engine, self._channel, method, properties, body - ) - self._on_incoming_message(incoming_message) - - def _deliver_cur_batch(self): - if self._cur_batch_timeout_id is not None: - self._connection.remove_timeout(self._cur_batch_timeout_id) - self._cur_batch_timeout_id = None - if 
self._cur_batch_buffer: - buf_to_send = self._cur_batch_buffer - self._cur_batch_buffer = None - try: - self.on_incoming_callback(buf_to_send) - except Exception: - LOG.exception("Unexpected exception during incoming delivery") - - def _on_incoming_message(self, incoming_message): - if self._cur_batch_buffer is None: - self._cur_batch_buffer = [incoming_message] - else: - self._cur_batch_buffer.append(incoming_message) - - if len(self._cur_batch_buffer) >= self.batch_size: - self._deliver_cur_batch() - return - - if self._cur_batch_timeout_id is None: - self._cur_batch_timeout_id = self._connection.add_timeout( - self.batch_timeout, self._deliver_cur_batch) - - def _start_recover_consuming_task(self): - """Start async job for checking connection to the broker.""" - if self._recover_loopingcall is None and self._started: - self._recover_loopingcall = ( - loopingcall.DynamicLoopingCall( - self._try_recover_consuming - ) - ) - LOG.info("Starting recover consuming job for listener: %s", self) - self._recover_loopingcall.start() - - def _try_recover_consuming(self): - with self._lock: - try: - if self._started: - self._start_or_recover_consuming() - except pika_drv_exc.EstablishConnectionException as e: - LOG.warning( - "Problem during establishing connection for pika " - "poller %s", e, exc_info=True - ) - return self._pika_engine.host_connection_reconnect_delay - except pika_drv_exc.ConnectionException as e: - LOG.warning( - "Connectivity exception during starting/recovering pika " - "poller %s", e, exc_info=True - ) - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: - LOG.warning( - "Connectivity exception during starting/recovering pika " - "poller %s", e, exc_info=True - ) - except BaseException: - # NOTE (dukhlov): I preffer to use here BaseException because - # if this method raise such exception LoopingCall stops - # execution Probably it should never happen and Exception - # should be enough but in case of programmer mistake it could - # be and it is potentially hard to catch problem if we will - # stop background task. It is better when it continue to work - # and write a lot of LOG with this error - LOG.exception("Unexpected exception during " - "starting/recovering pika poller") - else: - self._recover_loopingcall = None - LOG.info("Recover consuming job was finished for listener: %s", - self) - raise loopingcall.LoopingCallDone(True) - return 0 - - def _start_or_recover_consuming(self): - """Performs reconnection to the broker. It is unsafe method for - internal use only - """ - if self._connection is None or not self._connection.is_open: - self._connection = self._pika_engine.create_connection( - for_listening=True - ) - self._connection.add_on_close_callback(self._on_connection_close) - self._channel = None - - if self._channel is None or not self._channel.is_open: - if self._queues_to_consume: - for queue_info in self._queues_to_consume: - queue_info["consumer_tag"] = None - - self._channel = self._connection.channel() - self._channel.add_on_close_callback(self._on_channel_close) - self._channel.add_on_cancel_callback(self._on_consumer_cancel) - self._channel.basic_qos(prefetch_count=self.prefetch_size) - - if self._queues_to_consume is None: - self._queues_to_consume = self._declare_queue_binding() - - self._start_consuming() - - def _declare_queue_binding(self): - """Is called by recovering connection logic if target RabbitMQ - exchange and (or) queue do not exist. 
Should be overridden in child - classes - - :return Dictionary, declared_queue_name -> no_ack_mode - """ - raise NotImplementedError( - "It is base class. Please declare exchanges and queues here" - ) - - def _start_consuming(self): - """Is called by recovering connection logic for starting consumption - of configured RabbitMQ queues - """ - - assert self._queues_to_consume is not None - - try: - for queue_info in self._queues_to_consume: - if queue_info["consumer_tag"] is not None: - continue - no_ack = queue_info["no_ack"] - - on_message_callback = ( - self._on_message_no_ack_callback if no_ack - else self._on_message_with_ack_callback - ) - - queue_info["consumer_tag"] = self._channel.basic_consume( - on_message_callback, queue_info["queue_name"], - no_ack=no_ack - ) - except Exception: - self._queues_to_consume = None - raise - - def _stop_consuming(self): - """Is called by poller's stop logic for stopping consumption - of configured RabbitMQ queues - """ - - assert self._queues_to_consume is not None - - for queue_info in self._queues_to_consume: - consumer_tag = queue_info["consumer_tag"] - if consumer_tag is not None: - self._channel.basic_cancel(consumer_tag) - queue_info["consumer_tag"] = None - - def start(self, on_incoming_callback): - """Starts poller. Should be called before polling to allow message - consuming - - :param on_incoming_callback: callback function to be executed when - listener received messages. Messages should be processed and - acked/nacked by callback - """ - super(PikaPoller, self).start(on_incoming_callback) - - with self._lock: - if self._started: - return - connected = False - try: - self._start_or_recover_consuming() - except pika_drv_exc.EstablishConnectionException as exc: - LOG.warning( - "Can not establish connection during pika poller's " - "start(). %s", exc, exc_info=True - ) - except pika_drv_exc.ConnectionException as exc: - LOG.warning( - "Connectivity problem during pika poller's start(). %s", - exc, exc_info=True - ) - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as exc: - LOG.warning( - "Connectivity problem during pika poller's start(). %s", - exc, exc_info=True - ) - else: - connected = True - - self._started = True - if not connected: - self._start_recover_consuming_task() - - def stop(self): - """Stops poller. Should be called when polling is not needed anymore to - stop new message consuming. After that it is necessary to poll already - prefetched messages - """ - super(PikaPoller, self).stop() - - with self._lock: - if not self._started: - return - - if self._recover_loopingcall is not None: - self._recover_loopingcall.stop() - self._recover_loopingcall = None - - if (self._queues_to_consume and self._channel and - self._channel.is_open): - try: - self._stop_consuming() - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as exc: - LOG.warning( - "Connectivity problem detected during consumer " - "cancellation. 
%s", exc, exc_info=True - ) - self._deliver_cur_batch() - self._started = False - - def cleanup(self): - """Cleanup allocated resources (channel, connection, etc).""" - with self._lock: - if self._connection and self._connection.is_open: - try: - self._closing_connection_by_poller = True - self._connection.close() - self._closing_connection_by_poller = False - except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS: - # expected errors - pass - except Exception: - LOG.exception("Unexpected error during closing connection") - finally: - self._channel = None - self._connection = None - - -class RpcServicePikaPoller(PikaPoller): - """PikaPoller implementation for polling RPC messages. Overrides base - functionality according to RPC specific - """ - def __init__(self, pika_engine, target, batch_size, batch_timeout, - prefetch_count): - """Adds target parameter for declaring RPC specific exchanges and - queues - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param target: Target, oslo.messaging Target object which defines RPC - endpoint - :param batch_size: desired number of messages passed to - single on_incoming_callback call - :param batch_timeout: defines how long should we wait for batch_size - messages if we already have some messages waiting for processing - :param prefetch_count: Integer, maximum count of unacknowledged - messages which RabbitMQ broker sends to this consumer - """ - self._target = target - - super(RpcServicePikaPoller, self).__init__( - pika_engine, batch_size, batch_timeout, prefetch_count, - pika_drv_msg.RpcPikaIncomingMessage - ) - - def _declare_queue_binding(self): - """Overrides base method and perform declaration of RabbitMQ exchanges - and queues which correspond to oslo.messaging RPC target - - :return Dictionary, declared_queue_name -> no_ack_mode - """ - queue_expiration = self._pika_engine.rpc_queue_expiration - - exchange = self._pika_engine.get_rpc_exchange_name( - self._target.exchange - ) - - queues_to_consume = [] - - for no_ack in [True, False]: - queue = self._pika_engine.get_rpc_queue_name( - self._target.topic, None, no_ack - ) - self._pika_engine.declare_queue_binding_by_channel( - channel=self._channel, exchange=exchange, queue=queue, - routing_key=queue, exchange_type='direct', durable=False, - queue_expiration=queue_expiration - ) - queues_to_consume.append( - {"queue_name": queue, "no_ack": no_ack, "consumer_tag": None} - ) - - if self._target.server: - server_queue = self._pika_engine.get_rpc_queue_name( - self._target.topic, self._target.server, no_ack - ) - self._pika_engine.declare_queue_binding_by_channel( - channel=self._channel, exchange=exchange, durable=False, - queue=server_queue, routing_key=server_queue, - exchange_type='direct', queue_expiration=queue_expiration - ) - queues_to_consume.append( - {"queue_name": server_queue, "no_ack": no_ack, - "consumer_tag": None} - ) - - worker_queue = self._pika_engine.get_rpc_queue_name( - self._target.topic, self._target.server, no_ack, True - ) - all_workers_routing_key = self._pika_engine.get_rpc_queue_name( - self._target.topic, "all_workers", no_ack - ) - self._pika_engine.declare_queue_binding_by_channel( - channel=self._channel, exchange=exchange, durable=False, - queue=worker_queue, routing_key=all_workers_routing_key, - exchange_type='direct', queue_expiration=queue_expiration - ) - queues_to_consume.append( - {"queue_name": worker_queue, "no_ack": no_ack, - "consumer_tag": None} - ) - - return queues_to_consume - - -class 
RpcReplyPikaPoller(PikaPoller): - """PikaPoller implementation for polling RPC reply messages. Overrides - base functionality according to RPC reply specific - """ - def __init__(self, pika_engine, exchange, queue, batch_size, batch_timeout, - prefetch_count): - """Adds exchange and queue parameter for declaring exchange and queue - used for RPC reply delivery - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param exchange: String, exchange name used for RPC reply delivery - :param queue: String, queue name used for RPC reply delivery - :param batch_size: desired number of messages passed to - single on_incoming_callback call - :param batch_timeout: defines how long should we wait for batch_size - messages if we already have some messages waiting for processing - :param prefetch_count: Integer, maximum count of unacknowledged - messages which RabbitMQ broker sends to this consumer - """ - self._exchange = exchange - self._queue = queue - - super(RpcReplyPikaPoller, self).__init__( - pika_engine, batch_size, batch_timeout, prefetch_count, - pika_drv_msg.RpcReplyPikaIncomingMessage - ) - - def _declare_queue_binding(self): - """Overrides base method and perform declaration of RabbitMQ exchange - and queue used for RPC reply delivery - - :return Dictionary, declared_queue_name -> no_ack_mode - """ - self._pika_engine.declare_queue_binding_by_channel( - channel=self._channel, - exchange=self._exchange, queue=self._queue, - routing_key=self._queue, exchange_type='direct', - queue_expiration=self._pika_engine.rpc_queue_expiration, - durable=False - ) - - return [{"queue_name": self._queue, "no_ack": False, - "consumer_tag": None}] - - -class NotificationPikaPoller(PikaPoller): - """PikaPoller implementation for polling Notification messages. 
Overrides - base functionality according to Notification specific - """ - def __init__(self, pika_engine, targets_and_priorities, - batch_size, batch_timeout, prefetch_count, queue_name=None): - """Adds targets_and_priorities and queue_name parameter - for declaring exchanges and queues used for notification delivery - - :param pika_engine: PikaEngine, shared object with configuration and - shared driver functionality - :param targets_and_priorities: list of (target, priority), defines - default queue names for corresponding notification types - :param batch_size: desired number of messages passed to - single on_incoming_callback call - :param batch_timeout: defines how long should we wait for batch_size - messages if we already have some messages waiting for processing - :param prefetch_count: Integer, maximum count of unacknowledged - messages which RabbitMQ broker sends to this consumer - :param queue: String, alternative queue name used for this poller - instead of default queue name - """ - self._targets_and_priorities = targets_and_priorities - self._queue_name = queue_name - - super(NotificationPikaPoller, self).__init__( - pika_engine, batch_size, batch_timeout, prefetch_count, - pika_drv_msg.PikaIncomingMessage - ) - - def _declare_queue_binding(self): - """Overrides base method and perform declaration of RabbitMQ exchanges - and queues used for notification delivery - - :return Dictionary, declared_queue_name -> no_ack_mode - """ - queues_to_consume = [] - for target, priority in self._targets_and_priorities: - routing_key = '%s.%s' % (target.topic, priority) - queue = self._queue_name or routing_key - self._pika_engine.declare_queue_binding_by_channel( - channel=self._channel, - exchange=( - target.exchange or - self._pika_engine.default_notification_exchange - ), - queue=queue, - routing_key=routing_key, - exchange_type='direct', - queue_expiration=None, - durable=self._pika_engine.notification_persistence, - ) - queues_to_consume.append( - {"queue_name": queue, "no_ack": False, "consumer_tag": None} - ) - - return queues_to_consume diff --git a/oslo_messaging/_drivers/pool.py b/oslo_messaging/_drivers/pool.py deleted file mode 100644 index 681dbef..0000000 --- a/oslo_messaging/_drivers/pool.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import collections -import sys -import threading - -from oslo_log import log as logging -from oslo_utils import timeutils -import six - -from oslo_messaging._drivers import common - -LOG = logging.getLogger(__name__) - -# TODO(harlowja): remove this when we no longer have to support 2.7 -if sys.version_info[0:2] < (3, 2): - def wait_condition(cond): - # FIXME(markmc): timeout needed to allow keyboard interrupt - # http://bugs.python.org/issue8844 - cond.wait(timeout=1) -else: - def wait_condition(cond): - cond.wait() - - -@six.add_metaclass(abc.ABCMeta) -class Pool(object): - """A thread-safe object pool. 
- - Modelled after the eventlet.pools.Pool interface, but designed to be safe - when using native threads without the GIL. - - Resizing is not supported. - - """ - - def __init__(self, max_size=4, min_size=2, ttl=1200, on_expire=None): - super(Pool, self).__init__() - self._min_size = min_size - self._max_size = max_size - self._item_ttl = ttl - self._current_size = 0 - self._cond = threading.Condition() - self._items = collections.deque() - self._on_expire = on_expire - - def expire(self): - """Remove expired items from left (the oldest item) to - right (the newest item). - """ - with self._cond: - while len(self._items) > self._min_size: - try: - ttl_watch, item = self._items.popleft() - if ttl_watch.expired(): - self._on_expire and self._on_expire(item) - self._current_size -= 1 - else: - self._items.appendleft((ttl_watch, item)) - return - except IndexError: - break - - def put(self, item): - """Return an item to the pool.""" - with self._cond: - ttl_watch = timeutils.StopWatch(duration=self._item_ttl) - ttl_watch.start() - self._items.append((ttl_watch, item)) - self._cond.notify() - - def get(self): - """Return an item from the pool, when one is available. - - This may cause the calling thread to block. - """ - with self._cond: - while True: - try: - ttl_watch, item = self._items.pop() - self.expire() - return item - except IndexError: - pass - - if self._current_size < self._max_size: - self._current_size += 1 - break - - wait_condition(self._cond) - - # We've grabbed a slot and dropped the lock, now do the creation - try: - return self.create() - except Exception: - with self._cond: - self._current_size -= 1 - raise - - def iter_free(self): - """Iterate over free items.""" - while True: - try: - _, item = self._items.pop() - yield item - except IndexError: - raise StopIteration - - @abc.abstractmethod - def create(self): - """Construct a new item.""" - - -class ConnectionPool(Pool): - """Class that implements a Pool of Connections.""" - - def __init__(self, conf, max_size, min_size, ttl, url, connection_cls): - self.connection_cls = connection_cls - self.conf = conf - self.url = url - super(ConnectionPool, self).__init__(max_size, min_size, ttl, - self._on_expire) - - def _on_expire(self, connection): - connection.close() - LOG.debug("Idle connection has expired and been closed." 
- " Pool size: %d" % len(self._items)) - - def create(self, purpose=common.PURPOSE_SEND): - LOG.debug('Pool creating new connection') - return self.connection_cls(self.conf, self.url, purpose) - - def empty(self): - for item in self.iter_free(): - item.close() diff --git a/oslo_messaging/_drivers/zmq_driver/__init__.py b/oslo_messaging/_drivers/zmq_driver/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/client/__init__.py b/oslo_messaging/_drivers/zmq_driver/client/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/client/publishers/__init__.py b/oslo_messaging/_drivers/zmq_driver/client/publishers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/__init__.py b/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_base.py b/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_base.py deleted file mode 100644 index 4a5eba4..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_base.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -from concurrent import futures -import logging - -import retrying - -import oslo_messaging -from oslo_messaging._drivers import common as rpc_common -from oslo_messaging._drivers.zmq_driver.client.publishers \ - import zmq_publisher_base -from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._i18n import _LE - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class DealerPublisherBase(zmq_publisher_base.PublisherBase): - """Abstract DEALER-publisher.""" - - def __init__(self, conf, matchmaker, sender, receiver): - sockets_manager = zmq_sockets_manager.SocketsManager( - conf, matchmaker, zmq.ROUTER, zmq.DEALER - ) - super(DealerPublisherBase, self).__init__(sockets_manager, sender, - receiver) - - @staticmethod - def _check_pattern(request, supported_pattern): - if request.msg_type != supported_pattern: - raise zmq_publisher_base.UnsupportedSendPattern( - zmq_names.message_type_str(request.msg_type) - ) - - @staticmethod - def _raise_timeout(request): - raise oslo_messaging.MessagingTimeout( - "Timeout %(tout)s seconds was reached for message %(msg_id)s" % - {"tout": request.timeout, "msg_id": request.message_id} - ) - - @abc.abstractmethod - def _connect_socket(self, request): - pass - - def _recv_reply(self, request): - reply_future, = self.receiver.track_request(request) - - try: - _, reply = reply_future.result(timeout=request.timeout) - except AssertionError: - LOG.error(_LE("Message format error in reply for %s"), - request.message_id) - return None - except futures.TimeoutError: - self._raise_timeout(request) - finally: - self.receiver.untrack_request(request) - - if reply.failure: - raise rpc_common.deserialize_remote_exception( - reply.failure, request.allowed_remote_exmods - ) - else: - return reply.reply_body - - def send_call(self, request): - self._check_pattern(request, zmq_names.CALL_TYPE) - - try: - socket = self._connect_socket(request) - except retrying.RetryError: - self._raise_timeout(request) - - self.sender.send(socket, request) - self.receiver.register_socket(socket) - return self._recv_reply(request) - - @abc.abstractmethod - def _send_non_blocking(self, request): - pass - - def send_cast(self, request): - self._check_pattern(request, zmq_names.CAST_TYPE) - self._send_non_blocking(request) - - def send_fanout(self, request): - self._check_pattern(request, zmq_names.CAST_FANOUT_TYPE) - self._send_non_blocking(request) - - def send_notify(self, request): - self._check_pattern(request, zmq_names.NOTIFY_TYPE) - self._send_non_blocking(request) diff --git a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.py b/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.py deleted file mode 100644 index 56d8b49..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import retrying - -from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ - import zmq_dealer_publisher_base -from oslo_messaging._drivers.zmq_driver.client import zmq_receivers -from oslo_messaging._drivers.zmq_driver.client import zmq_senders -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class DealerPublisherDirect(zmq_dealer_publisher_base.DealerPublisherBase): - """DEALER-publisher using direct connections.""" - - def __init__(self, conf, matchmaker): - sender = zmq_senders.RequestSenderDirect(conf) - receiver = zmq_receivers.ReplyReceiverDirect(conf) - super(DealerPublisherDirect, self).__init__(conf, matchmaker, sender, - receiver) - - def _connect_socket(self, request): - return self.sockets_manager.get_socket(request.target) - - def _send_non_blocking(self, request): - try: - socket = self._connect_socket(request) - except retrying.RetryError: - return - - if request.msg_type in zmq_names.MULTISEND_TYPES: - for _ in range(socket.connections_count()): - self.sender.send(socket, request) - else: - self.sender.send(socket, request) diff --git a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.py b/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.py deleted file mode 100644 index fb10ce7..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
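
DealerPublisherBase.send_call above reduces to a small contract: register a future for the request, publish, block on the future for at most the request timeout, translate a wait timeout into MessagingTimeout, and always untrack the request afterwards. Here is a stdlib-only sketch of that control flow; the callables and the local MessagingTimeout class are stand-ins, not the driver's API.

from concurrent import futures


class MessagingTimeout(Exception):
    pass


def send_call(publish, track, untrack, message_id, timeout):
    future = track(message_id)     # the receiver thread resolves this
    publish(message_id)
    try:
        _, reply_body = future.result(timeout=timeout)
    except futures.TimeoutError:
        raise MessagingTimeout(
            "Timeout %s seconds was reached for message %s"
            % (timeout, message_id))
    finally:
        untrack(message_id)        # stop tracking on success and failure
    return reply_body


f = futures.Future()


def publish(message_id):
    # a real publisher sends on a socket; the reply arrives asynchronously
    f.set_result((b'reply-id', {'result': 42}))


reply = send_call(publish, lambda m: f, lambda m: None, 'msg-1', timeout=1)
assert reply == {'result': 42}
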
- -import logging - -import retrying - -from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ - import zmq_dealer_publisher_base -from oslo_messaging._drivers.zmq_driver.client import zmq_receivers -from oslo_messaging._drivers.zmq_driver.client import zmq_routing_table -from oslo_messaging._drivers.zmq_driver.client import zmq_senders -from oslo_messaging._drivers.zmq_driver import zmq_address -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._drivers.zmq_driver import zmq_updater - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class DealerPublisherProxy(zmq_dealer_publisher_base.DealerPublisherBase): - """DEALER-publisher via proxy.""" - - def __init__(self, conf, matchmaker): - sender = zmq_senders.RequestSenderProxy(conf) - receiver = zmq_receivers.ReplyReceiverProxy(conf) - super(DealerPublisherProxy, self).__init__(conf, matchmaker, sender, - receiver) - self.socket = self.sockets_manager.get_socket_to_publishers() - self.routing_table = zmq_routing_table.RoutingTable(self.conf, - self.matchmaker) - self.connection_updater = \ - PublisherConnectionUpdater(self.conf, self.matchmaker, self.socket) - - def _connect_socket(self, request): - return self.socket - - def send_call(self, request): - try: - request.routing_key = \ - self.routing_table.get_routable_host(request.target) - except retrying.RetryError: - self._raise_timeout(request) - return super(DealerPublisherProxy, self).send_call(request) - - def _get_routing_keys(self, request): - try: - if request.msg_type in zmq_names.DIRECT_TYPES: - return [self.routing_table.get_routable_host(request.target)] - else: - return \ - [zmq_address.target_to_subscribe_filter(request.target)] \ - if self.conf.oslo_messaging_zmq.use_pub_sub else \ - self.routing_table.get_all_hosts(request.target) - except retrying.RetryError: - return [] - - def _send_non_blocking(self, request): - for routing_key in self._get_routing_keys(request): - request.routing_key = routing_key - self.sender.send(self.socket, request) - - def cleanup(self): - super(DealerPublisherProxy, self).cleanup() - self.connection_updater.stop() - self.socket.close() - - -class PublisherConnectionUpdater(zmq_updater.ConnectionUpdater): - - def _update_connection(self): - publishers = self.matchmaker.get_publishers() - for pub_address, router_address in publishers: - self.socket.connect_to_host(router_address) diff --git a/oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py b/oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py deleted file mode 100644 index 9da0c05..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
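
_get_routing_keys in DealerPublisherProxy above is the proxy's addressing policy: direct patterns resolve to one host picked round-robin from the routing table, while fanout patterns resolve either to a pub/sub subscription filter or to every known host, depending on use_pub_sub. A schematic version follows; the filter format, the message-type names and FakeRoutingTable are invented for illustration.

def subscribe_filter(target):
    # stand-in for the driver's target-to-subscription-filter helper
    return '%s.%s' % (target['topic'], target.get('server') or '*')


class FakeRoutingTable(object):
    def __init__(self, hosts):
        self._hosts = list(hosts)

    def get_all_hosts(self, target):
        return list(self._hosts)

    def get_routable_host(self, target):
        self._hosts.append(self._hosts.pop(0))   # rotate for round-robin
        return self._hosts[-1]


def get_routing_keys(request, routing_table, use_pub_sub,
                     direct_types=('call', 'cast')):
    if request['msg_type'] in direct_types:
        return [routing_table.get_routable_host(request['target'])]
    if use_pub_sub:
        return [subscribe_filter(request['target'])]
    return routing_table.get_all_hosts(request['target'])


table = FakeRoutingTable(['hostA', 'hostB'])
assert get_routing_keys({'msg_type': 'call', 'target': {'topic': 't'}},
                        table, use_pub_sub=False) == ['hostA']
assert get_routing_keys({'msg_type': 'fanout', 'target': {'topic': 't'}},
                        table, use_pub_sub=True) == ['t.*']
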
- -import abc -import logging - -import six - -from oslo_messaging._drivers import common as rpc_common -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._i18n import _LE - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class UnsupportedSendPattern(rpc_common.RPCException): - - """Exception to raise from publishers in case of unsupported - sending pattern called. - """ - - def __init__(self, pattern_name): - """Construct exception object - - :param pattern_name: Message type name from zmq_names - :type pattern_name: str - """ - errmsg = _LE("Sending pattern %s is unsupported.") % pattern_name - super(UnsupportedSendPattern, self).__init__(errmsg) - - -@six.add_metaclass(abc.ABCMeta) -class PublisherBase(object): - - """Abstract publisher class - - Each publisher from zmq-driver client should implement - this interface to serve as a messages publisher. - - Publisher can send request objects from zmq_request. - """ - - def __init__(self, sockets_manager, sender, receiver): - - """Construct publisher - - Accept sockets manager, sender and receiver objects. - - :param sockets_manager: sockets manager object - :type sockets_manager: zmq_sockets_manager.SocketsManager - :param senders: request sender object - :type senders: zmq_senders.RequestSender - :param receiver: reply receiver object - :type receiver: zmq_receivers.ReplyReceiver - """ - self.sockets_manager = sockets_manager - self.conf = sockets_manager.conf - self.matchmaker = sockets_manager.matchmaker - self.sender = sender - self.receiver = receiver - - @abc.abstractmethod - def send_call(self, request): - pass - - @abc.abstractmethod - def send_cast(self, request): - pass - - @abc.abstractmethod - def send_fanout(self, request): - pass - - @abc.abstractmethod - def send_notify(self, request): - pass - - def cleanup(self): - """Cleanup publisher. Close allocated connections.""" - self.receiver.stop() - self.sockets_manager.cleanup() diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_client.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_client.py deleted file mode 100644 index 0ec27e9..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_client.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
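
PublisherBase above fixes the client-facing surface of every publisher: four send_* methods plus cleanup(), with each pattern method guarded so a request of the wrong type fails fast with UnsupportedSendPattern. A minimal concrete publisher in that shape, using plain strings for message types, purely for illustration:

class UnsupportedSendPattern(Exception):
    pass


class LoggingPublisher(object):
    """Concrete publisher that prints instead of touching a socket."""

    def _send(self, request, expected_type):
        if request['msg_type'] != expected_type:
            raise UnsupportedSendPattern(
                "Sending pattern %s is unsupported." % request['msg_type'])
        print('sending %(msg_type)s to %(target)s' % request)

    def send_call(self, request):
        self._send(request, 'call')

    def send_cast(self, request):
        self._send(request, 'cast')

    def send_fanout(self, request):
        self._send(request, 'cast-fanout')

    def send_notify(self, request):
        self._send(request, 'notify')

    def cleanup(self):
        pass


LoggingPublisher().send_cast({'msg_type': 'cast', 'target': 'topic.server'})
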
- - -from oslo_messaging._drivers import common -from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ - import zmq_dealer_publisher_direct -from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ - import zmq_dealer_publisher_proxy -from oslo_messaging._drivers.zmq_driver.client import zmq_client_base -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -zmq = zmq_async.import_zmq() - - -class WrongClientException(common.RPCException): - """Raised if client type doesn't match configuration""" - - -class ZmqClientMixDirectPubSub(zmq_client_base.ZmqClientBase): - """Client for using with direct connections and fanout over proxy: - - use_pub_sub = true - use_router_proxy = false - - """ - - def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None): - - if conf.oslo_messaging_zmq.use_router_proxy or not \ - conf.oslo_messaging_zmq.use_pub_sub: - raise WrongClientException() - - publisher_direct = \ - zmq_dealer_publisher_direct.DealerPublisherDirect(conf, matchmaker) - - publisher_proxy = \ - zmq_dealer_publisher_proxy.DealerPublisherProxy(conf, matchmaker) - - super(ZmqClientMixDirectPubSub, self).__init__( - conf, matchmaker, allowed_remote_exmods, - publishers={ - zmq_names.CAST_FANOUT_TYPE: publisher_proxy, - zmq_names.NOTIFY_TYPE: publisher_proxy, - "default": publisher_direct - } - ) - - -class ZmqClientDirect(zmq_client_base.ZmqClientBase): - """This kind of client (publishers combination) is to be used for - direct connections only: - - use_pub_sub = false - use_router_proxy = false - """ - - def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None): - - if conf.oslo_messaging_zmq.use_pub_sub or \ - conf.oslo_messaging_zmq.use_router_proxy: - raise WrongClientException() - - publisher = \ - zmq_dealer_publisher_direct.DealerPublisherDirect(conf, matchmaker) - - super(ZmqClientDirect, self).__init__( - conf, matchmaker, allowed_remote_exmods, - publishers={"default": publisher} - ) - - -class ZmqClientProxy(zmq_client_base.ZmqClientBase): - """Client for using with proxy: - - use_pub_sub = true - use_router_proxy = true - or - use_pub_sub = false - use_router_proxy = true - """ - - def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None): - - if not conf.oslo_messaging_zmq.use_router_proxy: - raise WrongClientException() - - publisher = \ - zmq_dealer_publisher_proxy.DealerPublisherProxy(conf, matchmaker) - - super(ZmqClientProxy, self).__init__( - conf, matchmaker, allowed_remote_exmods, - publishers={"default": publisher} - ) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_client_base.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_client_base.py deleted file mode 100644 index 4643ff3..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_client_base.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
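
The three client classes above partition a two-flag configuration space, and each constructor raises WrongClientException for combinations it was not built for. The same selection written out as a truth table; returning class names as strings is purely for the demonstration.

def pick_client(use_pub_sub, use_router_proxy):
    if use_router_proxy:
        return 'ZmqClientProxy'            # proxy handles both pub/sub modes
    if use_pub_sub:
        return 'ZmqClientMixDirectPubSub'  # direct RPC, fanout via proxy
    return 'ZmqClientDirect'               # direct connections only


assert pick_client(True, True) == 'ZmqClientProxy'
assert pick_client(False, True) == 'ZmqClientProxy'
assert pick_client(True, False) == 'ZmqClientMixDirectPubSub'
assert pick_client(False, False) == 'ZmqClientDirect'
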
- -from oslo_messaging._drivers.zmq_driver.client import zmq_request -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -zmq = zmq_async.import_zmq() - - -class ZmqClientBase(object): - - def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None, - publishers=None): - self.conf = conf - self.matchmaker = matchmaker - self.allowed_remote_exmods = allowed_remote_exmods or [] - - self.publishers = publishers - self.call_publisher = publishers.get(zmq_names.CALL_TYPE, - publishers["default"]) - self.cast_publisher = publishers.get(zmq_names.CAST_TYPE, - publishers["default"]) - self.fanout_publisher = publishers.get(zmq_names.CAST_FANOUT_TYPE, - publishers["default"]) - self.notify_publisher = publishers.get(zmq_names.NOTIFY_TYPE, - publishers["default"]) - - def send_call(self, target, context, message, timeout=None, retry=None): - request = zmq_request.CallRequest( - target, context=context, message=message, retry=retry, - timeout=timeout, allowed_remote_exmods=self.allowed_remote_exmods - ) - return self.call_publisher.send_call(request) - - def send_cast(self, target, context, message, retry=None): - request = zmq_request.CastRequest( - target, context=context, message=message, retry=retry - ) - self.cast_publisher.send_cast(request) - - def send_fanout(self, target, context, message, retry=None): - request = zmq_request.FanoutRequest( - target, context=context, message=message, retry=retry - ) - self.fanout_publisher.send_fanout(request) - - def send_notify(self, target, context, message, version, retry=None): - request = zmq_request.NotificationRequest( - target, context=context, message=message, retry=retry, - version=version - ) - self.notify_publisher.send_notify(request) - - def cleanup(self): - cleaned = set() - for publisher in self.publishers.values(): - if publisher not in cleaned: - publisher.cleanup() - cleaned.add(publisher) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_receivers.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_receivers.py deleted file mode 100644 index 96ebead..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_receivers.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
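
ZmqClientBase above leans on two small idioms: a publishers dict consulted with a "default" fallback per message type, and a set-based de-duplication in cleanup() so a publisher shared by several patterns is cleaned exactly once. Both are shown in isolation below, with a stand-in FakePublisher.

class FakePublisher(object):
    def __init__(self):
        self.cleaned = 0

    def cleanup(self):
        self.cleaned += 1


shared, special = FakePublisher(), FakePublisher()
publishers = {'fanout': special, 'notify': special, 'default': shared}

# dict lookup with fallback, as in ZmqClientBase.__init__
cast_publisher = publishers.get('cast', publishers['default'])
assert cast_publisher is shared

# clean each distinct publisher exactly once, as in cleanup()
cleaned = set()
for publisher in publishers.values():
    if publisher not in cleaned:
        publisher.cleanup()
        cleaned.add(publisher)
assert shared.cleaned == 1 and special.cleaned == 1
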
- -import abc -import logging -import threading - -import futurist -import six - -from oslo_messaging._drivers.zmq_driver.client import zmq_response -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -@six.add_metaclass(abc.ABCMeta) -class ReceiverBase(object): - """Base response receiving interface.""" - - def __init__(self, conf): - self.conf = conf - self._lock = threading.Lock() - self._requests = {} - self._poller = zmq_async.get_poller() - self._executor = zmq_async.get_executor(method=self._run_loop) - self._executor.execute() - - @abc.abstractproperty - def message_types(self): - """A list of supported incoming response types.""" - - def register_socket(self, socket): - """Register a socket for receiving data.""" - self._poller.register(socket, recv_method=self.recv_response) - - @abc.abstractmethod - def recv_response(self, socket): - """Receive a response and return a tuple of the form - (reply_id, message_type, message_id, response). - """ - - def track_request(self, request): - """Track a request via already registered sockets and return - a list of futures for monitoring all types of responses. - """ - futures = [] - for message_type in self.message_types: - future = futurist.Future() - self._set_future(request.message_id, message_type, future) - futures.append(future) - return futures - - def untrack_request(self, request): - """Untrack a request and stop monitoring any responses.""" - for message_type in self.message_types: - self._pop_future(request.message_id, message_type) - - def stop(self): - self._poller.close() - self._executor.stop() - - def _get_future(self, message_id, message_type): - with self._lock: - return self._requests.get((message_id, message_type)) - - def _set_future(self, message_id, message_type, future): - with self._lock: - self._requests[(message_id, message_type)] = future - - def _pop_future(self, message_id, message_type): - with self._lock: - return self._requests.pop((message_id, message_type), None) - - def _run_loop(self): - data, socket = self._poller.poll( - timeout=self.conf.oslo_messaging_zmq.rpc_poll_timeout) - if data is None: - return - reply_id, message_type, message_id, response = data - assert message_type in self.message_types, \ - "%s is not supported!" % zmq_names.message_type_str(message_type) - future = self._get_future(message_id, message_type) - if future is not None: - LOG.debug("Received %(msg_type)s for %(msg_id)s", - {"msg_type": zmq_names.message_type_str(message_type), - "msg_id": message_id}) - future.set_result((reply_id, response)) - - -class AckReceiver(ReceiverBase): - - message_types = (zmq_names.ACK_TYPE,) - - -class ReplyReceiver(ReceiverBase): - - message_types = (zmq_names.REPLY_TYPE,) - - -class ReplyReceiverProxy(ReplyReceiver): - - def recv_response(self, socket): - empty = socket.recv() - assert empty == b'', "Empty expected!" - reply_id = socket.recv() - assert reply_id is not None, "Reply ID expected!" - message_type = int(socket.recv()) - assert message_type == zmq_names.REPLY_TYPE, "Reply expected!" - message_id = socket.recv() - raw_reply = socket.recv_loaded() - assert isinstance(raw_reply, dict), "Dict expected!" 
- reply = zmq_response.Response(**raw_reply) - LOG.debug("Received reply for %s", message_id) - return reply_id, message_type, message_id, reply - - -class ReplyReceiverDirect(ReplyReceiver): - - def recv_response(self, socket): - empty = socket.recv() - assert empty == b'', "Empty expected!" - raw_reply = socket.recv_loaded() - assert isinstance(raw_reply, dict), "Dict expected!" - reply = zmq_response.Response(**raw_reply) - LOG.debug("Received reply for %s", reply.message_id) - return reply.reply_id, reply.msg_type, reply.message_id, reply - - -class AckAndReplyReceiver(ReceiverBase): - - message_types = (zmq_names.ACK_TYPE, zmq_names.REPLY_TYPE) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_request.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_request.py deleted file mode 100644 index b3f8aae..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_request.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import logging -import uuid - -import six - -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._i18n import _LE - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -@six.add_metaclass(abc.ABCMeta) -class Request(object): - - """Zmq request abstract class - - Represents socket (publisher) independent data object to publish. - Request object should contain all needed information for a publisher - to publish it, for instance: message payload, target, timeout - and retries etc. 
- """ - - def __init__(self, target, context=None, message=None, retry=None): - - """Construct request object - - :param target: Message destination target - :type target: oslo_messaging.Target - :param context: Message context - :type context: dict - :param message: Message payload to pass - :type message: dict - :param retry: an optional default connection retries configuration - None or -1 means to retry forever - 0 means no retry - N means N retries - :type retry: int - """ - - if self.msg_type not in zmq_names.MESSAGE_TYPES: - raise RuntimeError("Unknown message type!") - - self.target = target - self.context = context - self.message = message - - self.retry = retry - if not isinstance(retry, int) and retry is not None: - raise ValueError( - "retry must be an integer, not {0}".format(type(retry))) - - self.message_id = str(uuid.uuid1()) - - @abc.abstractproperty - def msg_type(self): - """ZMQ message type""" - - -class RpcRequest(Request): - - def __init__(self, *args, **kwargs): - message = kwargs.get("message") - if message['method'] is None: - errmsg = _LE("No method specified for RPC call") - LOG.error(_LE("No method specified for RPC call")) - raise KeyError(errmsg) - - super(RpcRequest, self).__init__(*args, **kwargs) - - -class CallRequest(RpcRequest): - - msg_type = zmq_names.CALL_TYPE - - def __init__(self, *args, **kwargs): - self.allowed_remote_exmods = kwargs.pop("allowed_remote_exmods") - - self.timeout = kwargs.pop("timeout") - if self.timeout is None: - raise ValueError("Timeout should be specified for a RPC call!") - elif not isinstance(self.timeout, int): - raise ValueError( - "timeout must be an integer, not {0}" - .format(type(self.timeout))) - - super(CallRequest, self).__init__(*args, **kwargs) - - -class CastRequest(RpcRequest): - - msg_type = zmq_names.CAST_TYPE - - -class FanoutRequest(RpcRequest): - - msg_type = zmq_names.CAST_FANOUT_TYPE - - -class NotificationRequest(Request): - - msg_type = zmq_names.NOTIFY_TYPE - - def __init__(self, *args, **kwargs): - self.version = kwargs.pop("version") - super(NotificationRequest, self).__init__(*args, **kwargs) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_response.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_response.py deleted file mode 100644 index 35c38a8..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_response.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_messaging._drivers.zmq_driver import zmq_names - - -class Response(object): - - def __init__(self, msg_type=None, message_id=None, - reply_id=None, reply_body=None, failure=None): - - self._msg_type = msg_type - self._message_id = message_id - self._reply_id = reply_id - self._reply_body = reply_body - self._failure = failure - - @property - def msg_type(self): - return self._msg_type - - @property - def message_id(self): - return self._message_id - - @property - def reply_id(self): - return self._reply_id - - @property - def reply_body(self): - return self._reply_body - - @property - def failure(self): - return self._failure - - def to_dict(self): - return {zmq_names.FIELD_MSG_TYPE: self._msg_type, - zmq_names.FIELD_MSG_ID: self._message_id, - zmq_names.FIELD_REPLY_ID: self._reply_id, - zmq_names.FIELD_REPLY_BODY: self._reply_body, - zmq_names.FIELD_FAILURE: self._failure} - - def __str__(self): - return str(self.to_dict()) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_routing_table.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_routing_table.py deleted file mode 100644 index 16de0bc..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_routing_table.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -zmq = zmq_async.import_zmq() - - -class RoutingTable(object): - """This class implements local routing-table cache - taken from matchmaker. Its purpose is to give the next routable - host id (remote DEALER's id) by request for specific target in - round-robin fashion. 
- """ - - def __init__(self, conf, matchmaker): - self.conf = conf - self.matchmaker = matchmaker - self.routing_table = {} - self.routable_hosts = {} - - def get_all_hosts(self, target): - self._update_routing_table(target) - return list(self.routable_hosts.get(str(target)) or []) - - def get_routable_host(self, target): - self._update_routing_table(target) - hosts_for_target = self.routable_hosts[str(target)] - host = hosts_for_target.pop(0) - if not hosts_for_target: - self._renew_routable_hosts(target) - return host - - def _is_tm_expired(self, tm): - return 0 <= self.conf.oslo_messaging_zmq.zmq_target_expire \ - <= time.time() - tm - - def _update_routing_table(self, target): - routing_record = self.routing_table.get(str(target)) - if routing_record is None: - self._fetch_hosts(target) - self._renew_routable_hosts(target) - elif self._is_tm_expired(routing_record[1]): - self._fetch_hosts(target) - - def _fetch_hosts(self, target): - self.routing_table[str(target)] = (self.matchmaker.get_hosts( - target, zmq_names.socket_type_str(zmq.DEALER)), time.time()) - - def _renew_routable_hosts(self, target): - hosts, _ = self.routing_table[str(target)] - self.routable_hosts[str(target)] = list(hosts) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_senders.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_senders.py deleted file mode 100644 index 3b83d9a..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_senders.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import logging - -import six - -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -@six.add_metaclass(abc.ABCMeta) -class SenderBase(object): - """Base request/ack/reply sending interface.""" - - def __init__(self, conf): - self.conf = conf - - @abc.abstractmethod - def send(self, socket, message): - pass - - -class RequestSender(SenderBase): - pass - - -class ReplySender(SenderBase): - pass - - -class RequestSenderProxy(RequestSender): - - def send(self, socket, request): - socket.send(b'', zmq.SNDMORE) - socket.send(six.b(str(request.msg_type)), zmq.SNDMORE) - socket.send(six.b(request.routing_key), zmq.SNDMORE) - socket.send(six.b(request.message_id), zmq.SNDMORE) - socket.send_dumped(request.context, zmq.SNDMORE) - socket.send_dumped(request.message) - - LOG.debug("->[proxy:%(addr)s] Sending %(msg_type)s message " - "%(msg_id)s to target %(target)s", - {"addr": list(socket.connections), - "msg_type": zmq_names.message_type_str(request.msg_type), - "msg_id": request.message_id, - "target": request.target}) - - -class ReplySenderProxy(ReplySender): - - def send(self, socket, reply): - LOG.debug("Replying to %s", reply.message_id) - - assert reply.msg_type == zmq_names.REPLY_TYPE, "Reply expected!" 
- - socket.send(b'', zmq.SNDMORE) - socket.send(six.b(str(reply.msg_type)), zmq.SNDMORE) - socket.send(reply.reply_id, zmq.SNDMORE) - socket.send(reply.message_id, zmq.SNDMORE) - socket.send_dumped(reply.to_dict()) - - -class RequestSenderDirect(RequestSender): - - def send(self, socket, request): - socket.send(b'', zmq.SNDMORE) - socket.send(six.b(str(request.msg_type)), zmq.SNDMORE) - socket.send_string(request.message_id, zmq.SNDMORE) - socket.send_dumped(request.context, zmq.SNDMORE) - socket.send_dumped(request.message) - - LOG.debug("Sending %(msg_type)s message %(msg_id)s to " - "target %(target)s", - {"msg_type": zmq_names.message_type_str(request.msg_type), - "msg_id": request.message_id, - "target": request.target}) - - -class ReplySenderDirect(ReplySender): - - def send(self, socket, reply): - LOG.debug("Replying to %s", reply.message_id) - - assert reply.msg_type == zmq_names.REPLY_TYPE, "Reply expected!" - - socket.send(reply.reply_id, zmq.SNDMORE) - socket.send(b'', zmq.SNDMORE) - socket.send_dumped(reply.to_dict()) diff --git a/oslo_messaging/_drivers/zmq_driver/client/zmq_sockets_manager.py b/oslo_messaging/_drivers/zmq_driver/client/zmq_sockets_manager.py deleted file mode 100644 index aa82b84..0000000 --- a/oslo_messaging/_drivers/zmq_driver/client/zmq_sockets_manager.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
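Reconstructed from the send() calls above, the direct request path puts the following frames on the wire:

    # Multipart layout produced by RequestSenderDirect.send():
    #   frame 0: b''                     -- empty delimiter
    #   frame 1: six.b(str(msg_type))    -- e.g. the CALL type id
    #   frame 2: message_id              -- uuid string
    #   frame 3: dumped request context  -- via socket.send_dumped()
    #   frame 4: dumped message body
    # ReplySenderDirect instead leads with the ROUTER envelope:
    #   frame 0: reply_id, frame 1: b'', frame 2: dumped Response dict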
- -import time - -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._drivers.zmq_driver import zmq_socket - -zmq = zmq_async.import_zmq() - - -class SocketsManager(object): - - def __init__(self, conf, matchmaker, listener_type, socket_type): - self.conf = conf - self.matchmaker = matchmaker - self.listener_type = listener_type - self.socket_type = socket_type - self.zmq_context = zmq.Context() - self.outbound_sockets = {} - self.socket_to_publishers = None - self.socket_to_routers = None - - def get_hosts(self, target): - return self.matchmaker.get_hosts( - target, zmq_names.socket_type_str(self.listener_type)) - - @staticmethod - def _key_from_target(target): - return target.topic if target.fanout else str(target) - - def _get_hosts_and_connect(self, socket, target): - hosts = self.get_hosts(target) - self._connect_to_hosts(socket, target, hosts) - - def _track_socket(self, socket, target): - key = self._key_from_target(target) - self.outbound_sockets[key] = (socket, time.time()) - - def _connect_to_hosts(self, socket, target, hosts): - for host in hosts: - socket.connect_to_host(host) - self._track_socket(socket, target) - - def _check_for_new_hosts(self, target): - key = self._key_from_target(target) - socket, tm = self.outbound_sockets[key] - if 0 <= self.conf.oslo_messaging_zmq.zmq_target_expire \ - <= time.time() - tm: - self._get_hosts_and_connect(socket, target) - return socket - - def get_socket(self, target): - key = self._key_from_target(target) - if key in self.outbound_sockets: - socket = self._check_for_new_hosts(target) - else: - socket = zmq_socket.ZmqSocket(self.conf, self.zmq_context, - self.socket_type, immediate=False) - self._get_hosts_and_connect(socket, target) - return socket - - def get_socket_to_publishers(self): - if self.socket_to_publishers is not None: - return self.socket_to_publishers - self.socket_to_publishers = zmq_socket.ZmqSocket( - self.conf, self.zmq_context, self.socket_type) - publishers = self.matchmaker.get_publishers() - for pub_address, router_address in publishers: - self.socket_to_publishers.connect_to_host(router_address) - return self.socket_to_publishers - - def get_socket_to_routers(self): - if self.socket_to_routers is not None: - return self.socket_to_routers - self.socket_to_routers = zmq_socket.ZmqSocket( - self.conf, self.zmq_context, self.socket_type) - routers = self.matchmaker.get_routers() - for router_address in routers: - self.socket_to_routers.connect_to_host(router_address) - return self.socket_to_routers - - def cleanup(self): - for socket, tm in self.outbound_sockets.values(): - socket.close() diff --git a/oslo_messaging/_drivers/zmq_driver/matchmaker/__init__.py b/oslo_messaging/_drivers/zmq_driver/matchmaker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/matchmaker/base.py b/oslo_messaging/_drivers/zmq_driver/matchmaker/base.py deleted file mode 100644 index 65ade7a..0000000 --- a/oslo_messaging/_drivers/zmq_driver/matchmaker/base.py +++ /dev/null @@ -1,167 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import collections - -import six - -from oslo_messaging._drivers.zmq_driver import zmq_address - - -@six.add_metaclass(abc.ABCMeta) -class MatchMakerBase(object): - - def __init__(self, conf, *args, **kwargs): - super(MatchMakerBase, self).__init__() - self.conf = conf - self.url = kwargs.get('url') - - @abc.abstractmethod - def register_publisher(self, hostname): - """Register publisher on nameserver. - - This works for PUB-SUB only - - :param hostname: host for the topic in "host:port" format - host for back-chatter in "host:port" format - :type hostname: tuple - """ - - @abc.abstractmethod - def unregister_publisher(self, hostname): - """Unregister publisher on nameserver. - - This works for PUB-SUB only - - :param hostname: host for the topic in "host:port" format - host for back-chatter in "host:port" format - :type hostname: tuple - """ - - @abc.abstractmethod - def get_publishers(self): - """Get all publisher-hosts from nameserver. - - :returns: a list of tuples of strings "hostname:port" hosts - """ - - @abc.abstractmethod - def register_router(self, hostname): - """Register router on the nameserver. - - This works for ROUTER proxy only - - :param hostname: host for the topic in "host:port" format - :type hostname: string - """ - - @abc.abstractmethod - def unregister_router(self, hostname): - """Unregister router on the nameserver. - - This works for ROUTER proxy only - - :param hostname: host for the topic in "host:port" format - :type hostname: string - """ - - @abc.abstractmethod - def get_routers(self): - """Get all router-hosts from nameserver. - - :returns: a list of strings "hostname:port" hosts - """ - - @abc.abstractmethod - def register(self, target, hostname, listener_type, expire=-1): - """Register target on nameserver. - If record already exists and has expiration timeout it will be - updated. Existing records without timeout will stay untouched - - :param target: the target for host - :type target: Target - :param hostname: host for the topic in "host:port" format - :type hostname: String - :param listener_type: Listener socket type ROUTER, SUB etc. - :type listener_type: String - :param expire: Record expiration timeout - :type expire: int - """ - - @abc.abstractmethod - def unregister(self, target, hostname, listener_type): - """Unregister target from nameserver. - - :param target: the target for host - :type target: Target - :param hostname: host for the topic in "host:port" format - :type hostname: String - :param listener_type: Listener socket type ROUTER, SUB etc. - :type listener_type: String - """ - - @abc.abstractmethod - def get_hosts(self, target, listener_type): - """Get all hosts from nameserver by target. 
- - :param target: the default target for invocations - :type target: Target - :returns: a list of "hostname:port" hosts - """ - - -class DummyMatchMaker(MatchMakerBase): - - def __init__(self, conf, *args, **kwargs): - super(DummyMatchMaker, self).__init__(conf, *args, **kwargs) - - self._cache = collections.defaultdict(list) - self._publishers = set() - self._routers = set() - - def register_publisher(self, hostname): - if hostname not in self._publishers: - self._publishers.add(hostname) - - def unregister_publisher(self, hostname): - if hostname in self._publishers: - self._publishers.remove(hostname) - - def get_publishers(self): - return list(self._publishers) - - def register_router(self, hostname): - if hostname not in self._routers: - self._routers.add(hostname) - - def unregister_router(self, hostname): - if hostname in self._routers: - self._routers.remove(hostname) - - def get_routers(self): - return list(self._routers) - - def register(self, target, hostname, listener_type, expire=-1): - key = zmq_address.target_to_key(target, listener_type) - if hostname not in self._cache[key]: - self._cache[key].append(hostname) - - def unregister(self, target, hostname, listener_type): - key = zmq_address.target_to_key(target, listener_type) - if hostname in self._cache[key]: - self._cache[key].remove(hostname) - - def get_hosts(self, target, listener_type): - key = zmq_address.target_to_key(target, listener_type) - return self._cache[key] diff --git a/oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py b/oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py deleted file mode 100644 index 440c00b..0000000 --- a/oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py +++ /dev/null @@ -1,204 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import logging - -from oslo_config import cfg -from oslo_utils import importutils - -from oslo_messaging._drivers.zmq_driver.matchmaker import base -from oslo_messaging._drivers.zmq_driver import zmq_address -from retrying import retry - -redis = importutils.try_import('redis') -redis_sentinel = importutils.try_import('redis.sentinel') -LOG = logging.getLogger(__name__) - - -matchmaker_redis_opts = [ - cfg.StrOpt('host', - default='127.0.0.1', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='Host to locate redis.'), - cfg.PortOpt('port', - default=6379, - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='Use this port to connect to redis host.'), - cfg.StrOpt('password', - default='', - secret=True, - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='Password for Redis server (optional).'), - cfg.ListOpt('sentinel_hosts', - default=[], - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - help='List of Redis Sentinel hosts (fault tolerance mode) e.g.\ - [host:port, host1:port ... 
]'), - cfg.StrOpt('sentinel_group_name', - default='oslo-messaging-zeromq', - help='Redis replica set name.'), - cfg.IntOpt('wait_timeout', - default=5000, - help='Time in ms to wait between connection attempts.'), - cfg.IntOpt('check_timeout', - default=60000, - help='Time in ms to wait before the transaction is killed.'), - cfg.IntOpt('socket_timeout', - default=10000, - help='Timeout in ms on blocking socket operations'), -] - -_PUBLISHERS_KEY = "PUBLISHERS" -_ROUTERS_KEY = "ROUTERS" -_RETRY_METHODS = ("get_hosts", "get_publishers", "get_routers") - - -def retry_if_connection_error(ex): - return isinstance(ex, redis.ConnectionError) - - -def retry_if_empty(hosts): - return not hosts - - -def apply_retrying(obj, cfg): - for attr_name, attr in inspect.getmembers(obj): - if not (inspect.ismethod(attr) or inspect.isfunction(attr)): - continue - if attr_name in _RETRY_METHODS: - setattr( - obj, - attr_name, - retry( - wait_fixed=cfg.matchmaker_redis.wait_timeout, - stop_max_delay=cfg.matchmaker_redis.check_timeout, - retry_on_exception=retry_if_connection_error, - retry_on_result=retry_if_empty - )(attr)) - - -class RedisMatchMaker(base.MatchMakerBase): - - def __init__(self, conf, *args, **kwargs): - super(RedisMatchMaker, self).__init__(conf, *args, **kwargs) - self.conf.register_opts(matchmaker_redis_opts, "matchmaker_redis") - - self.sentinel_hosts = self._extract_sentinel_options() - if not self.sentinel_hosts: - self.standalone_redis = self._extract_standalone_redis_options() - self._redis = redis.StrictRedis( - host=self.standalone_redis["host"], - port=self.standalone_redis["port"], - password=self.standalone_redis["password"] - ) - else: - socket_timeout = self.conf.matchmaker_redis.socket_timeout / 1000. - sentinel = redis.sentinel.Sentinel( - sentinels=self.sentinel_hosts, - socket_timeout=socket_timeout - ) - - self._redis = sentinel.master_for( - self.conf.matchmaker_redis.sentinel_group_name, - socket_timeout=socket_timeout - ) - apply_retrying(self, self.conf) - - def _extract_sentinel_options(self): - if self.url and self.url.hosts: - if len(self.url.hosts) > 1: - return [(host.hostname, host.port) for host in self.url.hosts] - elif self.conf.matchmaker_redis.sentinel_hosts: - s = self.conf.matchmaker_redis.sentinel_hosts - return [tuple(i.split(":")) for i in s] - - def _extract_standalone_redis_options(self): - if self.url and self.url.hosts: - redis_host = self.url.hosts[0] - return {"host": redis_host.hostname, - "port": redis_host.port, - "password": redis_host.password} - else: - return {"host": self.conf.matchmaker_redis.host, - "port": self.conf.matchmaker_redis.port, - "password": self.conf.matchmaker_redis.password} - - def _add_key_with_expire(self, key, value, expire): - self._redis.sadd(key, value) - if expire > 0: - self._redis.expire(key, expire) - - def register_publisher(self, hostname, expire=-1): - host_str = ",".join(hostname) - self._add_key_with_expire(_PUBLISHERS_KEY, host_str, expire) - - def unregister_publisher(self, hostname): - host_str = ",".join(hostname) - self._redis.srem(_PUBLISHERS_KEY, host_str) - - def get_publishers(self): - hosts = [] - hosts.extend([tuple(host_str.split(",")) - for host_str in - self._get_hosts_by_key(_PUBLISHERS_KEY)]) - return hosts - - def register_router(self, hostname, expire=-1): - self._add_key_with_expire(_ROUTERS_KEY, hostname, expire) - - def unregister_router(self, hostname): - self._redis.srem(_ROUTERS_KEY, hostname) - - def get_routers(self): - return self._get_hosts_by_key(_ROUTERS_KEY) - - def 
_get_hosts_by_key(self, key): - return self._redis.smembers(key) - - def register(self, target, hostname, listener_type, expire=-1): - if target.topic and target.server: - key = zmq_address.target_to_key(target, listener_type) - self._add_key_with_expire(key, hostname, expire) - - if target.topic: - key = zmq_address.prefix_str(target.topic, listener_type) - self._add_key_with_expire(key, hostname, expire) - - def unregister(self, target, hostname, listener_type): - if target.topic and target.server: - key = zmq_address.target_to_key(target, listener_type) - self._redis.srem(key, hostname) - - if target.topic: - key = zmq_address.prefix_str(target.topic, listener_type) - self._redis.srem(key, hostname) - - def get_hosts(self, target, listener_type): - LOG.debug("[Redis] get_hosts for target %s", target) - - hosts = [] - - if target.topic and target.server: - key = zmq_address.target_to_key(target, listener_type) - hosts.extend(self._get_hosts_by_key(key)) - - if not hosts and target.topic: - key = zmq_address.prefix_str(target.topic, listener_type) - hosts.extend(self._get_hosts_by_key(key)) - - return hosts diff --git a/oslo_messaging/_drivers/zmq_driver/poller/__init__.py b/oslo_messaging/_drivers/zmq_driver/poller/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/poller/green_poller.py b/oslo_messaging/_drivers/zmq_driver/poller/green_poller.py deleted file mode 100644 index 591b2ac..0000000 --- a/oslo_messaging/_drivers/zmq_driver/poller/green_poller.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
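To make the key scheme above concrete, here is the approximate Redis state after a server registers a ROUTER listener; the address and expire values are invented, and the key names follow target_to_key() and prefix_str() from zmq_address:

    # register(Target(topic='compute', server='host-1'),
    #          '10.0.0.5:9501', 'ROUTER', expire=180) issues roughly:
    #   SADD   ROUTER_compute.host-1 "10.0.0.5:9501"   # topic.server record
    #   EXPIRE ROUTER_compute.host-1 180
    #   SADD   ROUTER_compute        "10.0.0.5:9501"   # topic-only record
    #   EXPIRE ROUTER_compute        180
    # get_hosts() tries the topic.server key first and falls back to the
    # topic-only key when no direct host is registered.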
- -import threading - -import eventlet - -from oslo_messaging._drivers.zmq_driver import zmq_poller - - -class GreenPoller(zmq_poller.ZmqPoller): - - def __init__(self): - self.incoming_queue = eventlet.queue.LightQueue() - self.green_pool = eventlet.GreenPool() - self.thread_by_socket = {} - - def register(self, socket, recv_method=None): - if socket not in self.thread_by_socket: - self.thread_by_socket[socket] = self.green_pool.spawn( - self._socket_receive, socket, recv_method) - - def _socket_receive(self, socket, recv_method=None): - while True: - if recv_method: - incoming = recv_method(socket) - else: - incoming = socket.recv_multipart() - self.incoming_queue.put((incoming, socket)) - eventlet.sleep() - - def poll(self, timeout=None): - try: - return self.incoming_queue.get(timeout=timeout) - except eventlet.queue.Empty: - return None, None - - def close(self): - for thread in self.thread_by_socket.values(): - thread.kill() - - self.thread_by_socket = {} - - -class GreenExecutor(zmq_poller.Executor): - - def __init__(self, method): - self._method = method - super(GreenExecutor, self).__init__(None) - self._done = threading.Event() - - def _loop(self): - while not self._done.is_set(): - self._method() - eventlet.sleep() - - def execute(self): - self.thread = eventlet.spawn(self._loop) - - def wait(self): - if self.thread is not None: - self.thread.wait() - - def stop(self): - if self.thread is not None: - self.thread.kill() - - def done(self): - self._done.set() diff --git a/oslo_messaging/_drivers/zmq_driver/poller/threading_poller.py b/oslo_messaging/_drivers/zmq_driver/poller/threading_poller.py deleted file mode 100644 index edcba7d..0000000 --- a/oslo_messaging/_drivers/zmq_driver/poller/threading_poller.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
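The poller contract implemented above is small: register each socket once, then call poll() in a loop. A sketch of a consuming loop, with socket, receiver, running, and handle() as hypothetical stand-ins:

    poller = GreenPoller()
    poller.register(socket, recv_method=receiver.recv_response)

    while running:
        incoming, sock = poller.poll(timeout=1.0)
        if incoming is None:      # queue stayed empty for the whole timeout
            continue
        handle(incoming, sock)    # hypothetical dispatcher

    poller.close()                # kills the per-socket green threads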
- -import logging -import threading - -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_poller - -zmq = zmq_async.import_zmq() - -LOG = logging.getLogger(__name__) - - -class ThreadingPoller(zmq_poller.ZmqPoller): - - def __init__(self): - self.poller = zmq.Poller() - self.recv_methods = {} - - def register(self, socket, recv_method=None): - if socket in self.recv_methods: - return - LOG.debug("Registering socket") - if recv_method is not None: - self.recv_methods[socket] = recv_method - self.poller.register(socket, zmq.POLLIN) - - def poll(self, timeout=None): - if timeout is not None and timeout > 0: - timeout *= 1000 # convert seconds to milliseconds - - sockets = {} - try: - sockets = dict(self.poller.poll(timeout=timeout)) - except zmq.ZMQError as e: - LOG.debug("Polling terminated with error: %s", e) - - if not sockets: - return None, None - for socket in sockets: - if socket in self.recv_methods: - return self.recv_methods[socket](socket), socket - else: - return socket.recv_multipart(), socket - - def close(self): - pass # Nothing to do for threading poller - - -class ThreadingExecutor(zmq_poller.Executor): - - def __init__(self, method): - self._method = method - super(ThreadingExecutor, self).__init__( - threading.Thread(target=self._loop)) - self._stop = threading.Event() - - def _loop(self): - while not self._stop.is_set(): - self._method() - - def execute(self): - self.thread.daemon = True - self.thread.start() - - def stop(self): - self._stop.set() - - def wait(self): - pass - - def done(self): - self._stop.set() diff --git a/oslo_messaging/_drivers/zmq_driver/proxy/__init__.py b/oslo_messaging/_drivers/zmq_driver/proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/proxy/zmq_proxy.py b/oslo_messaging/_drivers/zmq_driver/proxy/zmq_proxy.py deleted file mode 100644 index 15c7774..0000000 --- a/oslo_messaging/_drivers/zmq_driver/proxy/zmq_proxy.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import socket - -from stevedore import driver - -from oslo_config import cfg -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._i18n import _LI - -zmq = zmq_async.import_zmq() -LOG = logging.getLogger(__name__) - - -zmq_proxy_opts = [ - cfg.StrOpt('host', default=socket.gethostname(), - help='Hostname (FQDN) of current proxy' - ' an ethernet interface, or IP address.'), - - cfg.IntOpt('frontend_port', default=0, - help='Front-end ROUTER port number. Zero means random.'), - - cfg.IntOpt('backend_port', default=0, - help='Back-end ROUTER port number. Zero means random.'), - - cfg.IntOpt('publisher_port', default=0, - help='Publisher port number. Zero means random.'), -] - - -class ZmqProxy(object): - """Wrapper class for Publishers and Routers proxies. 
- The main reason to have a proxy is high complexity of TCP sockets number - growth with direct connections (when services connect directly to - each other). The general complexity for ZeroMQ+Openstack deployment - with direct connections may be square(N) (where N is a number of nodes - in deployment). With proxy the complexity is reduced to k*N where - k is a number of services. - - Currently there are 2 types of proxy, they are Publishers and Routers. - Publisher proxy serves for PUB-SUB pattern implementation where - Publisher is a server which performs broadcast to subscribers. - Router is used for direct message types in case of number of TCP socket - connections is critical for specific deployment. Generally 3 publishers - is enough for deployment. - - Router is used for direct messages in order to reduce the number of - allocated TCP sockets in controller. The list of requirements to Router: - - 1. There may be any number of routers in the deployment. Routers are - registered in a name-server and client connects dynamically to all of - them performing load balancing. - 2. Routers should be transparent for clients and servers. Which means - it doesn't change the way of messaging between client and the final - target by hiding the target from a client. - 3. Router may be restarted or shut down at any time losing all messages - in its queue. Smart retrying (based on acknowledgements from server - side) and load balancing between other Router instances from the - client side should handle the situation. - 4. Router takes all the routing information from message envelope and - doesn't perform Target-resolution in any way. - 5. Routers don't talk to each other and no synchronization is needed. - 6. Load balancing is performed by the client in a round-robin fashion. - - Those requirements should limit the performance impact caused by using - of proxies making proxies as lightweight as possible. - - """ - - def __init__(self, conf, proxy_cls): - super(ZmqProxy, self).__init__() - self.conf = conf - self.matchmaker = driver.DriverManager( - 'oslo.messaging.zmq.matchmaker', - self.conf.oslo_messaging_zmq.rpc_zmq_matchmaker, - ).driver(self.conf) - self.context = zmq.Context() - self.proxy = proxy_cls(conf, self.context, self.matchmaker) - - def run(self): - self.proxy.run() - - def close(self): - LOG.info(_LI("Proxy shutting down ...")) - self.proxy.cleanup() diff --git a/oslo_messaging/_drivers/zmq_driver/proxy/zmq_publisher_proxy.py b/oslo_messaging/_drivers/zmq_driver/proxy/zmq_publisher_proxy.py deleted file mode 100644 index 727b419..0000000 --- a/oslo_messaging/_drivers/zmq_driver/proxy/zmq_publisher_proxy.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
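To put numbers on the complexity claim in the docstring above: a deployment of N = 100 directly connected services needs on the order of N*(N-1)/2, i.e. about 5,000 distinct TCP connections, whereas funneling the same traffic through k = 3 proxies needs only about k*N = 300 (k here being the number of proxies).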
- -import logging - -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._drivers.zmq_driver import zmq_socket - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class PublisherProxy(object): - """PUB/SUB based request publisher - - The publisher intended to be used for Fanout and Notify - multi-sending patterns. - - It differs from direct publishers like DEALER or PUSH based - in a way it treats matchmaker. Here all publishers register - in the matchmaker. Subscribers (server-side) take the list - of publishers and connect to all of them but subscribe - only to a specific topic-filtering tag generated from the - Target object. - """ - - def __init__(self, conf, matchmaker): - super(PublisherProxy, self).__init__() - self.conf = conf - self.zmq_context = zmq.Context() - self.matchmaker = matchmaker - - port = conf.zmq_proxy_opts.publisher_port - - self.socket = zmq_socket.ZmqFixedPortSocket( - self.conf, self.zmq_context, zmq.PUB, conf.zmq_proxy_opts.host, - port) if port != 0 else \ - zmq_socket.ZmqRandomPortSocket( - self.conf, self.zmq_context, zmq.PUB, conf.zmq_proxy_opts.host) - - self.host = self.socket.connect_address - - def send_request(self, multipart_message): - message_type = multipart_message.pop(0) - assert message_type in (zmq_names.CAST_FANOUT_TYPE, - zmq_names.NOTIFY_TYPE), "Fanout expected!" - topic_filter = multipart_message.pop(0) - reply_id = multipart_message.pop(0) - message_id = multipart_message.pop(0) - assert reply_id is not None, "Reply id expected!" - - self.socket.send(topic_filter, zmq.SNDMORE) - self.socket.send(message_id, zmq.SNDMORE) - self.socket.send_multipart(multipart_message) - - LOG.debug("Publishing message %(message_id)s on [%(topic)s]", - {"topic": topic_filter, - "message_id": message_id}) - - def cleanup(self): - self.socket.close() diff --git a/oslo_messaging/_drivers/zmq_driver/proxy/zmq_queue_proxy.py b/oslo_messaging/_drivers/zmq_driver/proxy/zmq_queue_proxy.py deleted file mode 100644 index 4c747ab..0000000 --- a/oslo_messaging/_drivers/zmq_driver/proxy/zmq_queue_proxy.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
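Reconstructed from send_request() above, the PUB socket emits the frames below; SUB sockets match on a prefix of frame 0, which is why the topic filter must lead:

    #   frame 0: topic filter, e.g. b'compute/host-1'
    #            (see zmq_address.target_to_subscribe_filter())
    #   frame 1: message_id
    #   frame 2..n: remaining multipart payload (context, body)
    # The popped reply_id is checked and then dropped: fanout and notify
    # messages have no reply path through this proxy.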
- -import logging - -import six - -from oslo_messaging._drivers.zmq_driver.proxy import zmq_publisher_proxy -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._drivers.zmq_driver import zmq_socket -from oslo_messaging._drivers.zmq_driver import zmq_updater -from oslo_messaging._i18n import _LI - -zmq = zmq_async.import_zmq() -LOG = logging.getLogger(__name__) - - -class UniversalQueueProxy(object): - - def __init__(self, conf, context, matchmaker): - self.conf = conf - self.context = context - super(UniversalQueueProxy, self).__init__() - self.matchmaker = matchmaker - self.poller = zmq_async.get_poller() - - port = conf.zmq_proxy_opts.frontend_port - host = conf.zmq_proxy_opts.host - self.fe_router_socket = zmq_socket.ZmqFixedPortSocket( - conf, context, zmq.ROUTER, host, - conf.zmq_proxy_opts.frontend_port) if port != 0 else \ - zmq_socket.ZmqRandomPortSocket(conf, context, zmq.ROUTER, host) - - port = conf.zmq_proxy_opts.backend_port - self.be_router_socket = zmq_socket.ZmqFixedPortSocket( - conf, context, zmq.ROUTER, host, - conf.zmq_proxy_opts.backend_port) if port != 0 else \ - zmq_socket.ZmqRandomPortSocket(conf, context, zmq.ROUTER, host) - - self.poller.register(self.fe_router_socket.handle, - self._receive_in_request) - self.poller.register(self.be_router_socket.handle, - self._receive_in_request) - - self.pub_publisher = zmq_publisher_proxy.PublisherProxy( - conf, matchmaker) - - self._router_updater = RouterUpdater( - conf, matchmaker, self.pub_publisher.host, - self.fe_router_socket.connect_address, - self.be_router_socket.connect_address) - - def run(self): - message, socket = self.poller.poll() - if message is None: - return - - msg_type = message[0] - if self.conf.oslo_messaging_zmq.use_pub_sub and \ - msg_type in (zmq_names.CAST_FANOUT_TYPE, - zmq_names.NOTIFY_TYPE): - self.pub_publisher.send_request(message) - else: - self._redirect_message(self.be_router_socket.handle - if socket is self.fe_router_socket.handle - else self.fe_router_socket.handle, message) - - @staticmethod - def _receive_in_request(socket): - try: - reply_id = socket.recv() - assert reply_id is not None, "Valid id expected" - empty = socket.recv() - assert empty == b'', "Empty delimiter expected" - msg_type = int(socket.recv()) - routing_key = socket.recv() - payload = socket.recv_multipart() - payload.insert(0, reply_id) - payload.insert(0, routing_key) - payload.insert(0, msg_type) - return payload - except (AssertionError, ValueError, zmq.ZMQError): - LOG.error("Received message with wrong format") - return None - - @staticmethod - def _redirect_message(socket, multipart_message): - message_type = multipart_message.pop(0) - routing_key = multipart_message.pop(0) - reply_id = multipart_message.pop(0) - message_id = multipart_message[0] - socket.send(routing_key, zmq.SNDMORE) - socket.send(b'', zmq.SNDMORE) - socket.send(reply_id, zmq.SNDMORE) - socket.send(six.b(str(message_type)), zmq.SNDMORE) - LOG.debug("Dispatching %(msg_type)s message %(msg_id)s - to %(rkey)s" % - {"msg_type": zmq_names.message_type_str(message_type), - "msg_id": message_id, - "rkey": routing_key}) - socket.send_multipart(multipart_message) - - def cleanup(self): - self.fe_router_socket.close() - self.be_router_socket.close() - self.pub_publisher.cleanup() - self._router_updater.cleanup() - - -class RouterUpdater(zmq_updater.UpdaterBase): - """This entity performs periodic async updates - from router proxy to the matchmaker. 
- """ - - def __init__(self, conf, matchmaker, publisher_address, fe_router_address, - be_router_address): - self.publisher_address = publisher_address - self.fe_router_address = fe_router_address - self.be_router_address = be_router_address - super(RouterUpdater, self).__init__(conf, matchmaker, - self._update_records) - - def _update_records(self): - self.matchmaker.register_publisher( - (self.publisher_address, self.fe_router_address), - expire=self.conf.oslo_messaging_zmq.zmq_target_expire) - LOG.info(_LI("[PUB:%(pub)s, ROUTER:%(router)s] Update PUB publisher"), - {"pub": self.publisher_address, - "router": self.fe_router_address}) - self.matchmaker.register_router( - self.be_router_address, - expire=self.conf.oslo_messaging_zmq.zmq_target_expire) - LOG.info(_LI("[Backend ROUTER:%(router)s] Update ROUTER"), - {"router": self.be_router_address}) - - def cleanup(self): - super(RouterUpdater, self).cleanup() - self.matchmaker.unregister_publisher( - (self.publisher_address, self.fe_router_address)) - self.matchmaker.unregister_router( - self.be_router_address) diff --git a/oslo_messaging/_drivers/zmq_driver/server/__init__.py b/oslo_messaging/_drivers/zmq_driver/server/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/server/consumers/__init__.py b/oslo_messaging/_drivers/zmq_driver/server/consumers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_consumer_base.py b/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_consumer_base.py deleted file mode 100644 index 69a7077..0000000 --- a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_consumer_base.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import abc
-import logging
-
-import six
-
-from oslo_messaging._drivers import common as rpc_common
-from oslo_messaging._drivers.zmq_driver import zmq_address
-from oslo_messaging._drivers.zmq_driver import zmq_async
-from oslo_messaging._drivers.zmq_driver import zmq_names
-from oslo_messaging._drivers.zmq_driver import zmq_socket
-from oslo_messaging._drivers.zmq_driver import zmq_updater
-from oslo_messaging._i18n import _LE
-
-LOG = logging.getLogger(__name__)
-
-zmq = zmq_async.import_zmq()
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ConsumerBase(object):
-
-    def __init__(self, conf, poller, server):
-        self.conf = conf
-        self.poller = poller
-        self.server = server
-        self.sockets = []
-        self.context = zmq.Context()
-
-    def stop(self):
-        """Stop consumer polling/updates"""
-        pass
-
-    @abc.abstractmethod
-    def receive_message(self, socket):
-        """Method for poller - receiving message routine"""
-
-    def cleanup(self):
-        for socket in self.sockets:
-            if not socket.handle.closed:
-                socket.close()
-        self.sockets = []
-
-
-class SingleSocketConsumer(ConsumerBase):
-
-    def __init__(self, conf, poller, server, socket_type):
-        super(SingleSocketConsumer, self).__init__(conf, poller, server)
-        self.matchmaker = server.matchmaker
-        self.target = server.target
-        self.socket_type = socket_type
-        self.host = None
-        self.socket = self.subscribe_socket(socket_type)
-        self.target_updater = TargetUpdater(
-            conf, self.matchmaker, self.target, self.host, socket_type)
-
-    def stop(self):
-        self.target_updater.stop()
-
-    def subscribe_socket(self, socket_type):
-        try:
-            socket = zmq_socket.ZmqRandomPortSocket(
-                self.conf, self.context, socket_type)
-            self.sockets.append(socket)
-            LOG.debug("Run %(stype)s consumer on %(addr)s:%(port)d",
-                      {"stype": zmq_names.socket_type_str(socket_type),
-                       "addr": socket.bind_address,
-                       "port": socket.port})
-            self.host = zmq_address.combine_address(
-                self.conf.oslo_messaging_zmq.rpc_zmq_host, socket.port)
-            self.poller.register(socket, self.receive_message)
-            return socket
-        except zmq.ZMQError as e:
-            errmsg = _LE("Failed binding to port %(port)d: %(e)s") \
-                % {"port": self.port, "e": e}
-            LOG.error(errmsg)
-            raise rpc_common.RPCException(errmsg)
-
-    @property
-    def address(self):
-        return self.socket.bind_address
-
-    @property
-    def port(self):
-        return self.socket.port
-
-    def cleanup(self):
-        self.target_updater.cleanup()
-        super(SingleSocketConsumer, self).cleanup()
-
-
-class TargetUpdater(zmq_updater.UpdaterBase):
-    """This entity performs periodic async updates
-    to the matchmaker.
-    """
-
-    def __init__(self, conf, matchmaker, target, host, socket_type):
-        self.target = target
-        self.host = host
-        self.socket_type = socket_type
-        super(TargetUpdater, self).__init__(conf, matchmaker,
-                                            self._update_target)
-
-    def _update_target(self):
-        self.matchmaker.register(
-            self.target, self.host,
-            zmq_names.socket_type_str(self.socket_type),
-            expire=self.conf.oslo_messaging_zmq.zmq_target_expire)
-
-    def stop(self):
-        super(TargetUpdater, self).stop()
-        self.matchmaker.unregister(
-            self.target, self.host,
-            zmq_names.socket_type_str(self.socket_type))
diff --git a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py b/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py
deleted file mode 100644
index 5f5e8ef..0000000
--- a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from oslo_messaging._drivers import common as rpc_common
-from oslo_messaging._drivers.zmq_driver.client import zmq_senders
-from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager
-from oslo_messaging._drivers.zmq_driver.server.consumers \
-    import zmq_consumer_base
-from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message
-from oslo_messaging._drivers.zmq_driver import zmq_async
-from oslo_messaging._drivers.zmq_driver import zmq_names
-from oslo_messaging._drivers.zmq_driver import zmq_updater
-from oslo_messaging._i18n import _LE, _LI
-
-LOG = logging.getLogger(__name__)
-
-zmq = zmq_async.import_zmq()
-
-
-class DealerConsumer(zmq_consumer_base.SingleSocketConsumer):
-
-    def __init__(self, conf, poller, server):
-        self.sender = zmq_senders.ReplySenderProxy(conf)
-        self.sockets_manager = zmq_sockets_manager.SocketsManager(
-            conf, server.matchmaker, zmq.ROUTER, zmq.DEALER)
-        self.host = None
-        super(DealerConsumer, self).__init__(conf, poller, server, zmq.DEALER)
-        self.connection_updater = ConsumerConnectionUpdater(
-            conf, self.matchmaker, self.socket)
-        LOG.info(_LI("[%s] Run DEALER consumer"), self.host)
-
-    def subscribe_socket(self, socket_type):
-        try:
-            socket = self.sockets_manager.get_socket_to_routers()
-            self.sockets.append(socket)
-            self.host = socket.handle.identity
-            self.poller.register(socket, self.receive_message)
-            return socket
-        except zmq.ZMQError as e:
-            LOG.error(_LE("Failed connecting to ROUTER socket %(e)s"),
-                      {"e": e})
-            raise rpc_common.RPCException(str(e))
-
-    def receive_message(self, socket):
-        try:
-            empty = socket.recv()
-            assert empty == b'', 'Bad format: empty delimiter expected'
-            reply_id = socket.recv()
-            message_type = int(socket.recv())
-            message_id = socket.recv()
-            context = socket.recv_loaded()
-            message = socket.recv_loaded()
-            LOG.debug("[%(host)s] Received %(msg_type)s message %(msg_id)s",
-                      {"host": self.host,
                       "msg_type": zmq_names.message_type_str(message_type),
-                       "msg_id": message_id})
-            if message_type == zmq_names.CALL_TYPE:
-                return zmq_incoming_message.ZmqIncomingMessage(
-                    context, message, reply_id, message_id, socket, self.sender
-                )
-            elif message_type in zmq_names.NON_BLOCKING_TYPES:
-                return zmq_incoming_message.ZmqIncomingMessage(context,
-                                                               message)
-            else:
-                LOG.error(_LE("Unknown message type: %s"),
-                          zmq_names.message_type_str(message_type))
-        except (zmq.ZMQError, AssertionError, ValueError) as e:
-            LOG.error(_LE("Receiving message failure: %s"), str(e))
-
-    def cleanup(self):
-        LOG.info(_LI("[%s] Destroy DEALER consumer"), self.host)
-        self.connection_updater.cleanup()
-        super(DealerConsumer, self).cleanup()
-
-
-class ConsumerConnectionUpdater(zmq_updater.ConnectionUpdater):
-
-    def _update_connection(self):
-        routers = self.matchmaker.get_routers()
-        for router_address in routers:
-            self.socket.connect_to_host(router_address)
diff --git a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py
b/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py deleted file mode 100644 index f9913cb..0000000 --- a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from oslo_messaging._drivers.zmq_driver.client import zmq_senders -from oslo_messaging._drivers.zmq_driver.server.consumers \ - import zmq_consumer_base -from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._i18n import _LE, _LI - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class RouterConsumer(zmq_consumer_base.SingleSocketConsumer): - - def __init__(self, conf, poller, server): - self.sender = zmq_senders.ReplySenderDirect(conf) - super(RouterConsumer, self).__init__(conf, poller, server, zmq.ROUTER) - LOG.info(_LI("[%s] Run ROUTER consumer"), self.host) - - def _receive_request(self, socket): - reply_id = socket.recv() - empty = socket.recv() - assert empty == b'', 'Bad format: empty delimiter expected' - msg_type = int(socket.recv()) - message_id = socket.recv_string() - context = socket.recv_loaded() - message = socket.recv_loaded() - return reply_id, msg_type, message_id, context, message - - def receive_message(self, socket): - try: - reply_id, msg_type, message_id, context, message = \ - self._receive_request(socket) - LOG.debug("[%(host)s] Received %(msg_type)s message %(msg_id)s", - {"host": self.host, - "msg_type": zmq_names.message_type_str(msg_type), - "msg_id": message_id}) - - if msg_type == zmq_names.CALL_TYPE: - return zmq_incoming_message.ZmqIncomingMessage( - context, message, reply_id, message_id, socket, self.sender - ) - elif msg_type in zmq_names.NON_BLOCKING_TYPES: - return zmq_incoming_message.ZmqIncomingMessage(context, - message) - else: - LOG.error(_LE("Unknown message type: %s"), - zmq_names.message_type_str(msg_type)) - except (zmq.ZMQError, AssertionError, ValueError) as e: - LOG.error(_LE("Receiving message failed: %s"), str(e)) - - def cleanup(self): - LOG.info(_LI("[%s] Destroy ROUTER consumer"), self.host) - super(RouterConsumer, self).cleanup() diff --git a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py b/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py deleted file mode 100644 index 6fd13b7..0000000 --- a/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import six - -from oslo_messaging._drivers.zmq_driver.server.consumers \ - import zmq_consumer_base -from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message -from oslo_messaging._drivers.zmq_driver import zmq_address -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_socket -from oslo_messaging._i18n import _LE - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class SubConsumer(zmq_consumer_base.ConsumerBase): - - def __init__(self, conf, poller, server): - super(SubConsumer, self).__init__(conf, poller, server) - self.matchmaker = server.matchmaker - self.target = server.target - self.socket = zmq_socket.ZmqSocket(self.conf, self.context, zmq.SUB) - self.sockets.append(self.socket) - self._subscribe_on_target(self.target) - self.on_publishers(self.matchmaker.get_publishers()) - self.poller.register(self.socket, self.receive_message) - - def on_publishers(self, publishers): - for host, sync in publishers: - self.socket.connect(zmq_address.get_tcp_direct_address(host)) - LOG.debug("[%s] SUB consumer connected to publishers %s", - self.socket.handle.identity, publishers) - - def _subscribe_on_target(self, target): - topic_filter = zmq_address.target_to_subscribe_filter(target) - if target.topic: - self.socket.setsockopt(zmq.SUBSCRIBE, six.b(target.topic)) - if target.server: - self.socket.setsockopt(zmq.SUBSCRIBE, six.b(target.server)) - if target.topic and target.server: - self.socket.setsockopt(zmq.SUBSCRIBE, topic_filter) - LOG.debug("[%(host)s] Subscribing to topic %(filter)s", - {"host": self.socket.handle.identity, - "filter": topic_filter}) - - @staticmethod - def _receive_request(socket): - topic_filter = socket.recv() - message_id = socket.recv() - context = socket.recv_loaded() - message = socket.recv_loaded() - LOG.debug("Received %(topic_filter)s topic message %(id)s", - {'id': message_id, 'topic_filter': topic_filter}) - return context, message - - def receive_message(self, socket): - try: - context, message = self._receive_request(socket) - if not message: - return None - return zmq_incoming_message.ZmqIncomingMessage(context, message) - except (zmq.ZMQError, AssertionError) as e: - LOG.error(_LE("Receiving message failed: %s"), str(e)) - - def cleanup(self): - super(SubConsumer, self).cleanup() diff --git a/oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py b/oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py deleted file mode 100644 index 0ebfef5..0000000 --- a/oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import logging - -from oslo_messaging._drivers import base -from oslo_messaging._drivers import common as rpc_common -from oslo_messaging._drivers.zmq_driver.client import zmq_response -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class ZmqIncomingMessage(base.RpcIncomingMessage): - - def __init__(self, context, message, reply_id=None, message_id=None, - socket=None, sender=None): - - if sender is not None: - assert socket is not None, "Valid socket expected!" - assert message_id is not None, "Valid message ID expected!" - assert reply_id is not None, "Valid reply ID expected!" - - super(ZmqIncomingMessage, self).__init__(context, message) - - self.reply_id = reply_id - self.message_id = message_id - self.socket = socket - self.sender = sender - - def acknowledge(self): - """Not sending acknowledge""" - - def reply(self, reply=None, failure=None): - if self.sender is not None: - if failure is not None: - failure = rpc_common.serialize_remote_exception(failure) - reply = zmq_response.Response(msg_type=zmq_names.REPLY_TYPE, - message_id=self.message_id, - reply_id=self.reply_id, - reply_body=reply, - failure=failure) - self.sender.send(self.socket, reply) - - def requeue(self): - """Requeue is not supported""" diff --git a/oslo_messaging/_drivers/zmq_driver/server/zmq_server.py b/oslo_messaging/_drivers/zmq_driver/server/zmq_server.py deleted file mode 100644 index b40bdc0..0000000 --- a/oslo_messaging/_drivers/zmq_driver/server/zmq_server.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
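A short sketch of how a server-side dispatcher would use the incoming message class above; handle() is a hypothetical application callback:

    def dispatch(incoming):
        try:
            result = handle(incoming.context, incoming.message)  # hypothetical
            incoming.reply(reply=result)
        except Exception as e:
            # reply() serializes the failure via serialize_remote_exception()
            # before wrapping it in a Response; for casts and notifications
            # (no sender saved) reply() is a no-op.
            incoming.reply(failure=e)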
- -import copy -import logging - -from oslo_messaging._drivers import base -from oslo_messaging._drivers.zmq_driver.server.consumers\ - import zmq_dealer_consumer -from oslo_messaging._drivers.zmq_driver.server.consumers\ - import zmq_router_consumer -from oslo_messaging._drivers.zmq_driver.server.consumers\ - import zmq_sub_consumer -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._i18n import _LI - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class ZmqServer(base.PollStyleListener): - - def __init__(self, driver, conf, matchmaker, target, poller=None): - super(ZmqServer, self).__init__() - self.driver = driver - self.conf = conf - self.matchmaker = matchmaker - self.target = target - self.poller = poller or zmq_async.get_poller() - - self.router_consumer = zmq_router_consumer.RouterConsumer( - conf, self.poller, self) \ - if not conf.oslo_messaging_zmq.use_router_proxy else None - self.dealer_consumer = zmq_dealer_consumer.DealerConsumer( - conf, self.poller, self) \ - if conf.oslo_messaging_zmq.use_router_proxy else None - self.sub_consumer = zmq_sub_consumer.SubConsumer( - conf, self.poller, self) \ - if conf.oslo_messaging_zmq.use_pub_sub else None - - self.consumers = [] - if self.router_consumer is not None: - self.consumers.append(self.router_consumer) - if self.dealer_consumer is not None: - self.consumers.append(self.dealer_consumer) - if self.sub_consumer is not None: - self.consumers.append(self.sub_consumer) - - @base.batch_poll_helper - def poll(self, timeout=None): - message, socket = self.poller.poll( - timeout or self.conf.oslo_messaging_zmq.rpc_poll_timeout) - return message - - def stop(self): - self.poller.close() - LOG.info(_LI("Stop server %(target)s"), {'target': self.target}) - for consumer in self.consumers: - consumer.stop() - - def cleanup(self): - self.poller.close() - for consumer in self.consumers: - consumer.cleanup() - - -class ZmqNotificationServer(base.PollStyleListener): - - def __init__(self, driver, conf, matchmaker, targets_and_priorities): - super(ZmqNotificationServer, self).__init__() - self.driver = driver - self.conf = conf - self.matchmaker = matchmaker - self.servers = [] - self.poller = zmq_async.get_poller() - self._listen(targets_and_priorities) - - def _listen(self, targets_and_priorities): - for target, priority in targets_and_priorities: - t = copy.deepcopy(target) - t.topic = target.topic + '.' + priority - self.servers.append(ZmqServer( - self.driver, self.conf, self.matchmaker, t, self.poller)) - - @base.batch_poll_helper - def poll(self, timeout=None): - message, socket = self.poller.poll( - timeout or self.conf.oslo_messaging_zmq.rpc_poll_timeout) - return message - - def stop(self): - for server in self.servers: - server.stop() - - def cleanup(self): - for server in self.servers: - server.cleanup() diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_address.py b/oslo_messaging/_drivers/zmq_driver/zmq_address.py deleted file mode 100644 index 0175e7e..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_address.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - - -def combine_address(host, port): - return "%s:%s" % (host, port) - - -def get_tcp_direct_address(host): - return "tcp://%s" % str(host) - - -def get_tcp_random_address(conf): - return "tcp://%s" % conf.oslo_messaging_zmq.rpc_zmq_bind_address - - -def get_broker_address(conf): - return "ipc://%s/zmq-broker" % conf.oslo_messaging_zmq.rpc_zmq_ipc_dir - - -def prefix_str(key, listener_type): - return listener_type + "_" + key - - -def target_to_key(target, listener_type): - - def prefix(key): - return prefix_str(key, listener_type) - - if target.topic and target.server: - attributes = ['topic', 'server'] - key = ".".join(getattr(target, attr) for attr in attributes) - return prefix(key) - if target.topic: - return prefix(target.topic) - - -def target_to_subscribe_filter(target): - if target.topic and target.server: - attributes = ['topic', 'server'] - key = "/".join(getattr(target, attr) for attr in attributes) - return six.b(key) - if target.topic: - return six.b(target.topic) - if target.server: - return six.b(target.server) diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_async.py b/oslo_messaging/_drivers/zmq_driver/zmq_async.py deleted file mode 100644 index a248059..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_async.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
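To make the addressing helpers just removed concrete: a target with both topic and server subscribes with "topic/server" and registers under "listener_type_topic.server". A runnable demo with the same logic inlined (Target here is a trivial stand-in for oslo_messaging.Target, and .encode() stands in for six.b(), which is equivalent for ASCII topics):

    import collections

    Target = collections.namedtuple('Target', 'topic server')

    def target_to_key(target, listener_type):
        # Same branching as the removed target_to_key().
        if target.topic and target.server:
            return listener_type + "_" + target.topic + "." + target.server
        if target.topic:
            return listener_type + "_" + target.topic

    def target_to_subscribe_filter(target):
        # Same branching as the removed target_to_subscribe_filter().
        if target.topic and target.server:
            return (target.topic + "/" + target.server).encode()
        if target.topic:
            return target.topic.encode()
        if target.server:
            return target.server.encode()

    t = Target(topic='compute', server='node-1')
    print(target_to_key(t, 'ROUTER'))     # ROUTER_compute.node-1
    print(target_to_subscribe_filter(t))  # b'compute/node-1'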
- -from oslo_utils import eventletutils -from oslo_utils import importutils - - -def import_zmq(): - imported_zmq = importutils.try_import( - 'eventlet.green.zmq' if eventletutils.is_monkey_patched('thread') else - 'zmq', default=None - ) - return imported_zmq - - -def get_poller(): - if eventletutils.is_monkey_patched('thread'): - from oslo_messaging._drivers.zmq_driver.poller import green_poller - return green_poller.GreenPoller() - - from oslo_messaging._drivers.zmq_driver.poller import threading_poller - return threading_poller.ThreadingPoller() - - -def get_executor(method): - if eventletutils.is_monkey_patched('thread'): - from oslo_messaging._drivers.zmq_driver.poller import green_poller - return green_poller.GreenExecutor(method) - - from oslo_messaging._drivers.zmq_driver.poller import threading_poller - return threading_poller.ThreadingExecutor(method) - - -def get_queue(): - if eventletutils.is_monkey_patched('thread'): - import eventlet - return eventlet.queue.Queue(), eventlet.queue.Empty - - import six - return six.moves.queue.Queue(), six.moves.queue.Empty diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_names.py b/oslo_messaging/_drivers/zmq_driver/zmq_names.py deleted file mode 100644 index f61003c..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_names.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_messaging._drivers.zmq_driver import zmq_async - -zmq = zmq_async.import_zmq() - - -FIELD_MSG_TYPE = 'msg_type' -FIELD_MSG_ID = 'message_id' -FIELD_REPLY_ID = 'reply_id' -FIELD_REPLY_BODY = 'reply_body' -FIELD_FAILURE = 'failure' - - -IDX_REPLY_TYPE = 1 -IDX_REPLY_BODY = 2 - -MULTIPART_IDX_ENVELOPE = 0 -MULTIPART_IDX_BODY = 1 - - -CALL_TYPE = 1 -CAST_TYPE = 2 -CAST_FANOUT_TYPE = 3 -NOTIFY_TYPE = 4 -REPLY_TYPE = 5 -ACK_TYPE = 6 - -MESSAGE_TYPES = (CALL_TYPE, - CAST_TYPE, - CAST_FANOUT_TYPE, - NOTIFY_TYPE) - -MULTISEND_TYPES = (CAST_FANOUT_TYPE, NOTIFY_TYPE) -DIRECT_TYPES = (CALL_TYPE, CAST_TYPE, REPLY_TYPE) -CAST_TYPES = (CAST_TYPE, CAST_FANOUT_TYPE) -NOTIFY_TYPES = (NOTIFY_TYPE,) -NON_BLOCKING_TYPES = CAST_TYPES + NOTIFY_TYPES - - -def socket_type_str(socket_type): - zmq_socket_str = {zmq.DEALER: "DEALER", - zmq.ROUTER: "ROUTER", - zmq.PUSH: "PUSH", - zmq.PULL: "PULL", - zmq.REQ: "REQ", - zmq.REP: "REP", - zmq.PUB: "PUB", - zmq.SUB: "SUB"} - return zmq_socket_str[socket_type] - - -def message_type_str(message_type): - msg_type_str = {CALL_TYPE: "CALL", - CAST_TYPE: "CAST", - CAST_FANOUT_TYPE: "CAST_FANOUT", - NOTIFY_TYPE: "NOTIFY", - REPLY_TYPE: "REPLY", - ACK_TYPE: "ACK"} - return msg_type_str[message_type] diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_options.py b/oslo_messaging/_drivers/zmq_driver/zmq_options.py deleted file mode 100644 index 2ac76f9..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_options.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2016 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket - -from oslo_config import cfg - -from oslo_messaging._drivers import base -from oslo_messaging import server - - -MATCHMAKER_BACKENDS = ('redis', 'dummy') -MATCHMAKER_DEFAULT = 'redis' - - -zmq_opts = [ - cfg.StrOpt('rpc_zmq_bind_address', default='*', - deprecated_group='DEFAULT', - help='ZeroMQ bind address. Should be a wildcard (*), ' - 'an ethernet interface, or IP. ' - 'The "host" option should point or resolve to this ' - 'address.'), - - cfg.StrOpt('rpc_zmq_matchmaker', default=MATCHMAKER_DEFAULT, - choices=MATCHMAKER_BACKENDS, - deprecated_group='DEFAULT', - help='MatchMaker driver.'), - - cfg.IntOpt('rpc_zmq_contexts', default=1, - deprecated_group='DEFAULT', - help='Number of ZeroMQ contexts, defaults to 1.'), - - cfg.IntOpt('rpc_zmq_topic_backlog', - deprecated_group='DEFAULT', - help='Maximum number of ingress messages to locally buffer ' - 'per topic. Default is unlimited.'), - - cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', - deprecated_group='DEFAULT', - help='Directory for holding IPC sockets.'), - - cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), - sample_default='localhost', - deprecated_group='DEFAULT', - help='Name of this node. Must be a valid hostname, FQDN, or ' - 'IP address. Must match "host" option, if running Nova.'), - - cfg.IntOpt('rpc_cast_timeout', default=-1, - deprecated_group='DEFAULT', - help='Seconds to wait before a cast expires (TTL). ' - 'The default value of -1 specifies an infinite linger ' - 'period. The value of 0 specifies no linger period. ' - 'Pending messages shall be discarded immediately ' - 'when the socket is closed. Only supported by impl_zmq.'), - - cfg.IntOpt('rpc_poll_timeout', default=1, - deprecated_group='DEFAULT', - help='The default number of seconds that poll should wait. ' - 'Poll raises timeout exception when timeout expired.'), - - cfg.IntOpt('zmq_target_expire', default=300, - deprecated_group='DEFAULT', - help='Expiration timeout in seconds of a name service record ' - 'about existing target ( < 0 means no timeout).'), - - cfg.IntOpt('zmq_target_update', default=180, - deprecated_group='DEFAULT', - help='Update period in seconds of a name service record ' - 'about existing target.'), - - cfg.BoolOpt('use_pub_sub', default=True, - deprecated_group='DEFAULT', - help='Use PUB/SUB pattern for fanout methods. 
' - 'PUB/SUB always uses proxy.'), - - cfg.BoolOpt('use_router_proxy', default=True, - deprecated_group='DEFAULT', - help='Use ROUTER remote proxy.'), - - cfg.PortOpt('rpc_zmq_min_port', - default=49153, - deprecated_group='DEFAULT', - help='Minimal port number for random ports range.'), - - cfg.IntOpt('rpc_zmq_max_port', - min=1, - max=65536, - default=65536, - deprecated_group='DEFAULT', - help='Maximal port number for random ports range.'), - - cfg.IntOpt('rpc_zmq_bind_port_retries', - default=100, - deprecated_group='DEFAULT', - help='Number of retries to find free port number before ' - 'fail with ZMQBindError.'), - - cfg.StrOpt('rpc_zmq_serialization', default='json', - choices=('json', 'msgpack'), - deprecated_group='DEFAULT', - help='Default serialization mechanism for ' - 'serializing/deserializing outgoing/incoming messages') -] - - -def register_opts(conf): - opt_group = cfg.OptGroup(name='oslo_messaging_zmq', - title='ZeroMQ driver options') - conf.register_opts(zmq_opts, group=opt_group) - conf.register_opts(server._pool_opts) - conf.register_opts(base.base_opts) diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_poller.py b/oslo_messaging/_drivers/zmq_driver/zmq_poller.py deleted file mode 100644 index 28fe6c8..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_poller.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class ZmqPoller(object): - - """Base poller interface - - Needed to poll on zmq sockets in green and native async manner. - Native poller implementation wraps zmq.Poller helper class. - Wrapping is needed to provide unified poller interface - in zmq-driver (for both native and zmq pollers). It makes some - difference with poller-helper from zmq library which doesn't actually - receive message. - - The poller object should be obtained over: - - poller = zmq_async.get_poller() - - Then we have to register sockets for polling. We are able - to provide specific receiving method. By default poller calls - socket.recv_multipart. - - def receive_message(socket): - id = socket.recv_string() - ctxt = socket.recv_json() - msg = socket.recv_json() - return (id, ctxt, msg) - - poller.register(socket, recv_method=receive_message) - - Further to receive a message we should call: - - message, socket = poller.poll() - - The 'message' here contains (id, ctxt, msg) tuple. 
- """ - - @abc.abstractmethod - def register(self, socket, recv_method=None): - """Register socket to poll - - :param socket: Socket to subscribe for polling - :type socket: zmq.Socket - :param recv_method: Optional specific receiver procedure - Should return received message object - :type recv_method: callable - """ - - @abc.abstractmethod - def poll(self, timeout=None): - """Poll for messages - - :param timeout: Optional polling timeout - None or -1 means poll forever - any positive value means timeout in seconds - :type timeout: int - :returns: (message, socket) tuple - """ - - @abc.abstractmethod - def close(self): - """Terminate polling""" - - -@six.add_metaclass(abc.ABCMeta) -class Executor(object): - """Base executor interface for threading/green async executors""" - - def __init__(self, thread): - self.thread = thread - - @abc.abstractmethod - def execute(self): - """Run execution""" - - @abc.abstractmethod - def stop(self): - """Stop execution""" - - @abc.abstractmethod - def wait(self): - """Wait until pass""" - - @abc.abstractmethod - def done(self): - """More soft way to stop rather than killing thread""" diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_socket.py b/oslo_messaging/_drivers/zmq_driver/zmq_socket.py deleted file mode 100644 index 285eafa..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_socket.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -import uuid - -import six - -from oslo_messaging._drivers import common as rpc_common -from oslo_messaging._drivers.zmq_driver import zmq_address -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging._i18n import _LE, _LI -from oslo_messaging import exceptions -from oslo_serialization.serializer import json_serializer -from oslo_serialization.serializer import msgpack_serializer - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class ZmqSocket(object): - - SERIALIZERS = { - 'json': json_serializer.JSONSerializer(), - 'msgpack': msgpack_serializer.MessagePackSerializer() - } - - def __init__(self, conf, context, socket_type, immediate=True, - high_watermark=0): - self.conf = conf - self.context = context - self.socket_type = socket_type - self.handle = context.socket(socket_type) - self.handle.set_hwm(high_watermark) - - self.close_linger = -1 - if self.conf.oslo_messaging_zmq.rpc_cast_timeout > 0: - self.close_linger = \ - self.conf.oslo_messaging_zmq.rpc_cast_timeout * 1000 - self.handle.setsockopt(zmq.LINGER, self.close_linger) - # Put messages to only connected queues - self.handle.setsockopt(zmq.IMMEDIATE, 1 if immediate else 0) - self.handle.identity = six.b(str(uuid.uuid4())) - self.connections = set() - - def _get_serializer(self, serialization): - serializer = self.SERIALIZERS.get(serialization, None) - if serializer is None: - raise NotImplementedError( - "Serialization '{}' is not supported".format(serialization) - ) - return serializer - - def type_name(self): - return zmq_names.socket_type_str(self.socket_type) - - def connections_count(self): - return len(self.connections) - - def connect(self, address): - if address not in self.connections: - self.handle.connect(address) - self.connections.add(address) - - def setsockopt(self, *args, **kwargs): - self.handle.setsockopt(*args, **kwargs) - - def setsockopt_string(self, *args, **kwargs): - self.handle.setsockopt_string(*args, **kwargs) - - def send(self, *args, **kwargs): - self.handle.send(*args, **kwargs) - - def send_string(self, *args, **kwargs): - self.handle.send_string(*args, **kwargs) - - def send_json(self, *args, **kwargs): - self.handle.send_json(*args, **kwargs) - - def send_pyobj(self, *args, **kwargs): - self.handle.send_pyobj(*args, **kwargs) - - def send_multipart(self, *args, **kwargs): - self.handle.send_multipart(*args, **kwargs) - - def send_dumped(self, obj, *args, **kwargs): - serialization = kwargs.pop( - 'serialization', - self.conf.oslo_messaging_zmq.rpc_zmq_serialization) - serializer = self._get_serializer(serialization) - s = serializer.dump_as_bytes(obj) - self.handle.send(s, *args, **kwargs) - - def recv(self, *args, **kwargs): - return self.handle.recv(*args, **kwargs) - - def recv_string(self, *args, **kwargs): - return self.handle.recv_string(*args, **kwargs) - - def recv_json(self, *args, **kwargs): - return self.handle.recv_json(*args, **kwargs) - - def recv_pyobj(self, *args, **kwargs): - return self.handle.recv_pyobj(*args, **kwargs) - - def recv_multipart(self, *args, **kwargs): - return self.handle.recv_multipart(*args, **kwargs) - - def recv_loaded(self, *args, **kwargs): - serialization = kwargs.pop( - 'serialization', - self.conf.oslo_messaging_zmq.rpc_zmq_serialization) - serializer = self._get_serializer(serialization) - s = self.handle.recv(*args, **kwargs) - obj = serializer.load_from_bytes(s) - return obj - - def close(self, *args, **kwargs): - 
self.handle.close(*args, **kwargs) - - def connect_to_address(self, address): - if address in self.connections: - return - stype = zmq_names.socket_type_str(self.socket_type) - try: - LOG.info(_LI("Connecting %(stype)s id %(id)s to %(address)s"), - {"stype": stype, - "id": self.handle.identity, - "address": address}) - self.connect(address) - except zmq.ZMQError as e: - errmsg = _LE("Failed connecting %(stype)s to %(address)s: %(e)s") \ - % {"stype": stype, "address": address, "e": e} - LOG.error(_LE("Failed connecting %(stype)s to %(address)s: %(e)s"), - {"stype": stype, "address": address, "e": e}) - raise rpc_common.RPCException(errmsg) - - def connect_to_host(self, host): - address = zmq_address.get_tcp_direct_address( - host.decode('utf-8') if six.PY3 and - isinstance(host, six.binary_type) else host - ) - self.connect_to_address(address) - - -class ZmqPortBusy(exceptions.MessagingException): - """Raised when binding to a port failure""" - - def __init__(self, port_number): - super(ZmqPortBusy, self).__init__() - self.port_number = port_number - - -class ZmqRandomPortSocket(ZmqSocket): - - def __init__(self, conf, context, socket_type, host=None, - high_watermark=0): - super(ZmqRandomPortSocket, self).__init__( - conf, context, socket_type, immediate=False, - high_watermark=high_watermark) - self.bind_address = zmq_address.get_tcp_random_address(self.conf) - if host is None: - host = conf.oslo_messaging_zmq.rpc_zmq_host - try: - self.port = self.handle.bind_to_random_port( - self.bind_address, - min_port=conf.oslo_messaging_zmq.rpc_zmq_min_port, - max_port=conf.oslo_messaging_zmq.rpc_zmq_max_port, - max_tries=conf.oslo_messaging_zmq.rpc_zmq_bind_port_retries) - self.connect_address = zmq_address.combine_address(host, self.port) - except zmq.ZMQBindError: - LOG.error(_LE("Random ports range exceeded!")) - raise ZmqPortBusy(port_number=0) - - -class ZmqFixedPortSocket(ZmqSocket): - - def __init__(self, conf, context, socket_type, host, port, - high_watermark=0): - super(ZmqFixedPortSocket, self).__init__( - conf, context, socket_type, immediate=False, - high_watermark=high_watermark) - self.connect_address = zmq_address.combine_address(host, port) - self.bind_address = zmq_address.get_tcp_direct_address( - zmq_address.combine_address( - conf.oslo_messaging_zmq.rpc_zmq_bind_address, port)) - self.host = host - self.port = port - - try: - self.handle.bind(self.bind_address) - except zmq.ZMQError as e: - LOG.exception(e) - LOG.error(_LE("Chosen port %d is being busy.") % self.port) - raise ZmqPortBusy(port_number=port) diff --git a/oslo_messaging/_drivers/zmq_driver/zmq_updater.py b/oslo_messaging/_drivers/zmq_driver/zmq_updater.py deleted file mode 100644 index 2d4f9e0..0000000 --- a/oslo_messaging/_drivers/zmq_driver/zmq_updater.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
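ZmqRandomPortSocket above leans on pyzmq's bind_to_random_port(); stripped of the driver plumbing, the core behaviour can be reproduced directly (the port range and retry count here are just the option defaults from zmq_options.py earlier in this diff):

    import zmq

    context = zmq.Context()
    socket = context.socket(zmq.ROUTER)
    try:
        # pyzmq tries random ports in the given range and raises
        # ZMQBindError after max_tries failures, which the driver
        # converts into its own ZmqPortBusy exception.
        port = socket.bind_to_random_port('tcp://*',
                                          min_port=49153,
                                          max_port=65536,
                                          max_tries=100)
        print('bound to random port %d' % port)
    except zmq.ZMQBindError:
        print('random ports range exceeded')
    finally:
        socket.close()
        context.term()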
- -import abc -import logging -import time - -import six - -from oslo_messaging._drivers.zmq_driver import zmq_async - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class UpdaterBase(object): - - def __init__(self, conf, matchmaker, update_method): - self.conf = conf - self.matchmaker = matchmaker - self.update_method = update_method - # make first update immediately - self.update_method() - self.executor = zmq_async.get_executor(method=self._update_loop) - self.executor.execute() - - def stop(self): - self.executor.stop() - - def _update_loop(self): - self.update_method() - time.sleep(self.conf.oslo_messaging_zmq.zmq_target_update) - - def cleanup(self): - self.executor.stop() - - -@six.add_metaclass(abc.ABCMeta) -class ConnectionUpdater(UpdaterBase): - - def __init__(self, conf, matchmaker, socket): - self.socket = socket - super(ConnectionUpdater, self).__init__( - conf, matchmaker, self._update_connection) - - @abc.abstractmethod - def _update_connection(self): - """Update connection info""" diff --git a/oslo_messaging/_i18n.py b/oslo_messaging/_i18n.py deleted file mode 100644 index cc3835c..0000000 --- a/oslo_messaging/_i18n.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html - -""" - -import oslo_i18n - - -_translators = oslo_i18n.TranslatorFactory(domain='oslo_messaging') - -# The primary translation function using the well-known name "_" -_ = _translators.primary - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical diff --git a/oslo_messaging/_utils.py b/oslo_messaging/_utils.py deleted file mode 100644 index da0012a..0000000 --- a/oslo_messaging/_utils.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading - - -def version_is_compatible(imp_version, version): - """Determine whether versions are compatible. - - :param imp_version: The version implemented - :param version: The version requested by an incoming message. 
- """ - if imp_version is None: - return True - - if version is None: - return False - - version_parts = version.split('.') - imp_version_parts = imp_version.split('.') - try: - rev = version_parts[2] - except IndexError: - rev = 0 - try: - imp_rev = imp_version_parts[2] - except IndexError: - imp_rev = 0 - - if int(version_parts[0]) != int(imp_version_parts[0]): # Major - return False - if int(version_parts[1]) > int(imp_version_parts[1]): # Minor - return False - if (int(version_parts[1]) == int(imp_version_parts[1]) and - int(rev) > int(imp_rev)): # Revision - return False - return True - - -def fetch_current_thread_functor(): - # Until https://github.com/eventlet/eventlet/issues/172 is resolved - # or addressed we have to use complicated workaround to get a object - # that will not be recycled; the usage of threading.current_thread() - # doesn't appear to currently be monkey patched and therefore isn't - # reliable to use (and breaks badly when used as all threads share - # the same current_thread() object)... - try: - import eventlet - from eventlet import patcher - green_threaded = patcher.is_monkey_patched('thread') - except ImportError: - green_threaded = False - if green_threaded: - return lambda: eventlet.getcurrent() - else: - return lambda: threading.current_thread() - - -class DummyLock(object): - def acquire(self): - pass - - def release(self): - pass - - def __enter__(self): - self.acquire() - - def __exit__(self, type, value, traceback): - self.release() diff --git a/oslo_messaging/conffixture.py b/oslo_messaging/conffixture.py deleted file mode 100644 index 5eb4e5e..0000000 --- a/oslo_messaging/conffixture.py +++ /dev/null @@ -1,142 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = ['ConfFixture'] - -import sys - -import fixtures -from functools import wraps - - -def _import_opts(conf, module, opts, group=None): - __import__(module) - conf.register_opts(getattr(sys.modules[module], opts), group=group) - - -class ConfFixture(fixtures.Fixture): - - """Tweak configuration options for unit testing. - - oslo.messaging registers a number of configuration options, but rather than - directly referencing those options, users of the API should use this - interface for querying and overriding certain configuration options. 
- - An example usage:: - - self.messaging_conf = self.useFixture(messaging.ConfFixture(cfg.CONF)) - self.messaging_conf.transport_driver = 'fake' - - :param conf: a ConfigOpts instance - :type conf: oslo.config.cfg.ConfigOpts - """ - - def __init__(self, conf): - self.conf = conf - _import_opts(self.conf, - 'oslo_messaging._drivers.impl_rabbit', 'rabbit_opts', - 'oslo_messaging_rabbit') - _import_opts(self.conf, - 'oslo_messaging._drivers.base', 'base_opts', - 'oslo_messaging_rabbit') - _import_opts(self.conf, - 'oslo_messaging._drivers.amqp', 'amqp_opts', - 'oslo_messaging_rabbit') - _import_opts(self.conf, - 'oslo_messaging._drivers.amqp1_driver.opts', - 'amqp1_opts', 'oslo_messaging_amqp') - _import_opts(self.conf, - 'oslo_messaging._drivers.zmq_driver.zmq_options', - 'zmq_opts', 'oslo_messaging_zmq') - _import_opts(self.conf, - 'oslo_messaging._drivers.zmq_driver.' - 'matchmaker.matchmaker_redis', - 'matchmaker_redis_opts', - 'matchmaker_redis') - _import_opts(self.conf, 'oslo_messaging.rpc.client', '_client_opts') - _import_opts(self.conf, 'oslo_messaging.transport', '_transport_opts') - _import_opts(self.conf, - 'oslo_messaging.notify.notifier', - '_notifier_opts', - 'oslo_messaging_notifications') - - def _setup_decorator(self): - # Support older test cases that still use the set_override - # with the old config key names - def decorator_for_set_override(wrapped_function): - @wraps(wrapped_function) - def _wrapper(*args, **kwargs): - group = 'oslo_messaging_notifications' - if args[0] == 'notification_driver': - args = ('driver', args[1], group) - elif args[0] == 'notification_transport_url': - args = ('transport_url', args[1], group) - elif args[0] == 'notification_topics': - args = ('topics', args[1], group) - return wrapped_function(*args, **kwargs) - _wrapper.wrapped = wrapped_function - return _wrapper - - def decorator_for_clear_override(wrapped_function): - @wraps(wrapped_function) - def _wrapper(*args, **kwargs): - group = 'oslo_messaging_notifications' - if args[0] == 'notification_driver': - args = ('driver', group) - elif args[0] == 'notification_transport_url': - args = ('transport_url', group) - elif args[0] == 'notification_topics': - args = ('topics', group) - return wrapped_function(*args, **kwargs) - _wrapper.wrapped = wrapped_function - return _wrapper - - if not hasattr(self.conf.set_override, 'wrapped'): - self.conf.set_override = decorator_for_set_override( - self.conf.set_override) - if not hasattr(self.conf.clear_override, 'wrapped'): - self.conf.clear_override = decorator_for_clear_override( - self.conf.clear_override) - - def _teardown_decorator(self): - if hasattr(self.conf.set_override, 'wrapped'): - self.conf.set_override = self.conf.set_override.wrapped - if hasattr(self.conf.clear_override, 'wrapped'): - self.conf.clear_override = self.conf.clear_override.wrapped - - def setUp(self): - super(ConfFixture, self).setUp() - self._setup_decorator() - self.addCleanup(self._teardown_decorator) - self.addCleanup(self.conf.reset) - - @property - def transport_driver(self): - """The transport driver - for example 'rabbit', 'amqp' or 'fake'.""" - return self.conf.rpc_backend - - @transport_driver.setter - def transport_driver(self, value): - self.conf.set_override('rpc_backend', value, enforce_type=True) - - @property - def response_timeout(self): - """Default number of seconds to wait for a response from a call.""" - return self.conf.rpc_response_timeout - - @response_timeout.setter - def response_timeout(self, value): - 
self.conf.set_override('rpc_response_timeout', value, - enforce_type=True) diff --git a/oslo_messaging/dispatcher.py b/oslo_messaging/dispatcher.py deleted file mode 100644 index 2cd12cb..0000000 --- a/oslo_messaging/dispatcher.py +++ /dev/null @@ -1,37 +0,0 @@ - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import logging - -import six - - -__all__ = [ - "DispatcherBase" -] - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class DispatcherBase(object): - "Base class for dispatcher" - - @abc.abstractmethod - def dispatch(self, incoming): - """Dispatch incoming messages to the endpoints and return result - - :param incoming: incoming object for dispatching to the endpoint - :type incoming: object, depends on endpoint type - """ diff --git a/oslo_messaging/exceptions.py b/oslo_messaging/exceptions.py deleted file mode 100644 index 93f525a..0000000 --- a/oslo_messaging/exceptions.py +++ /dev/null @@ -1,40 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = ['MessagingException', 'MessagingTimeout', 'MessageDeliveryFailure', - 'InvalidTarget'] - -import six - - -class MessagingException(Exception): - """Base class for exceptions.""" - - -class MessagingTimeout(MessagingException): - """Raised if message sending times out.""" - - -class MessageDeliveryFailure(MessagingException): - """Raised if message sending failed after the asked retry.""" - - -class InvalidTarget(MessagingException, ValueError): - """Raised if a target does not meet certain pre-conditions.""" - - def __init__(self, msg, target): - msg = msg + ":" + six.text_type(target) - super(InvalidTarget, self).__init__(msg) - self.target = target diff --git a/oslo_messaging/hacking/__init__.py b/oslo_messaging/hacking/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/hacking/checks.py b/oslo_messaging/hacking/checks.py deleted file mode 100644 index 2df8fb9..0000000 --- a/oslo_messaging/hacking/checks.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -import ast -import six - -oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+") -oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]") -oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+") - - -def check_oslo_namespace_imports(logical_line): - if re.match(oslo_namespace_imports_from_dot, logical_line): - msg = ("O323: '%s' must be used instead of '%s'.") % ( - logical_line.replace('oslo.', 'oslo_'), - logical_line) - yield(0, msg) - elif re.match(oslo_namespace_imports_from_root, logical_line): - msg = ("O323: '%s' must be used instead of '%s'.") % ( - logical_line.replace('from oslo import ', 'import oslo_'), - logical_line) - yield(0, msg) - elif re.match(oslo_namespace_imports_dot, logical_line): - msg = ("O323: '%s' must be used instead of '%s'.") % ( - logical_line.replace('import', 'from').replace('.', ' import '), - logical_line) - yield(0, msg) - - -class BaseASTChecker(ast.NodeVisitor): - """Provides a simple framework for writing AST-based checks. - - Subclasses should implement visit_* methods like any other AST visitor - implementation. When they detect an error for a particular node the - method should call ``self.add_error(offending_node)``. Details about - where in the code the error occurred will be pulled from the node - object. - - Subclasses should also provide a class variable named CHECK_DESC to - be used for the human readable error message. - - """ - - def __init__(self, tree, filename): - """This object is created automatically by pep8. 
- - :param tree: an AST tree - :param filename: name of the file being analyzed - (ignored by our checks) - """ - self._tree = tree - self._errors = [] - - def run(self): - """Called automatically by pep8.""" - self.visit(self._tree) - return self._errors - - def add_error(self, node, message=None): - """Add an error caused by a node to the list of errors for pep8.""" - message = message or self.CHECK_DESC - error = (node.lineno, node.col_offset, message, self.__class__) - self._errors.append(error) - - -class CheckForLoggingIssues(BaseASTChecker): - - DEBUG_CHECK_DESC = 'O324 Using translated string in debug logging' - NONDEBUG_CHECK_DESC = 'O325 Not using translating helper for logging' - EXCESS_HELPER_CHECK_DESC = 'O326 Using hints when _ is necessary' - LOG_MODULES = ('logging') - I18N_MODULES = ( - 'oslo_messaging._i18n._', - 'oslo_messaging._i18n._LI', - 'oslo_messaging._i18n._LW', - 'oslo_messaging._i18n._LE', - 'oslo_messaging._i18n._LC', - ) - TRANS_HELPER_MAP = { - 'debug': None, - 'info': '_LI', - 'warn': '_LW', - 'warning': '_LW', - 'error': '_LE', - 'exception': '_LE', - 'critical': '_LC', - } - - def __init__(self, tree, filename): - super(CheckForLoggingIssues, self).__init__(tree, filename) - - self.logger_names = [] - self.logger_module_names = [] - self.i18n_names = {} - - # NOTE(dstanek): this kinda accounts for scopes when talking - # about only leaf node in the graph - self.assignments = {} - - def generic_visit(self, node): - """Called if no explicit visitor function exists for a node.""" - for field, value in ast.iter_fields(node): - if isinstance(value, list): - for item in value: - if isinstance(item, ast.AST): - item._parent = node - self.visit(item) - elif isinstance(value, ast.AST): - value._parent = node - self.visit(value) - - def _filter_imports(self, module_name, alias): - """Keeps lists of logging and i18n imports.""" - if module_name in self.LOG_MODULES: - self.logger_module_names.append(alias.asname or alias.name) - elif module_name in self.I18N_MODULES: - self.i18n_names[alias.asname or alias.name] = alias.name - - def visit_Import(self, node): - for alias in node.names: - self._filter_imports(alias.name, alias) - return super(CheckForLoggingIssues, self).generic_visit(node) - - def visit_ImportFrom(self, node): - for alias in node.names: - full_name = '%s.%s' % (node.module, alias.name) - self._filter_imports(full_name, alias) - return super(CheckForLoggingIssues, self).generic_visit(node) - - def _find_name(self, node): - """Return the fully qualified name or a Name or Attribute.""" - if isinstance(node, ast.Name): - return node.id - elif (isinstance(node, ast.Attribute) - and isinstance(node.value, (ast.Name, ast.Attribute))): - method_name = node.attr - obj_name = self._find_name(node.value) - if obj_name is None: - return None - return obj_name + '.' + method_name - elif isinstance(node, six.string_types): - return node - else: # could be Subscript, Call or many more - return None - - def visit_Assign(self, node): - """Look for 'LOG = logging.getLogger' - - This handles the simple case: - name = [logging_module].getLogger(...) - - - or - - - name = [i18n_name](...) - - And some much more comple ones: - name = [i18n_name](...) % X - - - or - - - self.name = [i18n_name](...) % X - - """ - attr_node_types = (ast.Name, ast.Attribute) - - if (len(node.targets) != 1 - or not isinstance(node.targets[0], attr_node_types)): - # say no to: "x, y = ..." 
- return super(CheckForLoggingIssues, self).generic_visit(node) - - target_name = self._find_name(node.targets[0]) - - if (isinstance(node.value, ast.BinOp) and - isinstance(node.value.op, ast.Mod)): - if (isinstance(node.value.left, ast.Call) and - isinstance(node.value.left.func, ast.Name) and - node.value.left.func.id in self.i18n_names): - # NOTE(dstanek): this is done to match cases like: - # `msg = _('something %s') % x` - node = ast.Assign(value=node.value.left) - - if not isinstance(node.value, ast.Call): - # node.value must be a call to getLogger - self.assignments.pop(target_name, None) - return super(CheckForLoggingIssues, self).generic_visit(node) - - # is this a call to an i18n function? - if (isinstance(node.value.func, ast.Name) - and node.value.func.id in self.i18n_names): - self.assignments[target_name] = node.value.func.id - return super(CheckForLoggingIssues, self).generic_visit(node) - - if (not isinstance(node.value.func, ast.Attribute) - or not isinstance(node.value.func.value, attr_node_types)): - # function must be an attribute on an object like - # logging.getLogger - return super(CheckForLoggingIssues, self).generic_visit(node) - - object_name = self._find_name(node.value.func.value) - func_name = node.value.func.attr - - if (object_name in self.logger_module_names - and func_name == 'getLogger'): - self.logger_names.append(target_name) - - return super(CheckForLoggingIssues, self).generic_visit(node) - - def visit_Call(self, node): - """Look for the 'LOG.*' calls.""" - # obj.method - if isinstance(node.func, ast.Attribute): - obj_name = self._find_name(node.func.value) - if isinstance(node.func.value, ast.Name): - method_name = node.func.attr - elif isinstance(node.func.value, ast.Attribute): - obj_name = self._find_name(node.func.value) - method_name = node.func.attr - else: # could be Subscript, Call or many more - return super(CheckForLoggingIssues, self).generic_visit(node) - - # if dealing with a logger the method can't be "warn" - if obj_name in self.logger_names and method_name == 'warn': - msg = node.args[0] # first arg to a logging method is the msg - self.add_error(msg, message=self.USING_DEPRECATED_WARN) - - # must be a logger instance and one of the support logging methods - if (obj_name not in self.logger_names - or method_name not in self.TRANS_HELPER_MAP): - return super(CheckForLoggingIssues, self).generic_visit(node) - - # the call must have arguments - if not node.args: - return super(CheckForLoggingIssues, self).generic_visit(node) - - if method_name == 'debug': - self._process_debug(node) - elif method_name in self.TRANS_HELPER_MAP: - self._process_non_debug(node, method_name) - - return super(CheckForLoggingIssues, self).generic_visit(node) - - def _process_debug(self, node): - msg = node.args[0] # first arg to a logging method is the msg - - # if first arg is a call to a i18n name - if (isinstance(msg, ast.Call) - and isinstance(msg.func, ast.Name) - and msg.func.id in self.i18n_names): - self.add_error(msg, message=self.DEBUG_CHECK_DESC) - - # if the first arg is a reference to a i18n call - elif (isinstance(msg, ast.Name) - and msg.id in self.assignments - and not self._is_raised_later(node, msg.id)): - self.add_error(msg, message=self.DEBUG_CHECK_DESC) - - def _process_non_debug(self, node, method_name): - msg = node.args[0] # first arg to a logging method is the msg - - # if first arg is a call to a i18n name - if isinstance(msg, ast.Call): - try: - func_name = msg.func.id - except AttributeError: - # in the case of logging only an 
exception, the msg function - # will not have an id associated with it, for instance: - # LOG.warning(six.text_type(e)) - return - - # the function name is the correct translation helper - # for the logging method - if func_name == self.TRANS_HELPER_MAP[method_name]: - return - - # the function name is an alias for the correct translation - # helper for the loggine method - if (self.i18n_names[func_name] == - self.TRANS_HELPER_MAP[method_name]): - return - - self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) - - # if the first arg is not a reference to the correct i18n hint - elif isinstance(msg, ast.Name): - - # FIXME(dstanek): to make sure more robust we should be checking - # all names passed into a logging method. we can't right now - # because: - # 1. We have code like this that we'll fix when dealing with the %: - # msg = _('....') % {} - # LOG.warning(msg) - # 2. We also do LOG.exception(e) in several places. I'm not sure - # exactly what we should be doing about that. - if msg.id not in self.assignments: - return - - helper_method_name = self.TRANS_HELPER_MAP[method_name] - if (self.assignments[msg.id] != helper_method_name - and not self._is_raised_later(node, msg.id)): - self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) - elif (self.assignments[msg.id] == helper_method_name - and self._is_raised_later(node, msg.id)): - self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC) - - def _is_raised_later(self, node, name): - - def find_peers(node): - node_for_line = node._parent - for _field, value in ast.iter_fields(node._parent._parent): - if isinstance(value, list) and node_for_line in value: - return value[value.index(node_for_line) + 1:] - continue - return [] - - peers = find_peers(node) - for peer in peers: - if isinstance(peer, ast.Raise): - if six.PY3: - exc = peer.exc - else: - exc = peer.type - if (isinstance(exc, ast.Call) and - len(exc.args) > 0 and - isinstance(exc.args[0], ast.Name) and - name in (a.id for a in exc.args)): - return True - else: - return False - elif isinstance(peer, ast.Assign): - if name in (t.id for t in peer.targets if hasattr(t, 'id')): - return False - - -def factory(register): - register(CheckForLoggingIssues) - register(check_oslo_namespace_imports) diff --git a/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-error.po b/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-error.po deleted file mode 100644 index ffe36c8..0000000 --- a/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-error.po +++ /dev/null @@ -1,61 +0,0 @@ -# Translations template for oslo.messaging. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.messaging -# project. -# -# Translators: -# Andi Chandler , 2014-2015 -# Andi Chandler , 2016. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.messaging 5.5.1.dev3\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-07-01 03:41+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-28 05:52+0000\n" -"Last-Translator: Andi Chandler \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -#, python-format -msgid "An exception occurred processing the API call: %s " -msgstr "An exception occurred processing the API call: %s " - -msgid "Can not acknowledge message. 
Skip processing" -msgstr "Can not acknowledge message. Skip processing" - -msgid "Can not send reply for message" -msgstr "Can not send reply for message" - -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" -msgstr "Could not send notification to %(topic)s. Payload=%(message)s" - -msgid "Exception during message handling" -msgstr "Exception during message handling" - -msgid "Exception during message handling." -msgstr "Exception during message handling." - -msgid "Exception during messages handling." -msgstr "Exception during messages handling." - -msgid "Fail to ack/requeue message." -msgstr "Fail to ack/requeue message." - -#, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. Payload=" -"%(payload)s" -msgstr "" -"Problem '%(e)s' attempting to send to notification system. Payload=" -"%(payload)s" - -msgid "Unexpected exception occurred." -msgstr "Unexpected exception occurred." diff --git a/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-info.po b/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-info.po deleted file mode 100644 index 2678d6b..0000000 --- a/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-info.po +++ /dev/null @@ -1,34 +0,0 @@ -# Translations template for oslo.messaging. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.messaging -# project. -# -# Translators: -# Andi Chandler , 2014-2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.messaging 5.2.1.dev12\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 15:53+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-01 09:26+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -#, python-format -msgid "Routing '%(event)s' notification to '%(driver)s' driver" -msgstr "Routing '%(event)s' notification to '%(driver)s' driver" - -msgid "" -"blocking executor handles only one message at once. threading or eventlet " -"executor is recommended." -msgstr "" -"blocking executor handles only one message at once. threading or eventlet " -"executor is recommended." diff --git a/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-warning.po b/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-warning.po deleted file mode 100644 index 486ab02..0000000 --- a/oslo_messaging/locale/en_GB/LC_MESSAGES/oslo_messaging-log-warning.po +++ /dev/null @@ -1,45 +0,0 @@ -# Translations template for oslo.messaging. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.messaging -# project. -# -# Translators: -# Andi Chandler , 2014-2015 -# Andi Chandler , 2016. #zanata -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.messaging 5.5.1.dev3\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-07-01 03:41+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-28 05:52+0000\n" -"Last-Translator: Andi Chandler \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -#, python-format -msgid "Failed to load any notifiers for %s" -msgstr "Failed to load any notifiers for %s" - -#, python-format -msgid "Possible hang: %s" -msgstr "Possible hang: %s" - -msgid "" -"Restarting a MessageHandlingServer is inherently racy. It is deprecated, and " -"will become a noop in a future release of oslo.messaging. If you need to " -"restart MessageHandlingServer you should instantiate a new object." -msgstr "" -"Restarting a MessageHandlingServer is inherently racy. It is deprecated, and " -"will become a noop in a future release of oslo.messaging. If you need to " -"restart MessageHandlingServer you should instantiate a new object." - -#, python-format -msgid "Unknown priority \"%s\"" -msgstr "Unknown priority \"%s\"" diff --git a/oslo_messaging/notify/__init__.py b/oslo_messaging/notify/__init__.py deleted file mode 100644 index 912e633..0000000 --- a/oslo_messaging/notify/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = ['Notifier', - 'LoggingNotificationHandler', - 'get_notification_transport', - 'get_notification_listener', - 'get_batch_notification_listener', - 'NotificationResult', - 'NotificationFilter', - 'PublishErrorsHandler', - 'LoggingErrorNotificationHandler'] - -from .filter import NotificationFilter -from .notifier import * -from .listener import * -from .log_handler import * -from .logger import * -from .dispatcher import NotificationResult diff --git a/oslo_messaging/notify/_impl_log.py b/oslo_messaging/notify/_impl_log.py deleted file mode 100644 index 7322f07..0000000 --- a/oslo_messaging/notify/_impl_log.py +++ /dev/null @@ -1,47 +0,0 @@ - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
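For context on the notify package being removed here, typical use of its public API looks roughly like the following; this is a sketch based on the names exported in __init__.py above, and it assumes a transport URL is already configured (the 'log' driver is the LogDriver defined in _impl_log.py just below):

    from oslo_config import cfg
    import oslo_messaging

    conf = cfg.CONF
    transport = oslo_messaging.get_notification_transport(conf)
    notifier = oslo_messaging.Notifier(transport,
                                       publisher_id='example.host-1',
                                       driver='log',
                                       topics=['notifications'])
    # Emits an INFO-priority notification through the configured driver.
    notifier.info({}, 'example.object.create', {'key': 'value'})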
- -import logging -import warnings - -from oslo_serialization import jsonutils -from oslo_utils import strutils - -from oslo_messaging.notify import notifier - - -class LogDriver(notifier.Driver): - - "Publish notifications via Python logging infrastructure." - - # NOTE(dhellmann): For backwards-compatibility with configurations - # that may have modified the settings for this logger using a - # configuration file, we keep the name - # 'oslo.messaging.notification' even though the package is now - # 'oslo_messaging'. - LOGGER_BASE = 'oslo.messaging.notification' - - def notify(self, ctxt, message, priority, retry): - logger = logging.getLogger('%s.%s' % (self.LOGGER_BASE, - message['event_type'])) - method = getattr(logger, priority.lower(), None) - if method: - method(jsonutils.dumps(strutils.mask_dict_password(message))) - else: - warnings.warn('Unable to log message as notify cannot find a ' - 'logger with the priority specified ' - '%s' % priority.lower()) diff --git a/oslo_messaging/notify/_impl_messaging.py b/oslo_messaging/notify/_impl_messaging.py deleted file mode 100644 index 9f7c571..0000000 --- a/oslo_messaging/notify/_impl_messaging.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from debtcollector import removals - -from oslo_messaging.notify.messaging import * # noqa - - -# NOTE(mriedem): removal depends on how we can cap requirements in -# stable/liberty such that neutron does not try to load this -removals.removed_module(__name__, - oslo_messaging.notify.messaging.__name__, - removal_version='?') diff --git a/oslo_messaging/notify/_impl_noop.py b/oslo_messaging/notify/_impl_noop.py deleted file mode 100644 index 9f8f2a9..0000000 --- a/oslo_messaging/notify/_impl_noop.py +++ /dev/null @@ -1,24 +0,0 @@ - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_messaging.notify import notifier - - -class NoOpDriver(notifier.Driver): - - def notify(self, ctxt, message, priority, retry): - pass diff --git a/oslo_messaging/notify/_impl_routing.py b/oslo_messaging/notify/_impl_routing.py deleted file mode 100644 index c7136d0..0000000 --- a/oslo_messaging/notify/_impl_routing.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2014 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from oslo_config import cfg -from oslo_utils import fnmatch -import six -from stevedore import dispatch -import yaml - -from oslo_messaging._i18n import _LI, _LW -from oslo_messaging.notify import notifier - - -LOG = logging.getLogger(__name__) - -router_config = cfg.StrOpt('routing_config', default='', - deprecated_group='DEFAULT', - deprecated_name='routing_notifier_config', - help='RoutingNotifier configuration file location.') - -CONF = cfg.CONF -CONF.register_opt(router_config, group='oslo_messaging_notifications') - - -class RoutingDriver(notifier.Driver): - NOTIFIER_PLUGIN_NAMESPACE = 'oslo.messaging.notify.drivers' - - plugin_manager = None - routing_groups = None # The routing groups from the config file. - used_drivers = None # Used driver names, extracted from config file. - - def _should_load_plugin(self, ext, *args, **kwargs): - # Hack to keep stevedore from circular importing since these - # endpoints are used for different purposes. - if ext.name == 'routing': - return False - return ext.name in self.used_drivers - - def _get_notifier_config_file(self, filename): - """Broken out for testing.""" - return open(filename, 'r') - - def _load_notifiers(self): - """One-time load of notifier config file.""" - self.routing_groups = {} - self.used_drivers = set() - filename = CONF.oslo_messaging_notifications.routing_config - if not filename: - return - - # Infer which drivers are used from the config file. - self.routing_groups = yaml.safe_load( - self._get_notifier_config_file(filename)) - if not self.routing_groups: - self.routing_groups = {} # In case we got None from load() - return - - for group in self.routing_groups.values(): - self.used_drivers.update(group.keys()) - - LOG.debug('loading notifiers from %s', self.NOTIFIER_PLUGIN_NAMESPACE) - self.plugin_manager = dispatch.DispatchExtensionManager( - namespace=self.NOTIFIER_PLUGIN_NAMESPACE, - check_func=self._should_load_plugin, - invoke_on_load=True, - invoke_args=None) - if not list(self.plugin_manager): - LOG.warning(_LW("Failed to load any notifiers for %s"), - self.NOTIFIER_PLUGIN_NAMESPACE) - - def _get_drivers_for_message(self, group, event_type, priority): - """Which drivers should be called for this event_type - or priority. - """ - accepted_drivers = set() - - for driver, rules in six.iteritems(group): - checks = [] - for key, patterns in six.iteritems(rules): - if key == 'accepted_events': - c = [fnmatch.fnmatch(event_type, p) - for p in patterns] - checks.append(any(c)) - if key == 'accepted_priorities': - c = [fnmatch.fnmatch(priority, p.lower()) - for p in patterns] - checks.append(any(c)) - if all(checks): - accepted_drivers.add(driver) - - return list(accepted_drivers) - - def _filter_func(self, ext, context, message, priority, retry, - accepted_drivers): - """True/False if the driver should be called for this message. - """ - # context is unused here, but passed in by map() - return ext.name in accepted_drivers - - def _call_notify(self, ext, context, message, priority, retry, - accepted_drivers): - """Emit the notification. 
- """ - # accepted_drivers is passed in as a result of the map() function - LOG.info(_LI("Routing '%(event)s' notification to '%(driver)s' " - "driver"), - {'event': message.get('event_type'), 'driver': ext.name}) - ext.obj.notify(context, message, priority, retry) - - def notify(self, context, message, priority, retry): - if not self.plugin_manager: - self._load_notifiers() - - # Fail if these aren't present ... - event_type = message['event_type'] - - accepted_drivers = set() - for group in self.routing_groups.values(): - accepted_drivers.update( - self._get_drivers_for_message(group, event_type, - priority.lower())) - self.plugin_manager.map(self._filter_func, self._call_notify, context, - message, priority, retry, - list(accepted_drivers)) diff --git a/oslo_messaging/notify/_impl_test.py b/oslo_messaging/notify/_impl_test.py deleted file mode 100644 index 1c817b2..0000000 --- a/oslo_messaging/notify/_impl_test.py +++ /dev/null @@ -1,34 +0,0 @@ - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_messaging.notify import notifier - -NOTIFICATIONS = [] - - -def reset(): - "Clear out the list of recorded notifications." - global NOTIFICATIONS - NOTIFICATIONS = [] - - -class TestDriver(notifier.Driver): - - "Store notifications in memory for test verification." - - def notify(self, ctxt, message, priority, retry): - NOTIFICATIONS.append((ctxt, message, priority, retry)) diff --git a/oslo_messaging/notify/dispatcher.py b/oslo_messaging/notify/dispatcher.py deleted file mode 100644 index caa80e0..0000000 --- a/oslo_messaging/notify/dispatcher.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import itertools -import logging - -import six - -from oslo_messaging._i18n import _LW -from oslo_messaging import dispatcher -from oslo_messaging import serializer as msg_serializer - - -LOG = logging.getLogger(__name__) - -PRIORITIES = ['audit', 'debug', 'info', 'warn', 'error', 'critical', 'sample'] - - -class NotificationResult(object): - HANDLED = 'handled' - REQUEUE = 'requeue' - - -class NotificationDispatcher(dispatcher.DispatcherBase): - def __init__(self, endpoints, serializer): - - self.endpoints = endpoints - self.serializer = serializer or msg_serializer.NoOpSerializer() - - self._callbacks_by_priority = {} - for endpoint, prio in itertools.product(endpoints, PRIORITIES): - if hasattr(endpoint, prio): - method = getattr(endpoint, prio) - screen = getattr(endpoint, 'filter_rule', None) - self._callbacks_by_priority.setdefault(prio, []).append( - (screen, method)) - - @property - def supported_priorities(self): - return self._callbacks_by_priority.keys() - - def dispatch(self, incoming): - """Dispatch notification messages to the appropriate endpoint method. - """ - priority, raw_message, message = self._extract_user_message(incoming) - - if priority not in PRIORITIES: - LOG.warning(_LW('Unknown priority "%s"'), priority) - return - - for screen, callback in self._callbacks_by_priority.get(priority, - []): - if screen and not screen.match(message["ctxt"], - message["publisher_id"], - message["event_type"], - message["metadata"], - message["payload"]): - continue - - ret = self._exec_callback(callback, message) - if ret == NotificationResult.REQUEUE: - return ret - return NotificationResult.HANDLED - - def _exec_callback(self, callback, message): - try: - return callback(message["ctxt"], - message["publisher_id"], - message["event_type"], - message["payload"], - message["metadata"]) - except Exception: - LOG.exception("Callback raised an exception.") - return NotificationResult.REQUEUE - - def _extract_user_message(self, incoming): - ctxt = self.serializer.deserialize_context(incoming.ctxt) - message = incoming.message - - publisher_id = message.get('publisher_id') - event_type = message.get('event_type') - metadata = { - 'message_id': message.get('message_id'), - 'timestamp': message.get('timestamp') - } - priority = message.get('priority', '').lower() - payload = self.serializer.deserialize_entity(ctxt, - message.get('payload')) - return priority, incoming, dict(ctxt=ctxt, - publisher_id=publisher_id, - event_type=event_type, - payload=payload, - metadata=metadata) - - -class BatchNotificationDispatcher(NotificationDispatcher): - """A message dispatcher which understands Notification messages. - - A MessageHandlingServer is constructed by passing a callable dispatcher - which is invoked with a list of message dictionaries each time 'batch_size' - messages are received or 'batch_timeout' seconds is reached. - """ - - def dispatch(self, incoming): - """Dispatch notification messages to the appropriate endpoint method. 
- """ - - messages_grouped = itertools.groupby(( - self._extract_user_message(m) - for m in incoming), lambda x: x[0]) - - requeues = set() - for priority, messages in messages_grouped: - __, raw_messages, messages = six.moves.zip(*messages) - raw_messages = list(raw_messages) - messages = list(messages) - if priority not in PRIORITIES: - LOG.warning(_LW('Unknown priority "%s"'), priority) - continue - for screen, callback in self._callbacks_by_priority.get(priority, - []): - if screen: - filtered_messages = [message for message in messages - if screen.match( - message["ctxt"], - message["publisher_id"], - message["event_type"], - message["metadata"], - message["payload"])] - else: - filtered_messages = messages - - if not filtered_messages: - continue - - ret = self._exec_callback(callback, filtered_messages) - if ret == NotificationResult.REQUEUE: - requeues.update(raw_messages) - break - return requeues - - def _exec_callback(self, callback, messages): - try: - return callback(messages) - except Exception: - LOG.exception("Callback raised an exception.") - return NotificationResult.REQUEUE diff --git a/oslo_messaging/notify/filter.py b/oslo_messaging/notify/filter.py deleted file mode 100644 index b23fac4..0000000 --- a/oslo_messaging/notify/filter.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - - -class NotificationFilter(object): - - """Filter notification messages - - The NotificationFilter class is used to filter notifications that an - endpoint will received. - - The notification can be filter on different fields: context, - publisher_id, event_type, metadata and payload. 
- - The filter is done via a regular expression - - filter_rule = NotificationFilter( - publisher_id='^compute.*', - context={'tenant_id': '^5f643cfc-664b-4c69-8000-ce2ed7b08216$', - 'roles='private'}, - event_type='^compute\.instance\..*', - metadata={'timestamp': 'Aug'}, - payload={'state': '^active$') - - """ - - def __init__(self, context=None, publisher_id=None, event_type=None, - metadata=None, payload=None): - self._regex_publisher_id = None - self._regex_event_type = None - - if publisher_id is not None: - self._regex_publisher_id = re.compile(publisher_id) - if event_type is not None: - self._regex_event_type = re.compile(event_type) - self._regexs_context = self._build_regex_dict(context) - self._regexs_metadata = self._build_regex_dict(metadata) - self._regexs_payload = self._build_regex_dict(payload) - - @staticmethod - def _build_regex_dict(regex_list): - if regex_list is None: - return {} - return dict((k, re.compile(regex_list[k])) for k in regex_list) - - @staticmethod - def _check_for_mismatch(data, regex): - if isinstance(regex, dict): - for k in regex: - if (k not in data or not regex[k].match(data[k])): - return True - elif regex is not None and not regex.match(data): - return True - return False - - def match(self, context, publisher_id, event_type, metadata, payload): - if (self._check_for_mismatch(publisher_id, self._regex_publisher_id) or - self._check_for_mismatch(event_type, self._regex_event_type) or - self._check_for_mismatch(context, self._regexs_context) or - self._check_for_mismatch(metadata, self._regexs_metadata) or - self._check_for_mismatch(payload, self._regexs_payload)): - return False - return True diff --git a/oslo_messaging/notify/listener.py b/oslo_messaging/notify/listener.py deleted file mode 100644 index 386e79e..0000000 --- a/oslo_messaging/notify/listener.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""A notification listener exposes a number of endpoints, each of which -contain a set of methods. Each method corresponds to a notification priority. - -To create a notification listener, you supply a transport, list of targets and -a list of endpoints. - -A transport can be obtained simply by calling the get_notification_transport() -method:: - - transport = messaging.get_notification_transport(conf) - -which will load the appropriate transport driver according to the user's -messaging configuration. See get_notification_transport() for more details. - -The target supplied when creating a notification listener expresses the topic -and - optionally - the exchange to listen on. See Target for more details -on these attributes. - -Notification listener have start(), stop() and wait() messages to begin -handling requests, stop handling requests and wait for all in-process -requests to complete. - -Each notification listener is associated with an executor which integrates the -listener with a specific I/O handling framework. 
Currently, there are blocking -and eventlet executors available. - -A simple example of a notification listener with multiple endpoints might be:: - - from oslo_config import cfg - import oslo_messaging - - class NotificationEndpoint(object): - filter_rule = NotificationFilter(publisher_id='^compute.*') - - def warn(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - class ErrorEndpoint(object): - filter_rule = NotificationFilter(event_type='^instance\..*\.start$', - context={'ctxt_key': 'regexp'}) - - def error(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - transport = oslo_messaging.get_notification_transport(cfg.CONF) - targets = [ - oslo_messaging.Target(topic='notifications'), - oslo_messaging.Target(topic='notifications_bis') - ] - endpoints = [ - NotificationEndpoint(), - ErrorEndpoint(), - ] - pool = "listener-workers" - server = oslo_messaging.get_notification_listener(transport, targets, - endpoints, pool) - server.start() - server.wait() - -A notifier sends a notification on a topic with a priority, the notification -listener will receive this notification if the topic of this one have been set -in one of the targets and if an endpoint implements the method named like the -priority and if the notification match the NotificationFilter rule set into -the filter_rule attribute of the endpoint. - -Parameters to endpoint methods are the request context supplied by the client, -the publisher_id of the notification message, the event_type, the payload and -metadata. The metadata parameter is a mapping containing a unique message_id -and a timestamp. - -By supplying a serializer object, a listener can deserialize a request context -and arguments from - and serialize return values to - primitive types. - -By supplying a pool name you can create multiple groups of listeners consuming -notifications and that each group only receives one copy of each -notification. - -An endpoint method can explicitly return -oslo_messaging.NotificationResult.HANDLED to acknowledge a message or -oslo_messaging.NotificationResult.REQUEUE to requeue the message. - -The message is acknowledged only if all endpoints either return -oslo_messaging.NotificationResult.HANDLED or None. - -Note that not all transport drivers implement support for requeueing. In order -to use this feature, applications should assert that the feature is available -by passing allow_requeue=True to get_notification_listener(). If the driver -does not support requeueing, it will raise NotImplementedError at this point. 
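As an aside on the HANDLED/REQUEUE contract described above, here is a sketch of an endpoint that requeues messages it cannot process yet (backend_ready() and handle() are hypothetical helpers)::

    import oslo_messaging

    class RetryEndpoint(object):
        def error(self, ctxt, publisher_id, event_type, payload, metadata):
            if not backend_ready():   # hypothetical readiness check
                return oslo_messaging.NotificationResult.REQUEUE
            handle(payload)           # hypothetical handler
            return oslo_messaging.NotificationResult.HANDLED

This only has an effect when the listener was created with allow_requeue=True and the transport driver supports requeueing.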
- -""" -import itertools -import logging - -from oslo_messaging._i18n import _LE -from oslo_messaging.notify import dispatcher as notify_dispatcher -from oslo_messaging import server as msg_server - -LOG = logging.getLogger(__name__) - - -class NotificationServerBase(msg_server.MessageHandlingServer): - def __init__(self, transport, targets, dispatcher, executor='blocking', - allow_requeue=True, pool=None, batch_size=1, - batch_timeout=None): - super(NotificationServerBase, self).__init__(transport, dispatcher, - executor) - self._allow_requeue = allow_requeue - self._pool = pool - self.targets = targets - self._targets_priorities = set( - itertools.product(self.targets, - self.dispatcher.supported_priorities) - ) - - self._batch_size = batch_size - self._batch_timeout = batch_timeout - - def _create_listener(self): - return self.transport._listen_for_notifications( - self._targets_priorities, self._pool, self._batch_size, - self._batch_timeout - ) - - -class NotificationServer(NotificationServerBase): - def __init__(self, transport, targets, dispatcher, executor='blocking', - allow_requeue=True, pool=None): - super(NotificationServer, self).__init__( - transport, targets, dispatcher, executor, allow_requeue, pool, 1, - None - ) - - def _process_incoming(self, incoming): - message = incoming[0] - try: - res = self.dispatcher.dispatch(message) - except Exception: - LOG.exception(_LE('Exception during message handling.')) - res = notify_dispatcher.NotificationResult.REQUEUE - - try: - if (res == notify_dispatcher.NotificationResult.REQUEUE and - self._allow_requeue): - message.requeue() - else: - message.acknowledge() - except Exception: - LOG.exception(_LE("Fail to ack/requeue message.")) - - -class BatchNotificationServer(NotificationServerBase): - - def _process_incoming(self, incoming): - try: - not_processed_messages = self.dispatcher.dispatch(incoming) - except Exception: - not_processed_messages = set(incoming) - LOG.exception(_LE('Exception during messages handling.')) - for m in incoming: - try: - if m in not_processed_messages and self._allow_requeue: - m.requeue() - else: - m.acknowledge() - except Exception: - LOG.exception(_LE("Fail to ack/requeue message.")) - - -def get_notification_listener(transport, targets, endpoints, - executor='blocking', serializer=None, - allow_requeue=False, pool=None): - """Construct a notification listener - - The executor parameter controls how incoming messages will be received and - dispatched. By default, the most simple executor is used - the blocking - executor. - - If the eventlet executor is used, the threading and time library need to be - monkeypatched. 
-
-    :param transport: the messaging transport
-    :type transport: Transport
-    :param targets: the exchanges and topics to listen on
-    :type targets: list of Target
-    :param endpoints: a list of endpoint objects
-    :type endpoints: list
-    :param executor: name of a message executor - for example
-                     'eventlet', 'blocking'
-    :type executor: str
-    :param serializer: an optional entity serializer
-    :type serializer: Serializer
-    :param allow_requeue: whether NotificationResult.REQUEUE support is needed
-    :type allow_requeue: bool
-    :param pool: the pool name
-    :type pool: str
-    :raises: NotImplementedError
-    """
-    dispatcher = notify_dispatcher.NotificationDispatcher(endpoints,
-                                                          serializer)
-    return NotificationServer(transport, targets, dispatcher, executor,
-                              allow_requeue, pool)
-
-
-def get_batch_notification_listener(transport, targets, endpoints,
-                                    executor='blocking', serializer=None,
-                                    allow_requeue=False, pool=None,
-                                    batch_size=None, batch_timeout=None):
-    """Construct a batch notification listener
-
-    The executor parameter controls how incoming messages will be received and
-    dispatched. By default, the simplest executor is used - the blocking
-    executor.
-
-    If the eventlet executor is used, the threading and time libraries need
-    to be monkeypatched.
-
-    :param transport: the messaging transport
-    :type transport: Transport
-    :param targets: the exchanges and topics to listen on
-    :type targets: list of Target
-    :param endpoints: a list of endpoint objects
-    :type endpoints: list
-    :param executor: name of a message executor - for example
-                     'eventlet', 'blocking'
-    :type executor: str
-    :param serializer: an optional entity serializer
-    :type serializer: Serializer
-    :param allow_requeue: whether NotificationResult.REQUEUE support is needed
-    :type allow_requeue: bool
-    :param pool: the pool name
-    :type pool: str
-    :param batch_size: number of messages to wait for before calling
-                       endpoint callbacks
-    :type batch_size: int
-    :param batch_timeout: number of seconds to wait before calling
-                          endpoint callbacks
-    :type batch_timeout: int
-    :raises: NotImplementedError
-    """
-    dispatcher = notify_dispatcher.BatchNotificationDispatcher(
-        endpoints, serializer)
-    return BatchNotificationServer(
-        transport, targets, dispatcher, executor, allow_requeue, pool,
-        batch_size, batch_timeout
-    )
diff --git a/oslo_messaging/notify/log_handler.py b/oslo_messaging/notify/log_handler.py
deleted file mode 100644
index 8dc8454..0000000
--- a/oslo_messaging/notify/log_handler.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from oslo_config import cfg
-
-
-class LoggingErrorNotificationHandler(logging.Handler):
-    def __init__(self, *args, **kwargs):
-        # NOTE(dhellmann): Avoid a cyclical import by doing this one
-        # at runtime.
-        import oslo_messaging
-        logging.Handler.__init__(self, *args, **kwargs)
-        self._transport = oslo_messaging.get_notification_transport(cfg.CONF)
-        self._notifier = oslo_messaging.Notifier(
-            self._transport,
-            publisher_id='error.publisher')
-
-    def emit(self, record):
-        conf = self._transport.conf
-        # NOTE(bnemec): Notifier registers this opt with the transport.
-        if ('log' in conf.oslo_messaging_notifications.driver):
-            # NOTE(lbragstad): If we detect that log is one of the
-            # notification drivers, then return. This protects from infinite
-            # recursion where something bad happens, it gets logged, the log
-            # handler sends a notification, and the log_notifier sees the
-            # notification and logs it.
-            return
-        self._notifier.error({},
-                             'error_notification',
-                             dict(error=record.msg))
-
-
-PublishErrorsHandler = LoggingErrorNotificationHandler
diff --git a/oslo_messaging/notify/logger.py b/oslo_messaging/notify/logger.py
deleted file mode 100644
index b4e48df..0000000
--- a/oslo_messaging/notify/logger.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2013 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Driver for the Python logging package that sends log records as notifications.
-"""
-import logging
-
-from oslo_config import cfg
-
-from oslo_messaging.notify import notifier
-
-
-class LoggingNotificationHandler(logging.Handler):
-    """Handler for logging to the messaging notification system.
-
-    Each time the application logs a message using the :py:mod:`logging`
-    module, it will be sent as a notification. The severity used for the
-    notification will be the same as the one used for the log record.
-
-    This can be used in a Python logging configuration like this::
-
-        [handler_notifier]
-        class=oslo_messaging.LoggingNotificationHandler
-        level=ERROR
-        args=('rabbit:///')
-
-    """
-
-    CONF = cfg.CONF
-    """Default configuration object. Subclass this class if you want to
-    use another one.
-
-    """
-
-    def __init__(self, url, publisher_id=None, driver=None,
-                 topic=None, serializer=None):
-        self.notifier = notifier.Notifier(
-            notifier.get_notification_transport(self.CONF, url),
-            publisher_id, driver,
-            topic,
-            serializer() if serializer else None)
-        logging.Handler.__init__(self)
-
-    def emit(self, record):
-        """Emit the log record to the messaging notification system.
-
-        :param record: A log record to emit.
- - """ - method = getattr(self.notifier, record.levelname.lower(), None) - - if not method: - return - - method( - {}, - 'logrecord', - { - 'name': record.name, - 'levelno': record.levelno, - 'levelname': record.levelname, - 'exc_info': record.exc_info, - 'pathname': record.pathname, - 'lineno': record.lineno, - 'msg': record.getMessage(), - 'funcName': record.funcName, - 'thread': record.thread, - 'processName': record.processName, - 'process': record.process, - 'extra': getattr(record, 'extra', None), - } - ) diff --git a/oslo_messaging/notify/messaging.py b/oslo_messaging/notify/messaging.py deleted file mode 100644 index e7642ca..0000000 --- a/oslo_messaging/notify/messaging.py +++ /dev/null @@ -1,61 +0,0 @@ - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import oslo_messaging -from oslo_messaging._i18n import _LE -from oslo_messaging.notify import notifier - -LOG = logging.getLogger(__name__) - - -class MessagingDriver(notifier.Driver): - - """Send notifications using the 1.0 message format. - - This driver sends notifications over the configured messaging transport, - but without any message envelope (also known as message format 1.0). - - This driver should only be used in cases where there are existing consumers - deployed which do not support the 2.0 message format. - """ - - def __init__(self, conf, topics, transport, version=1.0): - super(MessagingDriver, self).__init__(conf, topics, transport) - self.version = version - - def notify(self, ctxt, message, priority, retry): - priority = priority.lower() - for topic in self.topics: - target = oslo_messaging.Target(topic='%s.%s' % (topic, priority)) - try: - self.transport._send_notification(target, ctxt, message, - version=self.version, - retry=retry) - except Exception: - LOG.exception(_LE("Could not send notification to %(topic)s. " - "Payload=%(message)s"), - dict(topic=topic, message=message)) - - -class MessagingV2Driver(MessagingDriver): - - "Send notifications using the 2.0 message format." - - def __init__(self, conf, **kwargs): - super(MessagingV2Driver, self).__init__(conf, version=2.0, **kwargs) diff --git a/oslo_messaging/notify/middleware.py b/oslo_messaging/notify/middleware.py deleted file mode 100644 index 60aab27..0000000 --- a/oslo_messaging/notify/middleware.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Send notifications on request - -""" -import logging -import os.path -import sys -import traceback as tb - -from oslo_config import cfg -from oslo_middleware import base -import six -import webob.dec - -import oslo_messaging -from oslo_messaging._i18n import _LE -from oslo_messaging import notify - -LOG = logging.getLogger(__name__) - - -def log_and_ignore_error(fn): - def wrapped(*args, **kwargs): - try: - return fn(*args, **kwargs) - except Exception as e: - LOG.exception(_LE('An exception occurred processing ' - 'the API call: %s ') % e) - return wrapped - - -class RequestNotifier(base.Middleware): - """Send notification on request.""" - - @classmethod - def factory(cls, global_conf, **local_conf): - """Factory method for paste.deploy.""" - conf = global_conf.copy() - conf.update(local_conf) - - def _factory(app): - return cls(app, **conf) - return _factory - - def __init__(self, app, **conf): - self.notifier = notify.Notifier( - oslo_messaging.get_notification_transport(cfg.CONF, - conf.get('url')), - publisher_id=conf.get('publisher_id', - os.path.basename(sys.argv[0]))) - self.service_name = conf.get('service_name') - self.ignore_req_list = [x.upper().strip() for x in - conf.get('ignore_req_list', '').split(',')] - super(RequestNotifier, self).__init__(app) - - @staticmethod - def environ_to_dict(environ): - """Following PEP 333, server variables are lower case, so don't - include them. - - """ - return dict((k, v) for k, v in six.iteritems(environ) - if k.isupper() and k != 'HTTP_X_AUTH_TOKEN') - - @log_and_ignore_error - def process_request(self, request): - request.environ['HTTP_X_SERVICE_NAME'] = \ - self.service_name or request.host - payload = { - 'request': self.environ_to_dict(request.environ), - } - - self.notifier.info({}, - 'http.request', - payload) - - @log_and_ignore_error - def process_response(self, request, response, - exception=None, traceback=None): - payload = { - 'request': self.environ_to_dict(request.environ), - } - - if response: - payload['response'] = { - 'status': response.status, - 'headers': response.headers, - } - - if exception: - payload['exception'] = { - 'value': repr(exception), - 'traceback': tb.format_tb(traceback) - } - - self.notifier.info({}, - 'http.response', - payload) - - @webob.dec.wsgify - def __call__(self, req): - if req.method in self.ignore_req_list: - return req.get_response(self.application) - else: - self.process_request(req) - try: - response = req.get_response(self.application) - except Exception: - exc_type, value, traceback = sys.exc_info() - self.process_response(req, None, value, traceback) - raise - else: - self.process_response(req, response) - return response diff --git a/oslo_messaging/notify/notifier.py b/oslo_messaging/notify/notifier.py deleted file mode 100644 index af42569..0000000 --- a/oslo_messaging/notify/notifier.py +++ /dev/null @@ -1,423 +0,0 @@ - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import abc
-import argparse
-import logging
-import uuid
-
-from debtcollector import renames
-from oslo_config import cfg
-from oslo_utils import timeutils
-import six
-from stevedore import extension
-from stevedore import named
-
-from oslo_messaging._i18n import _LE
-from oslo_messaging import serializer as msg_serializer
-from oslo_messaging import transport as msg_transport
-
-_notifier_opts = [
-    cfg.MultiStrOpt('driver',
-                    default=[],
-                    deprecated_name='notification_driver',
-                    deprecated_group='DEFAULT',
-                    help='The driver(s) to handle sending notifications. '
-                         'Possible values are messaging, messagingv2, '
-                         'routing, log, test, noop'),
-    cfg.StrOpt('transport_url',
-               deprecated_name='notification_transport_url',
-               deprecated_group='DEFAULT',
-               secret=True,
-               help='A URL representing the messaging driver to use for '
-                    'notifications. If not set, we fall back to the same '
-                    'configuration used for RPC.'),
-    cfg.ListOpt('topics',
-                default=['notifications', ],
-                deprecated_opts=[
-                    cfg.DeprecatedOpt('topics',
-                                      group='rpc_notifier2'),
-                    cfg.DeprecatedOpt('notification_topics',
-                                      group='DEFAULT')
-                ],
-                help='AMQP topic used for OpenStack notifications.'),
-]
-
-_LOG = logging.getLogger(__name__)
-
-
-def _send_notification():
-    """Command line tool to send notifications manually."""
-    parser = argparse.ArgumentParser(
-        description='Oslo.messaging notification sending',
-    )
-    parser.add_argument('--config-file',
-                        help='Path to configuration file')
-    parser.add_argument('--transport-url',
-                        help='Transport URL')
-    parser.add_argument('--publisher-id',
-                        help='Publisher ID')
-    parser.add_argument('--event-type',
-                        default="test",
-                        help="Event type")
-    parser.add_argument('--topic',
-                        nargs='*',
-                        help="Topic to send to")
-    parser.add_argument('--priority',
-                        default="info",
-                        choices=("info",
-                                 "audit",
-                                 "warn",
-                                 "error",
-                                 "critical",
-                                 "sample"),
-                        help='Event priority')
-    parser.add_argument('--driver',
-                        default="messagingv2",
-                        choices=extension.ExtensionManager(
-                            'oslo.messaging.notify.drivers'
-                        ).names(),
-                        help='Notification driver')
-    parser.add_argument('payload')
-    args = parser.parse_args()
-    conf = cfg.ConfigOpts()
-    conf([],
-         default_config_files=[args.config_file] if args.config_file else None)
-    transport = get_notification_transport(conf, url=args.transport_url)
-    notifier = Notifier(transport, args.publisher_id, topics=args.topic,
-                        driver=args.driver)
-    notifier._notify({}, args.event_type, args.payload, args.priority)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Driver(object):
-    """Base driver for Notifications"""
-
-    def __init__(self, conf, topics, transport):
-        """Base driver initialization.
-
-        :param conf: configuration options
-        :param topics: list of topics
-        :param transport: transport driver to use
-        """
-        self.conf = conf
-        self.topics = topics
-        self.transport = transport
-
-    @abc.abstractmethod
-    def notify(self, ctxt, msg, priority, retry):
-        """Send a single notification with a specific priority.
-
-        :param ctxt: current request context
-        :param msg: message to be sent
-        :type msg: str
-        :param priority: priority of the message
-        :type priority: str
-        :param retry: a connection retries configuration:
-                      None or -1 means to retry forever,
-                      0 means no retry,
-                      N means N retries
-        :type retry: int
-        """
-        pass
-
-
-def get_notification_transport(conf, url=None,
-                               allowed_remote_exmods=None, aliases=None):
-    conf.register_opts(_notifier_opts,
-                       group='oslo_messaging_notifications')
-    if url is None:
-        url = conf.oslo_messaging_notifications.transport_url
-    return msg_transport.get_transport(conf, url,
-                                       allowed_remote_exmods, aliases)
-
-
-class Notifier(object):
-
-    """Send notification messages.
-
-    The Notifier class is used for sending notification messages over a
-    messaging transport or other means.
-
-    Notification messages have the following format::
-
-        {'message_id': six.text_type(uuid.uuid4()),
-         'publisher_id': 'compute.host1',
-         'timestamp': timeutils.utcnow(),
-         'priority': 'WARN',
-         'event_type': 'compute.create_instance',
-         'payload': {'instance_id': 12, ... }}
-
-    A Notifier object can be instantiated with a transport object and a
-    publisher ID::
-
-        notifier = messaging.Notifier(get_notification_transport(CONF),
-                                      'compute')
-
-    and notifications are sent via drivers chosen with the driver
-    config option and on the topics chosen with the topics config
-    option in the [oslo_messaging_notifications] section.
-
-    Alternatively, a Notifier object can be instantiated with a specific
-    driver or topic::
-
-        transport = notifier.get_notification_transport(CONF)
-        notifier = notifier.Notifier(transport,
-                                     'compute.host',
-                                     driver='messaging',
-                                     topic='notifications')
-
-    Notifier objects are relatively expensive to instantiate (mostly the cost
-    of loading notification drivers), so it is possible to specialize a given
-    Notifier object with a different publisher id using the prepare()
-    method::
-
-        notifier = notifier.prepare(publisher_id='compute')
-        notifier.info(ctxt, event_type, payload)
-    """
-
-    @renames.renamed_kwarg('topic', 'topics',
-                           message="Please use topics instead of topic",
-                           version='4.5.0',
-                           removal_version='5.0.0')
-    def __init__(self, transport, publisher_id=None,
-                 driver=None, topic=None,
-                 serializer=None, retry=None,
-                 topics=None):
-        """Construct a Notifier object.
-
-        :param transport: the transport to use for sending messages
-        :type transport: oslo_messaging.Transport
-        :param publisher_id: field in notifications sent, for example
-                             'compute.host1'
-        :type publisher_id: str
-        :param driver: a driver to lookup from oslo_messaging.notify.drivers
-        :type driver: str
-        :param topic: the topic which to send messages on
-        :type topic: str
-        :param serializer: an optional entity serializer
-        :type serializer: Serializer
-        :param retry: a connection retries configuration:
-                      None or -1 means to retry forever,
-                      0 means no retry,
-                      N means N retries
-        :type retry: int
-        :param topics: the topics which to send messages on
-        :type topics: list of strings
-        """
-        conf = transport.conf
-        conf.register_opts(_notifier_opts,
-                           group='oslo_messaging_notifications')
-
-        self.transport = transport
-        self.publisher_id = publisher_id
-        self.retry = retry
-
-        self._driver_names = ([driver] if driver is not None else
-                              conf.oslo_messaging_notifications.driver)
-
-        if topics is not None:
-            self._topics = topics
-        elif topic is not None:
-            self._topics = [topic]
-        else:
-            self._topics = conf.oslo_messaging_notifications.topics
-        self._serializer = serializer or msg_serializer.NoOpSerializer()
-
-        self._driver_mgr = named.NamedExtensionManager(
-            'oslo.messaging.notify.drivers',
-            names=self._driver_names,
-            invoke_on_load=True,
-            invoke_args=[conf],
-            invoke_kwds={
-                'topics': self._topics,
-                'transport': self.transport,
-            }
-        )
-
-    _marker = object()
-
-    def prepare(self, publisher_id=_marker, retry=_marker):
-        """Return a specialized Notifier instance.
-
-        Returns a new Notifier instance with the supplied publisher_id. Allows
-        sending notifications from multiple publisher_ids without the overhead
-        of notification driver loading.
-
-        :param publisher_id: field in notifications sent, for example
-                             'compute.host1'
-        :type publisher_id: str
-        :param retry: a connection retries configuration:
-                      None or -1 means to retry forever,
-                      0 means no retry,
-                      N means N retries
-        :type retry: int
-        """
-        return _SubNotifier._prepare(self, publisher_id, retry=retry)
-
-    def _notify(self, ctxt, event_type, payload, priority, publisher_id=None,
-                retry=None):
-        payload = self._serializer.serialize_entity(ctxt, payload)
-        ctxt = self._serializer.serialize_context(ctxt)
-
-        msg = dict(message_id=six.text_type(uuid.uuid4()),
-                   publisher_id=publisher_id or self.publisher_id,
-                   event_type=event_type,
-                   priority=priority,
-                   payload=payload,
-                   timestamp=six.text_type(timeutils.utcnow()))
-
-        def do_notify(ext):
-            try:
-                ext.obj.notify(ctxt, msg, priority, retry or self.retry)
-            except Exception as e:
-                _LOG.exception(_LE("Problem '%(e)s' attempting to send to "
-                                   "notification system. Payload=%(payload)s"),
-                               dict(e=e, payload=payload))
-
-        if self._driver_mgr.extensions:
-            self._driver_mgr.map(do_notify)
-
-    def audit(self, ctxt, event_type, payload):
-        """Send a notification at audit level.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param event_type: describes the event, for example
-                           'compute.create_instance'
-        :type event_type: str
-        :param payload: the notification payload
-        :type payload: dict
-        :raises: MessageDeliveryFailure
-        """
-        self._notify(ctxt, event_type, payload, 'AUDIT')
-
-    def debug(self, ctxt, event_type, payload):
-        """Send a notification at debug level.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param event_type: describes the event, for example
-                           'compute.create_instance'
-        :type event_type: str
-        :param payload: the notification payload
-        :type payload: dict
-        :raises: MessageDeliveryFailure
-        """
-        self._notify(ctxt, event_type, payload, 'DEBUG')
-
-    def info(self, ctxt, event_type, payload):
-        """Send a notification at info level.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param event_type: describes the event, for example
-                           'compute.create_instance'
-        :type event_type: str
-        :param payload: the notification payload
-        :type payload: dict
-        :raises: MessageDeliveryFailure
-        """
-        self._notify(ctxt, event_type, payload, 'INFO')
-
-    def warn(self, ctxt, event_type, payload):
-        """Send a notification at warning level.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param event_type: describes the event, for example
-                           'compute.create_instance'
-        :type event_type: str
-        :param payload: the notification payload
-        :type payload: dict
-        :raises: MessageDeliveryFailure
-        """
-        self._notify(ctxt, event_type, payload, 'WARN')
-
-    warning = warn
-
-    def error(self, ctxt, event_type, payload):
-        """Send a notification at error level.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param event_type: describes the event, for example
-                           'compute.create_instance'
-        :type event_type: str
-        :param payload: the notification payload
-        :type payload: dict
-        :raises: MessageDeliveryFailure
-        """
-        self._notify(ctxt, event_type, payload, 'ERROR')
-
-    def critical(self, ctxt, event_type, payload):
-        """Send a notification at critical level.
- - :param ctxt: a request context dict - :type ctxt: dict - :param event_type: describes the event, for example - 'compute.create_instance' - :type event_type: str - :param payload: the notification payload - :type payload: dict - :raises: MessageDeliveryFailure - """ - self._notify(ctxt, event_type, payload, 'CRITICAL') - - def sample(self, ctxt, event_type, payload): - """Send a notification at sample level. - - Sample notifications are for high-frequency events - that typically contain small payloads. eg: "CPU = 70%" - - Not all drivers support the sample level - (log, for example) so these could be dropped. - - :param ctxt: a request context dict - :type ctxt: dict - :param event_type: describes the event, for example - 'compute.create_instance' - :type event_type: str - :param payload: the notification payload - :type payload: dict - :raises: MessageDeliveryFailure - """ - self._notify(ctxt, event_type, payload, 'SAMPLE') - - -class _SubNotifier(Notifier): - - _marker = Notifier._marker - - def __init__(self, base, publisher_id, retry): - self._base = base - self.transport = base.transport - self.publisher_id = publisher_id - self.retry = retry - - self._serializer = self._base._serializer - self._driver_mgr = self._base._driver_mgr - - def _notify(self, ctxt, event_type, payload, priority): - super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority) - - @classmethod - def _prepare(cls, base, publisher_id=_marker, retry=_marker): - if publisher_id is cls._marker: - publisher_id = base.publisher_id - if retry is cls._marker: - retry = base.retry - return cls(base, publisher_id, retry=retry) diff --git a/oslo_messaging/opts.py b/oslo_messaging/opts.py deleted file mode 100644 index c252496..0000000 --- a/oslo_messaging/opts.py +++ /dev/null @@ -1,98 +0,0 @@ - -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
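As the prepare() docstring above notes, driver loading is the expensive part of Notifier construction, and _SubNotifier is what makes specialization cheap. A usage sketch (the publisher IDs and payload are illustrative)::

    base = oslo_messaging.Notifier(transport, publisher_id='compute')
    per_host = base.prepare(publisher_id='compute.host1')

    # Reuses the drivers loaded by 'base'; only the publisher_id differs.
    per_host.error({}, 'compute.instance.error', {'instance_id': 12})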
- -__all__ = [ - 'list_opts' -] - -import copy -import itertools - -from oslo_messaging._drivers import amqp -from oslo_messaging._drivers.amqp1_driver import opts as amqp_opts -from oslo_messaging._drivers import base as drivers_base -from oslo_messaging._drivers import impl_pika -from oslo_messaging._drivers import impl_rabbit -from oslo_messaging._drivers.impl_zmq import zmq_options -from oslo_messaging._drivers.pika_driver import pika_connection_factory -from oslo_messaging._drivers.zmq_driver.matchmaker import matchmaker_redis -from oslo_messaging.notify import notifier -from oslo_messaging.rpc import client -from oslo_messaging import server -from oslo_messaging import transport - - -_global_opt_lists = [ - drivers_base.base_opts, - zmq_options.zmq_opts, - server._pool_opts, - client._client_opts, - transport._transport_opts, -] - -_opts = [ - (None, list(itertools.chain(*_global_opt_lists))), - ('matchmaker_redis', matchmaker_redis.matchmaker_redis_opts), - ('oslo_messaging_zmq', zmq_options.zmq_opts), - ('oslo_messaging_amqp', amqp_opts.amqp1_opts), - ('oslo_messaging_notifications', notifier._notifier_opts), - ('oslo_messaging_rabbit', list( - itertools.chain(amqp.amqp_opts, impl_rabbit.rabbit_opts, - pika_connection_factory.pika_opts, - impl_pika.pika_pool_opts, impl_pika.notification_opts, - impl_pika.rpc_opts))), -] - - -def list_opts(): - """Return a list of oslo.config options available in the library. - - The returned list includes all oslo.config options which may be registered - at runtime by the library. - - Each element of the list is a tuple. The first element is the name of the - group under which the list of elements in the second element will be - registered. A group name of None corresponds to the [DEFAULT] group in - config files. - - This function is also discoverable via the 'oslo_messaging' entry point - under the 'oslo.config.opts' namespace. - - The purpose of this is to allow tools like the Oslo sample config file - generator to discover the options exposed to users by this library. - - :returns: a list of (group_name, opts) tuples - """ - return [(g, copy.deepcopy(o)) for g, o in _opts] - - -def set_defaults(conf, executor_thread_pool_size=None): - """Set defaults for configuration variables. - - Overrides default options values. - - :param conf: Config instance specified to set default options in it. Using - of instances instead of a global config object prevents conflicts between - options declaration. - :type conf: oslo.config.cfg.ConfigOpts instance. - - :keyword executor_thread_pool_size: Size of executor thread pool. - :type executor_thread_pool_size: int - :default executor_thread_pool_size: None - - """ - if executor_thread_pool_size is not None: - conf.set_default('executor_thread_pool_size', - executor_thread_pool_size) diff --git a/oslo_messaging/rpc/__init__.py b/oslo_messaging/rpc/__init__.py deleted file mode 100644 index f9cc881..0000000 --- a/oslo_messaging/rpc/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = [ - 'ClientSendError', - 'ExpectedException', - 'NoSuchMethod', - 'RPCClient', - 'RPCDispatcher', - 'RPCDispatcherError', - 'RPCVersionCapError', - 'RemoteError', - 'UnsupportedVersion', - 'expected_exceptions', - 'get_rpc_server', -] - -from .client import * -from .dispatcher import * -from .server import * diff --git a/oslo_messaging/rpc/client.py b/oslo_messaging/rpc/client.py deleted file mode 100644 index 88e21a5..0000000 --- a/oslo_messaging/rpc/client.py +++ /dev/null @@ -1,432 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = [ - 'ClientSendError', - 'RPCClient', - 'RPCVersionCapError', - 'RemoteError', -] - -import abc - -from oslo_config import cfg -import six - -from oslo_messaging._drivers import base as driver_base -from oslo_messaging import _utils as utils -from oslo_messaging import exceptions -from oslo_messaging import serializer as msg_serializer - -_client_opts = [ - cfg.IntOpt('rpc_response_timeout', - default=60, - help='Seconds to wait for a response from a call.'), -] - - -class RemoteError(exceptions.MessagingException): - - """Signifies that a remote endpoint method has raised an exception. - - Contains a string representation of the type of the original exception, - the value of the original exception, and the traceback. These are - sent to the parent as a joined string so printing the exception - contains all of the relevant info. - """ - - def __init__(self, exc_type=None, value=None, traceback=None): - self.exc_type = exc_type - self.value = value - self.traceback = traceback - msg = ("Remote error: %(exc_type)s %(value)s\n%(traceback)s." % - dict(exc_type=self.exc_type, value=self.value, - traceback=self.traceback)) - super(RemoteError, self).__init__(msg) - - -class RPCVersionCapError(exceptions.MessagingException): - - def __init__(self, version, version_cap): - self.version = version - self.version_cap = version_cap - msg = ("Requested message version, %(version)s is incompatible. It " - "needs to be equal in major version and less than or equal " - "in minor version as the specified version cap " - "%(version_cap)s." 
% - dict(version=self.version, version_cap=self.version_cap)) - super(RPCVersionCapError, self).__init__(msg) - - -class ClientSendError(exceptions.MessagingException): - """Raised if we failed to send a message to a target.""" - - def __init__(self, target, ex): - msg = 'Failed to send to target "%s": %s' % (target, ex) - super(ClientSendError, self).__init__(msg) - self.target = target - self.ex = ex - - -@six.add_metaclass(abc.ABCMeta) -class _BaseCallContext(object): - - _marker = object() - - def __init__(self, transport, target, serializer, - timeout=None, version_cap=None, retry=None): - self.conf = transport.conf - - self.transport = transport - self.target = target - self.serializer = serializer - self.timeout = timeout - self.retry = retry - self.version_cap = version_cap - - super(_BaseCallContext, self).__init__() - - def _make_message(self, ctxt, method, args): - msg = dict(method=method) - - msg['args'] = dict() - for argname, arg in six.iteritems(args): - msg['args'][argname] = self.serializer.serialize_entity(ctxt, arg) - - if self.target.namespace is not None: - msg['namespace'] = self.target.namespace - if self.target.version is not None: - msg['version'] = self.target.version - - return msg - - def _check_version_cap(self, version): - if not utils.version_is_compatible(self.version_cap, version): - raise RPCVersionCapError(version=version, - version_cap=self.version_cap) - - def can_send_version(self, version=_marker): - """Check to see if a version is compatible with the version cap.""" - version = self.target.version if version is self._marker else version - return utils.version_is_compatible(self.version_cap, version) - - @classmethod - def _check_version(cls, version): - if version is not cls._marker: - # quick sanity check to make sure parsable version numbers are used - try: - utils.version_is_compatible(version, version) - except (IndexError, ValueError): - raise exceptions.MessagingException( - "Version must contain a major and minor integer. Got %s" - % version) - - def cast(self, ctxt, method, **kwargs): - """Invoke a method and return immediately. See RPCClient.cast().""" - msg = self._make_message(ctxt, method, kwargs) - msg_ctxt = self.serializer.serialize_context(ctxt) - - self._check_version_cap(msg.get('version')) - - try: - self.transport._send(self.target, msg_ctxt, msg, retry=self.retry) - except driver_base.TransportDriverError as ex: - raise ClientSendError(self.target, ex) - - def call(self, ctxt, method, **kwargs): - """Invoke a method and wait for a reply. See RPCClient.call().""" - if self.target.fanout: - raise exceptions.InvalidTarget('A call cannot be used with fanout', - self.target) - - msg = self._make_message(ctxt, method, kwargs) - msg_ctxt = self.serializer.serialize_context(ctxt) - - timeout = self.timeout - if self.timeout is None: - timeout = self.conf.rpc_response_timeout - - self._check_version_cap(msg.get('version')) - - try: - result = self.transport._send(self.target, msg_ctxt, msg, - wait_for_reply=True, timeout=timeout, - retry=self.retry) - except driver_base.TransportDriverError as ex: - raise ClientSendError(self.target, ex) - - return self.serializer.deserialize_entity(ctxt, result) - - @abc.abstractmethod - def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, - version=_marker, server=_marker, fanout=_marker, - timeout=_marker, version_cap=_marker, retry=_marker): - """Prepare a method invocation context. 
See RPCClient.prepare().""" - - -class _CallContext(_BaseCallContext): - - _marker = _BaseCallContext._marker - - @classmethod - def _prepare(cls, call_context, - exchange=_marker, topic=_marker, namespace=_marker, - version=_marker, server=_marker, fanout=_marker, - timeout=_marker, version_cap=_marker, retry=_marker): - cls._check_version(version) - kwargs = dict( - exchange=exchange, - topic=topic, - namespace=namespace, - version=version, - server=server, - fanout=fanout) - kwargs = dict([(k, v) for k, v in kwargs.items() - if v is not cls._marker]) - target = call_context.target(**kwargs) - - if timeout is cls._marker: - timeout = call_context.timeout - if version_cap is cls._marker: - version_cap = call_context.version_cap - if retry is cls._marker: - retry = call_context.retry - - return _CallContext(call_context.transport, target, - call_context.serializer, - timeout, version_cap, retry) - - def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, - version=_marker, server=_marker, fanout=_marker, - timeout=_marker, version_cap=_marker, retry=_marker): - return _CallContext._prepare(self, - exchange, topic, namespace, - version, server, fanout, - timeout, version_cap, retry) - - -class RPCClient(_BaseCallContext): - - """A class for invoking methods on remote servers. - - The RPCClient class is responsible for sending method invocations to remote - servers via a messaging transport. - - A default target is supplied to the RPCClient constructor, but target - attributes can be overridden for individual method invocations using the - prepare() method. - - A method invocation consists of a request context dictionary, a method name - and a dictionary of arguments. A cast() invocation just sends the request - and returns immediately. A call() invocation waits for the server to send - a return value. - - This class is intended to be used by wrapping it in another class which - provides methods on the subclass to perform the remote invocation using - call() or cast():: - - class TestClient(object): - - def __init__(self, transport): - target = messaging.Target(topic='test', version='2.0') - self._client = messaging.RPCClient(transport, target) - - def test(self, ctxt, arg): - return self._client.call(ctxt, 'test', arg=arg) - - An example of using the prepare() method to override some attributes of the - default target:: - - def test(self, ctxt, arg): - cctxt = self._client.prepare(version='2.5') - return cctxt.call(ctxt, 'test', arg=arg) - - RPCClient have a number of other properties - for example, timeout and - version_cap - which may make sense to override for some method invocations, - so they too can be passed to prepare():: - - def test(self, ctxt, arg): - cctxt = self._client.prepare(timeout=10) - return cctxt.call(ctxt, 'test', arg=arg) - - However, this class can be used directly without wrapping it another class. - For example:: - - transport = messaging.get_transport(cfg.CONF) - target = messaging.Target(topic='test', version='2.0') - client = messaging.RPCClient(transport, target) - client.call(ctxt, 'test', arg=arg) - - but this is probably only useful in limited circumstances as a wrapper - class will usually help to make the code much more obvious. - - By default, cast() and call() will block until the message is successfully - sent. However, the retry parameter can be used to have message sending - fail with a MessageDeliveryFailure after the given number of retries. 
-    For example::
-
-        client = messaging.RPCClient(transport, target, retry=None)
-        client.call(ctxt, 'sync')
-        try:
-            client.prepare(retry=0).cast(ctxt, 'ping')
-        except messaging.MessageDeliveryFailure:
-            LOG.error("Failed to send ping message")
-    """
-
-    _marker = _BaseCallContext._marker
-
-    def __init__(self, transport, target,
-                 timeout=None, version_cap=None, serializer=None,
-                 retry=None):
-        """Construct an RPC client.
-
-        :param transport: a messaging transport handle
-        :type transport: Transport
-        :param target: the default target for invocations
-        :type target: Target
-        :param timeout: an optional default timeout (in seconds) for call()s
-        :type timeout: int or float
-        :param version_cap: raise a RPCVersionCapError if the requested
-                            version exceeds this cap
-        :type version_cap: str
-        :param serializer: an optional entity serializer
-        :type serializer: Serializer
-        :param retry: an optional default connection retries configuration
-                      None or -1 means to retry forever
-                      0 means no retry
-                      N means N retries
-        :type retry: int
-        """
-        if serializer is None:
-            serializer = msg_serializer.NoOpSerializer()
-
-        super(RPCClient, self).__init__(
-            transport, target, serializer, timeout, version_cap, retry
-        )
-
-        self.conf.register_opts(_client_opts)
-
-    def prepare(self, exchange=_marker, topic=_marker, namespace=_marker,
-                version=_marker, server=_marker, fanout=_marker,
-                timeout=_marker, version_cap=_marker, retry=_marker):
-        """Prepare a method invocation context.
-
-        Use this method to override client properties for an individual
-        method invocation. For example::
-
-            def test(self, ctxt, arg):
-                cctxt = self.prepare(version='2.5')
-                return cctxt.call(ctxt, 'test', arg=arg)
-
-        :param exchange: see Target.exchange
-        :type exchange: str
-        :param topic: see Target.topic
-        :type topic: str
-        :param namespace: see Target.namespace
-        :type namespace: str
-        :param version: requirement the server must support, see
-                        Target.version
-        :type version: str
-        :param server: send to a specific server, see Target.server
-        :type server: str
-        :param fanout: send to all servers on topic, see Target.fanout
-        :type fanout: bool
-        :param timeout: an optional default timeout (in seconds) for call()s
-        :type timeout: int or float
-        :param version_cap: raise a RPCVersionCapError if the requested
-                            version exceeds this cap
-        :type version_cap: str
-        :param retry: an optional connection retries configuration
-                      None or -1 means to retry forever
-                      0 means no retry
-                      N means N retries
-        :type retry: int
-        """
-        return _CallContext._prepare(self,
-                                     exchange, topic, namespace,
-                                     version, server, fanout,
-                                     timeout, version_cap, retry)
-
-    def cast(self, ctxt, method, **kwargs):
-        """Invoke a method and return immediately.
-
-        Method arguments must either be primitive types or types supported
-        by the client's serializer (if any).
-
-        Similarly, the request context must be a dict unless the client's
-        serializer supports serializing another type.
-
-        Note: cast does not ensure that the remote method will be executed
-        on each destination. But it does ensure that the method will not be
-        executed twice on a destination (i.e. 'at-most-once' execution).
-
-        Note: there are no ordering guarantees across successive casts,
-        even among casts to the same destination. Therefore methods may be
-        executed in an order different from the order in which they are
-        cast.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param method: the method name
-        :type method: str
-        :param kwargs: a dict of method arguments
-        :type kwargs: dict
-        :raises: MessageDeliveryFailure
-        """
-        self.prepare().cast(ctxt, method, **kwargs)
-
-    def call(self, ctxt, method, **kwargs):
-        """Invoke a method and wait for a reply.
-
-        Method arguments must either be primitive types or types supported
-        by the client's serializer (if any). Similarly, the request context
-        must be a dict unless the client's serializer supports serializing
-        another type.
-
-        The semantics of how any errors raised by the remote RPC endpoint
-        method are handled are quite subtle.
-
-        Firstly, if the remote exception is contained in one of the modules
-        listed in the allowed_remote_exmods messaging.get_transport()
-        parameter, then this exception will be re-raised by call(). However,
-        such locally re-raised remote exceptions are distinguishable from
-        the same exception type raised locally because re-raised remote
-        exceptions are modified such that their class name ends with the
-        '_Remote' suffix so you may do::
-
-            if ex.__class__.__name__.endswith('_Remote'):
-                # Some special case for locally re-raised remote exceptions
-
-        Secondly, if a remote exception is not from a module listed in the
-        allowed_remote_exmods list, then a messaging.RemoteError exception
-        is raised with all details of the remote exception.
-
-        Note: call() is done 'at-most-once'. If we cannot know whether the
-        call completed correctly, because the response did not arrive in
-        time, a MessagingTimeout exception is raised. The real reason can
-        vary: a transport failure, or a worker that crashed or did not
-        answer in time.
-
-        :param ctxt: a request context dict
-        :type ctxt: dict
-        :param method: the method name
-        :type method: str
-        :param kwargs: a dict of method arguments
-        :type kwargs: dict
-        :raises: MessagingTimeout, RemoteError, MessageDeliveryFailure
-        """
-        return self.prepare().call(ctxt, method, **kwargs)
-
-    def can_send_version(self, version=_marker):
-        """Check to see if a version is compatible with the version cap."""
-        return self.prepare(version=version).can_send_version()
diff --git a/oslo_messaging/rpc/dispatcher.py b/oslo_messaging/rpc/dispatcher.py
deleted file mode 100644
index 7b1d1a7..0000000
--- a/oslo_messaging/rpc/dispatcher.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
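To make the call() error-handling semantics described above concrete, here is a minimal sketch of a caller distinguishing the three failure modes. The 'get_item' endpoint method and the client wrapper are hypothetical examples; the exception types are the documented oslo.messaging ones::

    import logging

    import oslo_messaging as messaging

    LOG = logging.getLogger(__name__)

    def fetch_item(client, ctxt):
        try:
            # 'get_item' is a hypothetical endpoint method.
            return client.call(ctxt, 'get_item', item_id=42)
        except messaging.MessagingTimeout:
            # No reply arrived in time; with 'at-most-once' semantics we
            # cannot know whether the server actually executed the method.
            raise
        except messaging.RemoteError as e:
            # The remote exception's module was not listed in
            # allowed_remote_exmods, so it arrives wrapped with the
            # original type name, value and traceback.
            LOG.error('remote call failed: %s: %s', e.exc_type, e.value)
            raise
        except Exception as e:
            if type(e).__name__.endswith('_Remote'):
                # A whitelisted remote exception, re-raised locally under
                # a class name carrying the '_Remote' suffix.
                raise
            raise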
-
-__all__ = [
-    'NoSuchMethod',
-    'RPCDispatcher',
-    'RPCDispatcherError',
-    'UnsupportedVersion',
-    'ExpectedException',
-]
-
-import logging
-import sys
-
-import six
-
-from oslo_messaging import _utils as utils
-from oslo_messaging import dispatcher
-from oslo_messaging import serializer as msg_serializer
-from oslo_messaging import server as msg_server
-from oslo_messaging import target as msg_target
-
-LOG = logging.getLogger(__name__)
-
-
-class ExpectedException(Exception):
-    """Encapsulates an expected exception raised by an RPC endpoint.
-
-    Merely instantiating this exception records the current exception
-    information, which will be passed back to the RPC client without
-    exceptional logging.
-    """
-    def __init__(self):
-        self.exc_info = sys.exc_info()
-
-
-class RPCDispatcherError(msg_server.MessagingServerError):
-    "A base class for all RPC dispatcher exceptions."
-
-
-class NoSuchMethod(RPCDispatcherError, AttributeError):
-    "Raised if there is no endpoint which exposes the requested method."
-
-    def __init__(self, method):
-        msg = "Endpoint does not support RPC method %s" % method
-        super(NoSuchMethod, self).__init__(msg)
-        self.method = method
-
-
-class UnsupportedVersion(RPCDispatcherError):
-    "Raised if there is no endpoint which supports the requested version."
-
-    def __init__(self, version, method=None):
-        msg = "Endpoint does not support RPC version %s" % version
-        if method:
-            msg = "%s. Attempted method: %s" % (msg, method)
-        super(UnsupportedVersion, self).__init__(msg)
-        self.version = version
-        self.method = method
-
-
-class RPCDispatcher(dispatcher.DispatcherBase):
-    """A message dispatcher which understands RPC messages.
-
-    A MessageHandlingServer is constructed by passing a callable dispatcher
-    which is invoked with context and message dictionaries each time a
-    message is received.
-
-    RPCDispatcher is one such dispatcher which understands the format of
-    RPC messages. The dispatcher looks at the namespace, version and method
-    values in the message and matches those against a list of available
-    endpoints.
-
-    Endpoints may have a target attribute describing the namespace and
-    version of the methods exposed by that object. All public methods on an
-    endpoint object are remotely invokable by clients.
-    """
-
-    def __init__(self, endpoints, serializer):
-        """Construct an RPC server dispatcher.
-
-        :param endpoints: list of endpoint objects for dispatching to
-        :param serializer: optional message serializer
-        """
-
-        self.endpoints = endpoints
-        self.serializer = serializer or msg_serializer.NoOpSerializer()
-        self._default_target = msg_target.Target()
-
-    @staticmethod
-    def _is_namespace(target, namespace):
-        return namespace in target.accepted_namespaces
-
-    @staticmethod
-    def _is_compatible(target, version):
-        endpoint_version = target.version or '1.0'
-        return utils.version_is_compatible(endpoint_version, version)
-
-    def _do_dispatch(self, endpoint, method, ctxt, args):
-        ctxt = self.serializer.deserialize_context(ctxt)
-        new_args = dict()
-        for argname, arg in six.iteritems(args):
-            new_args[argname] = self.serializer.deserialize_entity(ctxt, arg)
-        func = getattr(endpoint, method)
-        result = func(ctxt, **new_args)
-        return self.serializer.serialize_entity(ctxt, result)
-
-    def dispatch(self, incoming):
-        """Dispatch an RPC message to the appropriate endpoint method.
-
-        :param incoming: incoming message
-        :type incoming: IncomingMessage
-        :raises: NoSuchMethod, UnsupportedVersion
-        """
-        message = incoming.message
-        ctxt = incoming.ctxt
-
-        method = message.get('method')
-        args = message.get('args', {})
-        namespace = message.get('namespace')
-        version = message.get('version', '1.0')
-
-        found_compatible = False
-        for endpoint in self.endpoints:
-            target = getattr(endpoint, 'target', None)
-            if not target:
-                target = self._default_target
-
-            if not (self._is_namespace(target, namespace) and
-                    self._is_compatible(target, version)):
-                continue
-
-            if hasattr(endpoint, method):
-                return self._do_dispatch(endpoint, method, ctxt, args)
-
-            found_compatible = True
-
-        if found_compatible:
-            raise NoSuchMethod(method)
-        else:
-            raise UnsupportedVersion(version, method=method)
diff --git a/oslo_messaging/rpc/server.py b/oslo_messaging/rpc/server.py
deleted file mode 100644
index c51ffb9..0000000
--- a/oslo_messaging/rpc/server.py
+++ /dev/null
@@ -1,209 +0,0 @@
-
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-An RPC server exposes a number of endpoints, each of which contains a set
-of methods which may be invoked remotely by clients over a given transport.
-
-To create an RPC server, you supply a transport, target and a list of
-endpoints.
-
-A transport can be obtained simply by calling the get_transport() method::
-
-    transport = messaging.get_transport(conf)
-
-which will load the appropriate transport driver according to the user's
-messaging configuration. See get_transport() for more details.
-
-The target supplied when creating an RPC server expresses the topic, server
-name and - optionally - the exchange to listen on. See Target for more
-details on these attributes.
-
-Each endpoint object may have a target attribute which may have namespace
-and version fields set. By default, we use the 'null namespace' and version
-1.0. Incoming method calls will be dispatched to the first endpoint with
-the requested method, a matching namespace and a compatible version number.
-
-RPC servers have start(), stop() and wait() methods to begin handling
-requests, stop handling requests and wait for all in-process requests to
-complete.
-
-A simple example of an RPC server with multiple endpoints might be::
-
-    from oslo_config import cfg
-    import oslo_messaging
-    import time
-
-    class ServerControlEndpoint(object):
-
-        target = oslo_messaging.Target(namespace='control',
-                                       version='2.0')
-
-        def __init__(self, server):
-            self.server = server
-
-        def stop(self, ctx):
-            if self.server:
-                self.server.stop()
-
-    class TestEndpoint(object):
-
-        def test(self, ctx, arg):
-            return arg
-
-    transport = oslo_messaging.get_transport(cfg.CONF)
-    target = oslo_messaging.Target(topic='test', server='server1')
-    endpoints = [
-        ServerControlEndpoint(None),
-        TestEndpoint(),
-    ]
-    server = oslo_messaging.get_rpc_server(transport, target, endpoints,
-                                           executor='blocking')
-    try:
-        server.start()
-        while True:
-            time.sleep(1)
-    except KeyboardInterrupt:
-        print("Stopping server")
-
-    server.stop()
-    server.wait()
-
-Clients can invoke methods on the server in three ways: by sending the
-request to a topic, in which case it is delivered to one of the servers
-listening on that topic; by sending the request to a specific server
-listening on the topic; or by sending the request to all servers listening
-on the topic (known as fanout). These modes are chosen via the server and
-fanout attributes on Target, but the mode used is transparent to the
-server.
-
-The first parameter to method invocations is always the request context
-supplied by the client.
-
-Parameters to the method invocation must be primitive types, and so must
-the return values. By supplying a serializer object, a server can
-deserialize a request context and arguments from - and serialize return
-values to - primitive types.
-"""
-
-__all__ = [
-    'get_rpc_server',
-    'expected_exceptions',
-]
-
-import logging
-import sys
-
-from oslo_messaging._i18n import _LE
-from oslo_messaging.rpc import dispatcher as rpc_dispatcher
-from oslo_messaging import server as msg_server
-
-LOG = logging.getLogger(__name__)
-
-
-class RPCServer(msg_server.MessageHandlingServer):
-    def __init__(self, transport, target, dispatcher, executor='blocking'):
-        super(RPCServer, self).__init__(transport, dispatcher, executor)
-        self._target = target
-
-    def _create_listener(self):
-        return self.transport._listen(self._target, 1, None)
-
-    def _process_incoming(self, incoming):
-        message = incoming[0]
-        try:
-            message.acknowledge()
-        except Exception:
-            LOG.exception(_LE("Can not acknowledge message. Skip processing"))
-            return
-
-        failure = None
-        try:
-            res = self.dispatcher.dispatch(message)
-        except rpc_dispatcher.ExpectedException as e:
-            failure = e.exc_info
-            LOG.debug(u'Expected exception during message handling (%s)', e)
-        except Exception:
-            # current sys.exc_info() content can be overridden
-            # by another exception raised by a log handler during
-            # LOG.exception(). So keep a copy and delete it later.
-            failure = sys.exc_info()
-            LOG.exception(_LE('Exception during message handling'))
-
-        try:
-            if failure is None:
-                message.reply(res)
-            else:
-                message.reply(failure=failure)
-        except Exception:
-            LOG.exception(_LE("Can not send reply for message"))
-        finally:
-            # NOTE(dhellmann): Remove circular object reference
-            # between the current stack frame and the traceback in
-            # exc_info.
-            del failure
-
-
-def get_rpc_server(transport, target, endpoints,
-                   executor='blocking', serializer=None):
-    """Construct an RPC server.
-
-    The executor parameter controls how incoming messages will be received
-    and dispatched. By default, the most simple executor is used - the
-    blocking executor.
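For instance, assuming transport, target and endpoints are defined as in the module-level example above, a minimal sketch of a server that dispatches on a thread pool instead of blocking::

    server = oslo_messaging.get_rpc_server(transport, target, endpoints,
                                           executor='threading')
    server.start()
    # ... serve until shutdown is requested ...
    server.stop()
    server.wait()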
-
-    If the eventlet executor is used, the threading and time libraries need
-    to be monkeypatched.
-
-    :param transport: the messaging transport
-    :type transport: Transport
-    :param target: the exchange, topic and server to listen on
-    :type target: Target
-    :param endpoints: a list of endpoint objects
-    :type endpoints: list
-    :param executor: name of a message executor - for example
-                     'eventlet', 'blocking'
-    :type executor: str
-    :param serializer: an optional entity serializer
-    :type serializer: Serializer
-    """
-    dispatcher = rpc_dispatcher.RPCDispatcher(endpoints, serializer)
-    return RPCServer(transport, target, dispatcher, executor)
-
-
-def expected_exceptions(*exceptions):
-    """Decorator for RPC endpoint methods that raise expected exceptions.
-
-    Marking an endpoint method with this decorator allows the declaration
-    of expected exceptions that the RPC server should not consider fatal,
-    and not log as if they were generated in a real error scenario.
-
-    Note that this will cause listed exceptions to be wrapped in an
-    ExpectedException, which is used internally by the RPC server. The RPC
-    client will see the original exception type.
-    """
-    def outer(func):
-        def inner(*args, **kwargs):
-            try:
-                return func(*args, **kwargs)
-            # Take advantage of the fact that we can catch
-            # multiple exception types using a tuple of
-            # exception classes, with subclass detection
-            # for free. Any exception that is not in or
-            # derived from the args passed to us will not
-            # be caught and will propagate as normal.
-            except exceptions:
-                raise rpc_dispatcher.ExpectedException()
-        return inner
-    return outer
diff --git a/oslo_messaging/serializer.py b/oslo_messaging/serializer.py
deleted file mode 100644
index f9ac6aa..0000000
--- a/oslo_messaging/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-__all__ = ['Serializer', 'NoOpSerializer', 'JsonPayloadSerializer',
-           'RequestContextSerializer']
-
-"""Provides the definition of a message serialization handler"""
-
-import abc
-
-from debtcollector import removals
-from oslo_context import context as common_context
-from oslo_serialization import jsonutils
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Serializer(object):
-    """Generic (de-)serialization definition base class."""
-
-    @abc.abstractmethod
-    def serialize_entity(self, ctxt, entity):
-        """Serialize something to primitive form.
-
-        :param ctxt: Request context, in deserialized form
-        :param entity: Entity to be serialized
-        :returns: Serialized form of entity
-        """
-
-    @abc.abstractmethod
-    def deserialize_entity(self, ctxt, entity):
-        """Deserialize something from primitive form.
-
-        :param ctxt: Request context, in deserialized form
-        :param entity: Primitive to be deserialized
-        :returns: Deserialized form of entity
-        """
-
-    @abc.abstractmethod
-    def serialize_context(self, ctxt):
-        """Serialize a request context into a dictionary.
-
-        :param ctxt: Request context
-        :returns: Serialized form of context
-        """
-
-    @abc.abstractmethod
-    def deserialize_context(self, ctxt):
-        """Deserialize a dictionary into a request context.
-
-        :param ctxt: Request context dictionary
-        :returns: Deserialized form of context
-        """
-
-
-@removals.removed_class("RequestContextSerializer",
-                        version="4.6",
-                        removal_version="5.0")
-class RequestContextSerializer(Serializer):
-
-    def __init__(self, base):
-        self._base = base
-
-    def serialize_entity(self, context, entity):
-        if not self._base:
-            return entity
-        return self._base.serialize_entity(context, entity)
-
-    def deserialize_entity(self, context, entity):
-        if not self._base:
-            return entity
-        return self._base.deserialize_entity(context, entity)
-
-    def serialize_context(self, context):
-        return context.to_dict()
-
-    def deserialize_context(self, context):
-        return common_context.RequestContext.from_dict(context)
-
-
-class NoOpSerializer(Serializer):
-    """A serializer that does nothing."""
-
-    def serialize_entity(self, ctxt, entity):
-        return entity
-
-    def deserialize_entity(self, ctxt, entity):
-        return entity
-
-    def serialize_context(self, ctxt):
-        return ctxt
-
-    def deserialize_context(self, ctxt):
-        return ctxt
-
-
-class JsonPayloadSerializer(NoOpSerializer):
-    @staticmethod
-    def serialize_entity(context, entity):
-        return jsonutils.to_primitive(entity, convert_instances=True)
diff --git a/oslo_messaging/server.py b/oslo_messaging/server.py
deleted file mode 100644
index ac4a964..0000000
--- a/oslo_messaging/server.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
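Before the server machinery below, a minimal sketch of how the Serializer interface above is typically implemented; the Item class and its fields are purely illustrative assumptions, not part of the library::

    from oslo_messaging import serializer as msg_serializer

    class Item(object):
        # A purely illustrative domain object.
        def __init__(self, id, name):
            self.id = id
            self.name = name

    class ItemSerializer(msg_serializer.NoOpSerializer):
        def serialize_entity(self, ctxt, entity):
            # Convert the domain object to a primitive dict for the wire.
            if isinstance(entity, Item):
                return {'id': entity.id, 'name': entity.name}
            return entity

        def deserialize_entity(self, ctxt, entity):
            # Rebuild the domain object from its primitive form.
            if isinstance(entity, dict) and set(entity) == {'id', 'name'}:
                return Item(entity['id'], entity['name'])
            return entity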
-
-__all__ = [
-    'ExecutorLoadFailure',
-    'MessageHandlingServer',
-    'MessagingServerError',
-    'ServerListenError',
-]
-
-import abc
-import functools
-import inspect
-import logging
-import threading
-import traceback
-
-from oslo_config import cfg
-from oslo_service import service
-from oslo_utils import eventletutils
-from oslo_utils import timeutils
-import six
-from stevedore import driver
-
-from oslo_messaging._drivers import base as driver_base
-from oslo_messaging._i18n import _LW, _LI
-from oslo_messaging import exceptions
-
-LOG = logging.getLogger(__name__)
-
-# The default number of seconds of waiting after which we will emit a log
-# message
-DEFAULT_LOG_AFTER = 30
-
-
-_pool_opts = [
-    cfg.IntOpt('executor_thread_pool_size',
-               default=64,
-               deprecated_name="rpc_thread_pool_size",
-               help='Size of executor thread pool.'),
-]
-
-
-class MessagingServerError(exceptions.MessagingException):
-    """Base class for all MessageHandlingServer exceptions."""
-
-
-class ExecutorLoadFailure(MessagingServerError):
-    """Raised if an executor can't be loaded."""
-
-    def __init__(self, executor, ex):
-        msg = 'Failed to load executor "%s": %s' % (executor, ex)
-        super(ExecutorLoadFailure, self).__init__(msg)
-        self.executor = executor
-        self.ex = ex
-
-
-class ServerListenError(MessagingServerError):
-    """Raised if we failed to listen on a target."""
-
-    def __init__(self, target, ex):
-        msg = 'Failed to listen on target "%s": %s' % (target, ex)
-        super(ServerListenError, self).__init__(msg)
-        self.target = target
-        self.ex = ex
-
-
-class TaskTimeout(MessagingServerError):
-    """Raised if we timed out waiting for a task to complete."""
-
-
-class _OrderedTask(object):
-    """A task which must be executed in a particular order.
-
-    A caller may wait for this task to complete by calling
-    `wait_for_completion`.
-
-    A caller may run this task with `run_once`, which will ensure that
-    however many times the task is called it only runs once. Simultaneous
-    callers will block until the running task completes, which means that
-    any caller can be sure that the task has completed after run_once
-    returns.
-    """
-
-    INIT = 0      # The task has not yet started
-    RUNNING = 1   # The task is running somewhere
-    COMPLETE = 2  # The task has run somewhere
-
-    def __init__(self, name):
-        """Create a new _OrderedTask.
-
-        :param name: The name of this task. Used in log messages.
-        """
-        super(_OrderedTask, self).__init__()
-
-        self._name = name
-        self._cond = threading.Condition()
-        self._state = self.INIT
-
-    def _wait(self, condition, msg, log_after, timeout_timer):
-        """Wait while condition() is true. Write a log message if condition()
-        has not become false within `log_after` seconds. Raise TaskTimeout if
-        timeout_timer expires while waiting.
-        """
-
-        log_timer = None
-        if log_after != 0:
-            log_timer = timeutils.StopWatch(duration=log_after)
-            log_timer.start()
-
-        while condition():
-            if log_timer is not None and log_timer.expired():
-                LOG.warning(_LW('Possible hang: %s'), msg)
-                LOG.debug(''.join(traceback.format_stack()))
-                # Only log once. After that we wait indefinitely without
-                # logging.
- log_timer = None - - if timeout_timer is not None and timeout_timer.expired(): - raise TaskTimeout(msg) - - timeouts = [] - if log_timer is not None: - timeouts.append(log_timer.leftover()) - if timeout_timer is not None: - timeouts.append(timeout_timer.leftover()) - - wait = None - if timeouts: - wait = min(timeouts) - self._cond.wait(wait) - - @property - def complete(self): - return self._state == self.COMPLETE - - def wait_for_completion(self, caller, log_after, timeout_timer): - """Wait until this task has completed. - - :param caller: The name of the task which is waiting. - :param log_after: Emit a log message if waiting longer than `log_after` - seconds. - :param timeout_timer: Raise TaskTimeout if StopWatch object - `timeout_timer` expires while waiting. - """ - with self._cond: - msg = '%s is waiting for %s to complete' % (caller, self._name) - self._wait(lambda: not self.complete, - msg, log_after, timeout_timer) - - def run_once(self, fn, log_after, timeout_timer): - """Run a task exactly once. If it is currently running in another - thread, wait for it to complete. If it has already run, return - immediately without running it again. - - :param fn: The task to run. It must be a callable taking no arguments. - It may optionally return another callable, which also takes - no arguments, which will be executed after completion has - been signaled to other threads. - :param log_after: Emit a log message if waiting longer than `log_after` - seconds. - :param timeout_timer: Raise TaskTimeout if StopWatch object - `timeout_timer` expires while waiting. - """ - with self._cond: - if self._state == self.INIT: - self._state = self.RUNNING - # Note that nothing waits on RUNNING, so no need to notify - - # We need to release the condition lock before calling out to - # prevent deadlocks. Reacquire it immediately afterwards. - self._cond.release() - try: - post_fn = fn() - finally: - self._cond.acquire() - self._state = self.COMPLETE - self._cond.notify_all() - - if post_fn is not None: - # Release the condition lock before calling out to prevent - # deadlocks. Reacquire it immediately afterwards. - self._cond.release() - try: - post_fn() - finally: - self._cond.acquire() - elif self._state == self.RUNNING: - msg = ('%s is waiting for another thread to complete' - % self._name) - self._wait(lambda: self._state == self.RUNNING, - msg, log_after, timeout_timer) - - -class _OrderedTaskRunner(object): - """Mixin for a class which executes ordered tasks.""" - - def __init__(self, *args, **kwargs): - super(_OrderedTaskRunner, self).__init__(*args, **kwargs) - - # Get a list of methods on this object which have the _ordered - # attribute - self._tasks = [name - for (name, member) in inspect.getmembers(self) - if inspect.ismethod(member) and - getattr(member, '_ordered', False)] - self.reset_states() - - self._reset_lock = threading.Lock() - - def reset_states(self): - # Create new task states for tasks in reset - self._states = {task: _OrderedTask(task) for task in self._tasks} - - @staticmethod - def decorate_ordered(fn, state, after, reset_after): - - @functools.wraps(fn) - def wrapper(self, *args, **kwargs): - # If the reset_after state has already completed, reset state so - # we can run again. - # NOTE(mdbooth): This is ugly and requires external locking to be - # deterministic when using multiple threads. Consider a thread that - # does: server.stop(), server.wait(). If another thread causes a - # reset between stop() and wait(), this will not have the intended - # behaviour. 
It is safe without external locking, if the caller
-            # instantiates a new object.
-            with self._reset_lock:
-                if (reset_after is not None and
-                        self._states[reset_after].complete):
-                    self.reset_states()
-
-            # Store the states we started with in case the state wraps on us
-            # while we're sleeping. We must wait and run_once in the same
-            # epoch. If the epoch ended while we were sleeping, run_once will
-            # safely do nothing.
-            states = self._states
-
-            log_after = kwargs.pop('log_after', DEFAULT_LOG_AFTER)
-            timeout = kwargs.pop('timeout', None)
-
-            timeout_timer = None
-            if timeout is not None:
-                timeout_timer = timeutils.StopWatch(duration=timeout)
-                timeout_timer.start()
-
-            # Wait for the given preceding state to complete
-            if after is not None:
-                states[after].wait_for_completion(state,
-                                                  log_after, timeout_timer)
-
-            # Run this state
-            states[state].run_once(lambda: fn(self, *args, **kwargs),
-                                   log_after, timeout_timer)
-        return wrapper
-
-
-def ordered(after=None, reset_after=None):
-    """A method which will be executed as an ordered task. The method will
-    be called exactly once, however many times it is called. If it is
-    called multiple times simultaneously it will only be called once, but
-    all callers will wait until execution is complete.
-
-    If `after` is given, this method will not run until `after` has
-    completed.
-
-    If `reset_after` is given and the target method has completed, allow
-    this task to run again by resetting all task states.
-
-    :param after: Optionally, the name of another `ordered` method. Wait
-                  for the completion of `after` before executing this
-                  method.
-    :param reset_after: Optionally, the name of another `ordered` method.
-                        Reset all states when calling this method if
-                        `reset_after` has completed.
-    """
-    def _ordered(fn):
-        # Set an attribute on the method so we can find it later
-        setattr(fn, '_ordered', True)
-        state = fn.__name__
-
-        return _OrderedTaskRunner.decorate_ordered(fn, state, after,
-                                                   reset_after)
-    return _ordered
-
-
-@six.add_metaclass(abc.ABCMeta)
-class MessageHandlingServer(service.ServiceBase, _OrderedTaskRunner):
-    """Server for handling messages.
-
-    Connect a transport to a dispatcher that knows how to process the
-    message using an executor that knows how the app wants to create
-    new tasks.
-    """
-
-    def __init__(self, transport, dispatcher, executor='blocking'):
-        """Construct a message handling server.
-
-        The dispatcher parameter is a DispatcherBase instance which is used
-        for routing requests to endpoints for processing.
-
-        The executor parameter controls how incoming messages will be
-        received and dispatched. By default, the most simple executor is
-        used - the blocking executor. It handles only one message at a
-        time. It's recommended to use threading or eventlet.
-
-        :param transport: the messaging transport
-        :type transport: Transport
-        :param dispatcher: has a dispatch() method which is invoked for each
-                           incoming request
-        :type dispatcher: DispatcherBase
-        :param executor: name of message executor - for example
-                         'eventlet', 'blocking'
-        :type executor: str
-        """
-        self.conf = transport.conf
-        self.conf.register_opts(_pool_opts)
-
-        self.transport = transport
-        self.dispatcher = dispatcher
-        self.executor_type = executor
-        if self.executor_type == 'blocking':
-            # NOTE(sileht): We keep blocking as the default to avoid forcing
-            # the application to use threading or eventlet, since
-            # applications have to be prepared accordingly for each one
-            # (monkey patching, thread safety, ...)
-            LOG.info(_LI("The blocking executor handles only one message "
-                         "at a time. The threading or eventlet executor "
-                         "is recommended."))
-
-        self.listener = None
-
-        try:
-            mgr = driver.DriverManager('oslo.messaging.executors',
-                                       self.executor_type)
-        except RuntimeError as ex:
-            raise ExecutorLoadFailure(self.executor_type, ex)
-
-        self._executor_cls = mgr.driver
-
-        self._work_executor = None
-
-        self._started = False
-
-        super(MessageHandlingServer, self).__init__()
-
-    def _on_incoming(self, incoming):
-        """Handles the on_incoming event.
-
-        :param incoming: incoming request.
-        """
-        self._work_executor.submit(self._process_incoming, incoming)
-
-    @abc.abstractmethod
-    def _process_incoming(self, incoming):
-        """Perform processing of an incoming request.
-
-        :param incoming: incoming request.
-        """
-
-    @abc.abstractmethod
-    def _create_listener(self):
-        """Creates a listener object for polling requests.
-
-        :return: MessageListenerAdapter
-        """
-
-    @ordered(reset_after='stop')
-    def start(self, override_pool_size=None):
-        """Start handling incoming messages.
-
-        This method causes the server to begin polling the transport for
-        incoming messages and passing them to the dispatcher. Message
-        processing will continue until the stop() method is called.
-
-        The executor controls how the server integrates with the
-        application's I/O handling strategy - it may choose to poll for
-        messages in a new process, thread or co-operatively scheduled
-        coroutine, or simply by registering a callback with an event loop.
-        Similarly, the executor may choose to dispatch messages in a new
-        thread, coroutine or simply the current thread.
-        """
-        # Warn that restarting will be deprecated
-        if self._started:
-            LOG.warning(_LW('Restarting a MessageHandlingServer is '
-                            'inherently racy. It is deprecated, and will '
-                            'become a noop in a future release of '
-                            'oslo.messaging. If you need to restart '
-                            'MessageHandlingServer you should instantiate '
-                            'a new object.'))
-        self._started = True
-
-        executor_opts = {}
-
-        if self.executor_type == "threading":
-            executor_opts["max_workers"] = (
-                override_pool_size or self.conf.executor_thread_pool_size
-            )
-        elif self.executor_type == "eventlet":
-            eventletutils.warn_eventlet_not_patched(
-                expected_patched_modules=['thread'],
-                what="the 'oslo.messaging eventlet executor'")
-            executor_opts["max_workers"] = (
-                override_pool_size or self.conf.executor_thread_pool_size
-            )
-
-        self._work_executor = self._executor_cls(**executor_opts)
-
-        try:
-            self.listener = self._create_listener()
-        except driver_base.TransportDriverError as ex:
-            raise ServerListenError(self.target, ex)
-
-        self.listener.start(self._on_incoming)
-
-    @ordered(after='start')
-    def stop(self):
-        """Stop handling incoming messages.
-
-        Once this method returns, no new incoming messages will be handled
-        by the server. However, the server may still be in the process of
-        handling some messages, and underlying driver resources associated
-        with this server are still in use. See 'wait' for more details.
-        """
-        self.listener.stop()
-        self._started = False
-
-    @ordered(after='stop')
-    def wait(self):
-        """Wait for message processing to complete.
-
-        After calling stop(), there may still be some existing messages
-        which have not been completely processed. The wait() method blocks
-        until all message processing has completed.
-
-        Once it's finished, the underlying driver resources associated with
-        this server are released (such as closing unused network
-        connections).
- """ - self._work_executor.shutdown(wait=True) - - # Close listener connection after processing all messages - self.listener.cleanup() - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ - # TODO(sergey.vilgelm): implement this method - pass diff --git a/oslo_messaging/target.py b/oslo_messaging/target.py deleted file mode 100644 index e91cc87..0000000 --- a/oslo_messaging/target.py +++ /dev/null @@ -1,101 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class Target(object): - - """Identifies the destination of messages. - - A Target encapsulates all the information to identify where a message - should be sent or what messages a server is listening for. - - Different subsets of the information encapsulated in a Target object is - relevant to various aspects of the API: - - creating a server: - topic and server is required; exchange is optional - an endpoint's target: - namespace and version is optional - client sending a message: - topic is required, all other attributes optional - - Its attributes are: - - :param exchange: A scope for topics. Leave unspecified to default to the - control_exchange configuration option. - :type exchange: str - :param topic: A name which identifies the set of interfaces exposed by a - server. Multiple servers may listen on a topic and messages will be - dispatched to one of the servers in a round-robin fashion. - :type topic: str - :param namespace: Identifies a particular interface (i.e. set of methods) - exposed by a server. The default interface has no namespace identifier - and is referred to as the null namespace. - :type namespace: str - :param version: Interfaces have a major.minor version number associated - with them. A minor number increment indicates a backwards compatible - change and an incompatible change is indicated by a major number bump. - Servers may implement multiple major versions and clients may require - indicate that their message requires a particular minimum minor version. - :type version: str - :param server: Clients can request that a message be directed to a specific - server, rather than just one of a pool of servers listening on the topic. - :type server: str - :param fanout: Clients may request that a message be directed to all - servers listening on a topic by setting fanout to ``True``, rather than - just one of them. - :type fanout: bool - :param legacy_namespaces: A server always accepts messages specified via - the 'namespace' parameter, and may also accept messages defined via - this parameter. This option should be used to switch namespaces safely - during rolling upgrades. 
-    :type legacy_namespaces: list of strings
-    """
-
-    def __init__(self, exchange=None, topic=None, namespace=None,
-                 version=None, server=None, fanout=None,
-                 legacy_namespaces=None):
-        self.exchange = exchange
-        self.topic = topic
-        self.namespace = namespace
-        self.version = version
-        self.server = server
-        self.fanout = fanout
-        self.accepted_namespaces = [namespace] + (legacy_namespaces or [])
-
-    def __call__(self, **kwargs):
-        for a in ('exchange', 'topic', 'namespace',
-                  'version', 'server', 'fanout'):
-            kwargs.setdefault(a, getattr(self, a))
-        return Target(**kwargs)
-
-    def __eq__(self, other):
-        return vars(self) == vars(other)
-
-    def __ne__(self, other):
-        return not self == other
-
-    def __repr__(self):
-        attrs = []
-        for a in ['exchange', 'topic', 'namespace',
-                  'version', 'server', 'fanout']:
-            v = getattr(self, a)
-            if v:
-                attrs.append((a, v))
-        values = ', '.join(['%s=%s' % i for i in attrs])
-        return '<Target ' + values + '>'
-
-    def __hash__(self):
-        return id(self)
diff --git a/oslo_messaging/tests/__init__.py b/oslo_messaging/tests/__init__.py
deleted file mode 100644
index 0222c4e..0000000
--- a/oslo_messaging/tests/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2014 eNovance
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# Import oslotest before importing test submodules to set up six.moves for
-# mock
-import oslotest
-
-try:
-    import eventlet
-except ImportError:
-    pass
-else:
-    # Ensure that eventlet monkey patching is enabled before loading the
-    # qpid module, otherwise qpid will hang
-    eventlet.monkey_patch()
diff --git a/oslo_messaging/tests/drivers/__init__.py b/oslo_messaging/tests/drivers/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/oslo_messaging/tests/drivers/pika/__init__.py b/oslo_messaging/tests/drivers/pika/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/oslo_messaging/tests/drivers/pika/test_message.py b/oslo_messaging/tests/drivers/pika/test_message.py
deleted file mode 100644
index 5d29c8a..0000000
--- a/oslo_messaging/tests/drivers/pika/test_message.py
+++ /dev/null
@@ -1,623 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
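To recap the Target semantics defined above, a short sketch of typical constructions; the topic and server names are illustrative::

    from oslo_messaging import target

    # A server listens on a topic/server pair.
    server_target = target.Target(topic='compute', server='host-1')

    # A client sends to one of the servers listening on a topic...
    anycast = target.Target(topic='compute')

    # ...or to every server listening on the topic.
    broadcast = target.Target(topic='compute', fanout=True)

    # Calling a Target produces a modified copy, which is how
    # prepare() overrides attributes internally.
    directed = anycast(server='host-2')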
-import functools -import unittest - -from concurrent import futures -from mock import mock -from mock import patch -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import pika - -import oslo_messaging -from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns -from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg - - -class PikaIncomingMessageTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._channel = mock.Mock() - - self._delivery_tag = 12345 - - self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag) - self._properties = pika.BasicProperties( - content_type="application/json", - headers={"version": "1.0"}, - ) - self._body = ( - b'{"_$_key_context":"context_value",' - b'"payload_key": "payload_value"}' - ) - - def test_message_body_parsing(self): - message = pika_drv_msg.PikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - self.assertEqual("context_value", - message.ctxt.get("key_context", None)) - self.assertEqual("payload_value", - message.message.get("payload_key", None)) - - def test_message_acknowledge(self): - message = pika_drv_msg.PikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - message.acknowledge() - - self.assertEqual(1, self._channel.basic_ack.call_count) - self.assertEqual({"delivery_tag": self._delivery_tag}, - self._channel.basic_ack.call_args[1]) - - def test_message_acknowledge_no_ack(self): - message = pika_drv_msg.PikaIncomingMessage( - self._pika_engine, None, self._method, self._properties, - self._body - ) - - message.acknowledge() - - self.assertEqual(0, self._channel.basic_ack.call_count) - - def test_message_requeue(self): - message = pika_drv_msg.PikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - message.requeue() - - self.assertEqual(1, self._channel.basic_nack.call_count) - self.assertEqual({"delivery_tag": self._delivery_tag, 'requeue': True}, - self._channel.basic_nack.call_args[1]) - - def test_message_requeue_no_ack(self): - message = pika_drv_msg.PikaIncomingMessage( - self._pika_engine, None, self._method, self._properties, - self._body - ) - - message.requeue() - - self.assertEqual(0, self._channel.basic_nack.call_count) - - -class RpcPikaIncomingMessageTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._pika_engine.rpc_reply_retry_attempts = 3 - self._pika_engine.rpc_reply_retry_delay = 0.25 - - self._channel = mock.Mock() - - self._delivery_tag = 12345 - - self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag) - self._body = ( - b'{"_$_key_context":"context_value",' - b'"payload_key":"payload_value"}' - ) - self._properties = pika.BasicProperties( - content_type="application/json", - content_encoding="utf-8", - headers={"version": "1.0"}, - ) - - def test_call_message_body_parsing(self): - self._properties.correlation_id = 123456789 - self._properties.reply_to = "reply_queue" - - message = pika_drv_msg.RpcPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - self.assertEqual("context_value", - message.ctxt.get("key_context", None)) - self.assertEqual(123456789, message.msg_id) - self.assertEqual("reply_queue", message.reply_q) - - self.assertEqual("payload_value", - message.message.get("payload_key", None)) - - def 
test_cast_message_body_parsing(self): - message = pika_drv_msg.RpcPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - self.assertEqual("context_value", - message.ctxt.get("key_context", None)) - self.assertEqual(None, message.msg_id) - self.assertEqual(None, message.reply_q) - - self.assertEqual("payload_value", - message.message.get("payload_key", None)) - - @patch(("oslo_messaging._drivers.pika_driver.pika_message." - "PikaOutgoingMessage.send")) - def test_reply_for_cast_message(self, send_reply_mock): - message = pika_drv_msg.RpcPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - self.assertEqual("context_value", - message.ctxt.get("key_context", None)) - self.assertEqual(None, message.msg_id) - self.assertEqual(None, message.reply_q) - - self.assertEqual("payload_value", - message.message.get("payload_key", None)) - - message.reply(reply=object()) - - self.assertEqual(0, send_reply_mock.call_count) - - @patch("oslo_messaging._drivers.pika_driver.pika_message." - "RpcReplyPikaOutgoingMessage") - @patch("retrying.retry") - def test_positive_reply_for_call_message(self, - retry_mock, - outgoing_message_mock): - self._properties.correlation_id = 123456789 - self._properties.reply_to = "reply_queue" - - message = pika_drv_msg.RpcPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - self.assertEqual("context_value", - message.ctxt.get("key_context", None)) - self.assertEqual(123456789, message.msg_id) - self.assertEqual("reply_queue", message.reply_q) - - self.assertEqual("payload_value", - message.message.get("payload_key", None)) - reply = "all_fine" - message.reply(reply=reply) - - outgoing_message_mock.assert_called_once_with( - self._pika_engine, 123456789, failure_info=None, reply='all_fine', - content_encoding='utf-8', content_type='application/json' - ) - outgoing_message_mock().send.assert_called_once_with( - reply_q='reply_queue', stopwatch=mock.ANY, retrier=mock.ANY - ) - retry_mock.assert_called_once_with( - retry_on_exception=mock.ANY, stop_max_attempt_number=3, - wait_fixed=250.0 - ) - - @patch("oslo_messaging._drivers.pika_driver.pika_message." 
- "RpcReplyPikaOutgoingMessage") - @patch("retrying.retry") - def test_negative_reply_for_call_message(self, - retry_mock, - outgoing_message_mock): - self._properties.correlation_id = 123456789 - self._properties.reply_to = "reply_queue" - - message = pika_drv_msg.RpcPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - self._body - ) - - self.assertEqual("context_value", - message.ctxt.get("key_context", None)) - self.assertEqual(123456789, message.msg_id) - self.assertEqual("reply_queue", message.reply_q) - - self.assertEqual("payload_value", - message.message.get("payload_key", None)) - - failure_info = object() - message.reply(failure=failure_info) - - outgoing_message_mock.assert_called_once_with( - self._pika_engine, 123456789, - failure_info=failure_info, - reply=None, - content_encoding='utf-8', - content_type='application/json' - ) - outgoing_message_mock().send.assert_called_once_with( - reply_q='reply_queue', stopwatch=mock.ANY, retrier=mock.ANY - ) - retry_mock.assert_called_once_with( - retry_on_exception=mock.ANY, stop_max_attempt_number=3, - wait_fixed=250.0 - ) - - -class RpcReplyPikaIncomingMessageTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._pika_engine.allowed_remote_exmods = [ - pika_drv_cmns.EXCEPTIONS_MODULE, "oslo_messaging.exceptions" - ] - - self._channel = mock.Mock() - - self._delivery_tag = 12345 - - self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag) - - self._properties = pika.BasicProperties( - content_type="application/json", - content_encoding="utf-8", - headers={"version": "1.0"}, - correlation_id=123456789 - ) - - def test_positive_reply_message_body_parsing(self): - - body = b'{"s": "all fine"}' - - message = pika_drv_msg.RpcReplyPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - body - ) - - self.assertEqual(123456789, message.msg_id) - self.assertIsNone(message.failure) - self.assertEqual("all fine", message.result) - - def test_negative_reply_message_body_parsing(self): - - body = (b'{' - b' "e": {' - b' "s": "Error message",' - b' "t": ["TRACE HERE"],' - b' "c": "MessagingException",' - b' "m": "oslo_messaging.exceptions"' - b' }' - b'}') - - message = pika_drv_msg.RpcReplyPikaIncomingMessage( - self._pika_engine, self._channel, self._method, self._properties, - body - ) - - self.assertEqual(123456789, message.msg_id) - self.assertIsNone(message.result) - self.assertEqual( - 'Error message\n' - 'TRACE HERE', - str(message.failure) - ) - self.assertIsInstance(message.failure, - oslo_messaging.MessagingException) - - -class PikaOutgoingMessageTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.MagicMock() - self._exchange = "it is exchange" - self._routing_key = "it is routing key" - self._expiration = 1 - self._stopwatch = ( - timeutils.StopWatch(duration=self._expiration).start() - ) - self._mandatory = object() - - self._message = {"msg_type": 1, "msg_str": "hello"} - self._context = {"request_id": 555, "token": "it is a token"} - - @patch("oslo_serialization.jsonutils.dumps", - new=functools.partial(jsonutils.dumps, sort_keys=True)) - def test_send_with_confirmation(self): - message = pika_drv_msg.PikaOutgoingMessage( - self._pika_engine, self._message, self._context - ) - - message.send( - exchange=self._exchange, - routing_key=self._routing_key, - confirm=True, - mandatory=self._mandatory, - persistent=True, - stopwatch=self._stopwatch, - retrier=None - ) - - 
self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.assert_called_once_with( - body=mock.ANY, - exchange=self._exchange, mandatory=self._mandatory, - properties=mock.ANY, - routing_key=self._routing_key - ) - - body = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["body"] - - self.assertEqual( - b'{"_$_request_id": 555, "_$_token": "it is a token", ' - b'"msg_str": "hello", "msg_type": 1}', - body - ) - - props = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["properties"] - - self.assertEqual('utf-8', props.content_encoding) - self.assertEqual('application/json', props.content_type) - self.assertEqual(2, props.delivery_mode) - self.assertTrue(self._expiration * 1000 - float(props.expiration) < - 100) - self.assertEqual({'version': '1.0'}, props.headers) - self.assertTrue(props.message_id) - - @patch("oslo_serialization.jsonutils.dumps", - new=functools.partial(jsonutils.dumps, sort_keys=True)) - def test_send_without_confirmation(self): - message = pika_drv_msg.PikaOutgoingMessage( - self._pika_engine, self._message, self._context - ) - - message.send( - exchange=self._exchange, - routing_key=self._routing_key, - confirm=False, - mandatory=self._mandatory, - persistent=False, - stopwatch=self._stopwatch, - retrier=None - ) - - self._pika_engine.connection_without_confirmation_pool.acquire( - ).__enter__().channel.publish.assert_called_once_with( - body=mock.ANY, - exchange=self._exchange, mandatory=self._mandatory, - properties=mock.ANY, - routing_key=self._routing_key - ) - - body = self._pika_engine.connection_without_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["body"] - - self.assertEqual( - b'{"_$_request_id": 555, "_$_token": "it is a token", ' - b'"msg_str": "hello", "msg_type": 1}', - body - ) - - props = self._pika_engine.connection_without_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["properties"] - - self.assertEqual('utf-8', props.content_encoding) - self.assertEqual('application/json', props.content_type) - self.assertEqual(1, props.delivery_mode) - self.assertTrue(self._expiration * 1000 - float(props.expiration) - < 100) - self.assertEqual({'version': '1.0'}, props.headers) - self.assertTrue(props.message_id) - - -class RpcPikaOutgoingMessageTestCase(unittest.TestCase): - def setUp(self): - self._exchange = "it is exchange" - self._routing_key = "it is routing key" - - self._pika_engine = mock.MagicMock() - self._pika_engine.get_rpc_exchange_name.return_value = self._exchange - self._pika_engine.get_rpc_queue_name.return_value = self._routing_key - - self._message = {"msg_type": 1, "msg_str": "hello"} - self._context = {"request_id": 555, "token": "it is a token"} - - @patch("oslo_serialization.jsonutils.dumps", - new=functools.partial(jsonutils.dumps, sort_keys=True)) - def test_send_cast_message(self): - message = pika_drv_msg.RpcPikaOutgoingMessage( - self._pika_engine, self._message, self._context - ) - - expiration = 1 - stopwatch = timeutils.StopWatch(duration=expiration).start() - - message.send( - exchange=self._exchange, - routing_key=self._routing_key, - reply_listener=None, - stopwatch=stopwatch, - retrier=None - ) - - self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.assert_called_once_with( - body=mock.ANY, - exchange=self._exchange, mandatory=True, - properties=mock.ANY, - routing_key=self._routing_key - ) - 
- body = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["body"] - - self.assertEqual( - b'{"_$_request_id": 555, "_$_token": "it is a token", ' - b'"msg_str": "hello", "msg_type": 1}', - body - ) - - props = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["properties"] - - self.assertEqual('utf-8', props.content_encoding) - self.assertEqual('application/json', props.content_type) - self.assertEqual(1, props.delivery_mode) - self.assertTrue(expiration * 1000 - float(props.expiration) < 100) - self.assertEqual({'version': '1.0'}, props.headers) - self.assertIsNone(props.correlation_id) - self.assertIsNone(props.reply_to) - self.assertTrue(props.message_id) - - @patch("oslo_serialization.jsonutils.dumps", - new=functools.partial(jsonutils.dumps, sort_keys=True)) - def test_send_call_message(self): - message = pika_drv_msg.RpcPikaOutgoingMessage( - self._pika_engine, self._message, self._context - ) - - expiration = 1 - stopwatch = timeutils.StopWatch(duration=expiration).start() - - result = "it is a result" - reply_queue_name = "reply_queue_name" - - future = futures.Future() - future.set_result(result) - reply_listener = mock.Mock() - reply_listener.register_reply_waiter.return_value = future - reply_listener.get_reply_qname.return_value = reply_queue_name - - res = message.send( - exchange=self._exchange, - routing_key=self._routing_key, - reply_listener=reply_listener, - stopwatch=stopwatch, - retrier=None - ) - - self.assertEqual(result, res) - - self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.assert_called_once_with( - body=mock.ANY, - exchange=self._exchange, mandatory=True, - properties=mock.ANY, - routing_key=self._routing_key - ) - - body = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["body"] - - self.assertEqual( - b'{"_$_request_id": 555, "_$_token": "it is a token", ' - b'"msg_str": "hello", "msg_type": 1}', - body - ) - - props = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["properties"] - - self.assertEqual('utf-8', props.content_encoding) - self.assertEqual('application/json', props.content_type) - self.assertEqual(1, props.delivery_mode) - self.assertTrue(expiration * 1000 - float(props.expiration) < 100) - self.assertEqual({'version': '1.0'}, props.headers) - self.assertEqual(message.msg_id, props.correlation_id) - self.assertEqual(reply_queue_name, props.reply_to) - self.assertTrue(props.message_id) - - -class RpcReplyPikaOutgoingMessageTestCase(unittest.TestCase): - def setUp(self): - self._reply_q = "reply_queue_name" - - self._expiration = 1 - self._stopwatch = ( - timeutils.StopWatch(duration=self._expiration).start() - ) - - self._pika_engine = mock.MagicMock() - - self._rpc_reply_exchange = "rpc_reply_exchange" - self._pika_engine.rpc_reply_exchange = self._rpc_reply_exchange - - self._msg_id = 12345567 - - @patch("oslo_serialization.jsonutils.dumps", - new=functools.partial(jsonutils.dumps, sort_keys=True)) - def test_success_message_send(self): - message = pika_drv_msg.RpcReplyPikaOutgoingMessage( - self._pika_engine, self._msg_id, reply="all_fine" - ) - - message.send(self._reply_q, stopwatch=self._stopwatch, retrier=None) - - self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.assert_called_once_with( - body=b'{"s": "all_fine"}', - 
exchange=self._rpc_reply_exchange, mandatory=True, - properties=mock.ANY, - routing_key=self._reply_q - ) - - props = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["properties"] - - self.assertEqual('utf-8', props.content_encoding) - self.assertEqual('application/json', props.content_type) - self.assertEqual(1, props.delivery_mode) - self.assertTrue(self._expiration * 1000 - float(props.expiration) < - 100) - self.assertEqual({'version': '1.0'}, props.headers) - self.assertEqual(message.msg_id, props.correlation_id) - self.assertIsNone(props.reply_to) - self.assertTrue(props.message_id) - - @patch("traceback.format_exception", new=lambda x, y, z: z) - @patch("oslo_serialization.jsonutils.dumps", - new=functools.partial(jsonutils.dumps, sort_keys=True)) - def test_failure_message_send(self): - failure_info = (oslo_messaging.MessagingException, - oslo_messaging.MessagingException("Error message"), - ['It is a trace']) - - message = pika_drv_msg.RpcReplyPikaOutgoingMessage( - self._pika_engine, self._msg_id, failure_info=failure_info - ) - - message.send(self._reply_q, stopwatch=self._stopwatch, retrier=None) - - self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.assert_called_once_with( - body=mock.ANY, - exchange=self._rpc_reply_exchange, - mandatory=True, - properties=mock.ANY, - routing_key=self._reply_q - ) - - body = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["body"] - self.assertEqual( - b'{"e": {"c": "MessagingException", ' - b'"m": "oslo_messaging.exceptions", "s": "Error message", ' - b'"t": ["It is a trace"]}}', - body - ) - - props = self._pika_engine.connection_with_confirmation_pool.acquire( - ).__enter__().channel.publish.call_args[1]["properties"] - - self.assertEqual('utf-8', props.content_encoding) - self.assertEqual('application/json', props.content_type) - self.assertEqual(1, props.delivery_mode) - self.assertTrue(self._expiration * 1000 - float(props.expiration) < - 100) - self.assertEqual({'version': '1.0'}, props.headers) - self.assertEqual(message.msg_id, props.correlation_id) - self.assertIsNone(props.reply_to) - self.assertTrue(props.message_id) diff --git a/oslo_messaging/tests/drivers/pika/test_poller.py b/oslo_messaging/tests/drivers/pika/test_poller.py deleted file mode 100644 index 445b338..0000000 --- a/oslo_messaging/tests/drivers/pika/test_poller.py +++ /dev/null @@ -1,482 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
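# The two reply tests above pin down the pika driver's reply wire format:
# success serializes as {"s": <result>} and failure as {"e": {"c": <class>,
# "m": <module>, "s": <message>, "t": <traceback lines>}}.  A minimal
# standalone decoder for that envelope might look like the sketch below;
# the helper name and the use of RuntimeError are illustrative, not the
# driver's actual API.

import json

def decode_pika_reply(body):
    """Decode a reply body shaped like the assertions above (sketch)."""
    reply = json.loads(body)
    if "e" in reply:  # failure envelope: embedded exception info
        err = reply["e"]
        raise RuntimeError("%s.%s: %s\n%s" % (err["m"], err["c"], err["s"],
                                              "\n".join(err["t"])))
    return reply.get("s")  # success envelope: result under "s"

# decode_pika_reply(b'{"s": "all_fine"}') -> "all_fine"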
- -import threading -import time -import unittest - -from concurrent import futures -import mock - -from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc -from oslo_messaging._drivers.pika_driver import pika_poller - - -class PikaPollerTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._poller_connection_mock = mock.Mock() - self._poller_channel_mock = mock.Mock() - self._poller_connection_mock.channel.return_value = ( - self._poller_channel_mock - ) - self._pika_engine.create_connection.return_value = ( - self._poller_connection_mock - ) - - self._executor = futures.ThreadPoolExecutor(1) - - def timer_task(timeout, callback): - time.sleep(timeout) - callback() - - self._poller_connection_mock.add_timeout.side_effect = ( - lambda *args: self._executor.submit(timer_task, *args) - ) - - self._prefetch_count = 123 - - @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." - "_declare_queue_binding") - def test_start(self, declare_queue_binding_mock): - poller = pika_poller.PikaPoller( - self._pika_engine, 1, None, self._prefetch_count, None - ) - - poller.start(None) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - self.assertTrue(declare_queue_binding_mock.called) - - def test_start_when_connection_unavailable(self): - poller = pika_poller.PikaPoller( - self._pika_engine, 1, None, self._prefetch_count, None - ) - - self._pika_engine.create_connection.side_effect = ( - pika_drv_exc.EstablishConnectionException - ) - - # start() should not raise socket.timeout exception - poller.start(None) - - # stop is needed to stop reconnection background job - poller.stop() - - @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." - "_declare_queue_binding") - def test_message_processing(self, declare_queue_binding_mock): - res = [] - - def on_incoming_callback(incoming): - res.append(incoming) - - incoming_message_class_mock = mock.Mock() - poller = pika_poller.PikaPoller( - self._pika_engine, 1, None, self._prefetch_count, - incoming_message_class=incoming_message_class_mock - ) - unused = object() - method = object() - properties = object() - body = object() - - poller.start(on_incoming_callback) - poller._on_message_with_ack_callback( - unused, method, properties, body - ) - - self.assertEqual(1, len(res)) - - self.assertEqual([incoming_message_class_mock.return_value], res[0]) - incoming_message_class_mock.assert_called_once_with( - self._pika_engine, self._poller_channel_mock, method, properties, - body - ) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - - self.assertTrue(declare_queue_binding_mock.called) - - @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." 
- "_declare_queue_binding") - def test_message_processing_batch(self, declare_queue_binding_mock): - incoming_message_class_mock = mock.Mock() - - n = 10 - params = [] - - res = [] - - def on_incoming_callback(incoming): - res.append(incoming) - - poller = pika_poller.PikaPoller( - self._pika_engine, n, None, self._prefetch_count, - incoming_message_class=incoming_message_class_mock - ) - - for i in range(n): - params.append((object(), object(), object(), object())) - - poller.start(on_incoming_callback) - - for i in range(n): - poller._on_message_with_ack_callback( - *params[i] - ) - - self.assertEqual(1, len(res)) - self.assertEqual(10, len(res[0])) - self.assertEqual(n, incoming_message_class_mock.call_count) - - for i in range(n): - self.assertEqual(incoming_message_class_mock.return_value, - res[0][i]) - self.assertEqual( - (self._pika_engine, self._poller_channel_mock) + params[i][1:], - incoming_message_class_mock.call_args_list[i][0] - ) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - - self.assertTrue(declare_queue_binding_mock.called) - - @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." - "_declare_queue_binding") - def test_message_processing_batch_with_timeout(self, - declare_queue_binding_mock): - incoming_message_class_mock = mock.Mock() - - n = 10 - timeout = 1 - - res = [] - evt = threading.Event() - - def on_incoming_callback(incoming): - res.append(incoming) - evt.set() - - poller = pika_poller.PikaPoller( - self._pika_engine, n, timeout, self._prefetch_count, - incoming_message_class=incoming_message_class_mock - ) - - params = [] - - success_count = 5 - - poller.start(on_incoming_callback) - - for i in range(n): - params.append((object(), object(), object(), object())) - - for i in range(success_count): - poller._on_message_with_ack_callback( - *params[i] - ) - - self.assertTrue(evt.wait(timeout * 2)) - - self.assertEqual(1, len(res)) - self.assertEqual(success_count, len(res[0])) - self.assertEqual(success_count, incoming_message_class_mock.call_count) - - for i in range(success_count): - self.assertEqual(incoming_message_class_mock.return_value, - res[0][i]) - self.assertEqual( - (self._pika_engine, self._poller_channel_mock) + params[i][1:], - incoming_message_class_mock.call_args_list[i][0] - ) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - - self.assertTrue(declare_queue_binding_mock.called) - - -class RpcServicePikaPollerTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._poller_connection_mock = mock.Mock() - self._poller_channel_mock = mock.Mock() - self._poller_connection_mock.channel.return_value = ( - self._poller_channel_mock - ) - self._pika_engine.create_connection.return_value = ( - self._poller_connection_mock - ) - - self._pika_engine.get_rpc_queue_name.side_effect = ( - lambda topic, server, no_ack, worker=False: - "_".join([topic, str(server), str(no_ack), str(worker)]) - ) - - self._pika_engine.get_rpc_exchange_name.side_effect = ( - lambda exchange: exchange - ) - - self._prefetch_count = 123 - self._target = mock.Mock(exchange="exchange", topic="topic", - server="server") - self._pika_engine.rpc_queue_expiration = 12345 - - @mock.patch("oslo_messaging._drivers.pika_driver.pika_message." 
- "RpcPikaIncomingMessage") - def test_declare_rpc_queue_bindings(self, rpc_pika_incoming_message_mock): - poller = pika_poller.RpcServicePikaPoller( - self._pika_engine, self._target, 1, None, - self._prefetch_count - ) - - poller.start(None) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - - declare_queue_binding_by_channel_mock = ( - self._pika_engine.declare_queue_binding_by_channel - ) - - self.assertEqual( - 6, declare_queue_binding_by_channel_mock.call_count - ) - - declare_queue_binding_by_channel_mock.assert_has_calls(( - mock.call( - channel=self._poller_channel_mock, durable=False, - exchange="exchange", - exchange_type='direct', - queue="topic_None_True_False", - queue_expiration=12345, - routing_key="topic_None_True_False" - ), - mock.call( - channel=self._poller_channel_mock, durable=False, - exchange="exchange", - exchange_type='direct', - queue="topic_server_True_False", - queue_expiration=12345, - routing_key="topic_server_True_False" - ), - mock.call( - channel=self._poller_channel_mock, durable=False, - exchange="exchange", - exchange_type='direct', - queue="topic_server_True_True", - queue_expiration=12345, - routing_key="topic_all_workers_True_False" - ), - mock.call( - channel=self._poller_channel_mock, durable=False, - exchange="exchange", - exchange_type='direct', - queue="topic_None_False_False", - queue_expiration=12345, - routing_key="topic_None_False_False" - ), - mock.call( - channel=self._poller_channel_mock, durable=False, - exchange="exchange", - exchange_type='direct', - queue="topic_server_False_False", - queue_expiration=12345, - routing_key='topic_server_False_False' - ), - mock.call( - channel=self._poller_channel_mock, durable=False, - exchange="exchange", - exchange_type='direct', - queue="topic_server_False_True", - queue_expiration=12345, - routing_key='topic_all_workers_False_False' - ) - )) - - -class RpcReplyServicePikaPollerTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._poller_connection_mock = mock.Mock() - self._poller_channel_mock = mock.Mock() - self._poller_connection_mock.channel.return_value = ( - self._poller_channel_mock - ) - self._pika_engine.create_connection.return_value = ( - self._poller_connection_mock - ) - - self._prefetch_count = 123 - self._exchange = "rpc_reply_exchange" - self._queue = "rpc_reply_queue" - - self._pika_engine.rpc_reply_retry_delay = 12132543456 - - self._pika_engine.rpc_queue_expiration = 12345 - self._pika_engine.rpc_reply_retry_attempts = 3 - - def test_declare_rpc_reply_queue_binding(self): - poller = pika_poller.RpcReplyPikaPoller( - self._pika_engine, self._exchange, self._queue, 1, None, - self._prefetch_count, - ) - - poller.start(None) - poller.stop() - - declare_queue_binding_by_channel_mock = ( - self._pika_engine.declare_queue_binding_by_channel - ) - - self.assertEqual( - 1, declare_queue_binding_by_channel_mock.call_count - ) - - declare_queue_binding_by_channel_mock.assert_called_once_with( - channel=self._poller_channel_mock, durable=False, - exchange='rpc_reply_exchange', exchange_type='direct', - queue='rpc_reply_queue', queue_expiration=12345, - routing_key='rpc_reply_queue' - ) - - -class NotificationPikaPollerTestCase(unittest.TestCase): - def setUp(self): - self._pika_engine = mock.Mock() - self._poller_connection_mock = mock.Mock() - self._poller_channel_mock = mock.Mock() - self._poller_connection_mock.channel.return_value = ( - self._poller_channel_mock - ) - 
self._pika_engine.create_connection.return_value = ( - self._poller_connection_mock - ) - - self._prefetch_count = 123 - self._target_and_priorities = ( - ( - mock.Mock(exchange="exchange1", topic="topic1", - server="server1"), 1 - ), - ( - mock.Mock(exchange="exchange1", topic="topic1"), 2 - ), - ( - mock.Mock(exchange="exchange2", topic="topic2",), 1 - ), - ) - self._pika_engine.notification_persistence = object() - - def test_declare_notification_queue_bindings_default_queue(self): - poller = pika_poller.NotificationPikaPoller( - self._pika_engine, self._target_and_priorities, 1, None, - self._prefetch_count, None - ) - - poller.start(None) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - - declare_queue_binding_by_channel_mock = ( - self._pika_engine.declare_queue_binding_by_channel - ) - - self.assertEqual( - 3, declare_queue_binding_by_channel_mock.call_count - ) - - declare_queue_binding_by_channel_mock.assert_has_calls(( - mock.call( - channel=self._poller_channel_mock, - durable=self._pika_engine.notification_persistence, - exchange="exchange1", - exchange_type='direct', - queue="topic1.1", - queue_expiration=None, - routing_key="topic1.1" - ), - mock.call( - channel=self._poller_channel_mock, - durable=self._pika_engine.notification_persistence, - exchange="exchange1", - exchange_type='direct', - queue="topic1.2", - queue_expiration=None, - routing_key="topic1.2" - ), - mock.call( - channel=self._poller_channel_mock, - durable=self._pika_engine.notification_persistence, - exchange="exchange2", - exchange_type='direct', - queue="topic2.1", - queue_expiration=None, - routing_key="topic2.1" - ) - )) - - def test_declare_notification_queue_bindings_custom_queue(self): - poller = pika_poller.NotificationPikaPoller( - self._pika_engine, self._target_and_priorities, 1, None, - self._prefetch_count, "custom_queue_name" - ) - - poller.start(None) - - self.assertTrue(self._pika_engine.create_connection.called) - self.assertTrue(self._poller_connection_mock.channel.called) - - declare_queue_binding_by_channel_mock = ( - self._pika_engine.declare_queue_binding_by_channel - ) - - self.assertEqual( - 3, declare_queue_binding_by_channel_mock.call_count - ) - - declare_queue_binding_by_channel_mock.assert_has_calls(( - mock.call( - channel=self._poller_channel_mock, - durable=self._pika_engine.notification_persistence, - exchange="exchange1", - exchange_type='direct', - queue="custom_queue_name", - queue_expiration=None, - routing_key="topic1.1" - ), - mock.call( - channel=self._poller_channel_mock, - durable=self._pika_engine.notification_persistence, - exchange="exchange1", - exchange_type='direct', - queue="custom_queue_name", - queue_expiration=None, - routing_key="topic1.2" - ), - mock.call( - channel=self._poller_channel_mock, - durable=self._pika_engine.notification_persistence, - exchange="exchange2", - exchange_type='direct', - queue="custom_queue_name", - queue_expiration=None, - routing_key="topic2.1" - ) - )) diff --git a/oslo_messaging/tests/drivers/test_amqp_driver.py b/oslo_messaging/tests/drivers/test_amqp_driver.py deleted file mode 100644 index db35308..0000000 --- a/oslo_messaging/tests/drivers/test_amqp_driver.py +++ /dev/null @@ -1,1098 +0,0 @@ -# Copyright (C) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os -import select -import shutil -import socket -import subprocess -import threading -import time -import uuid - -from oslo_utils import importutils -from six import moves -from string import Template -import testtools - -import oslo_messaging -from oslo_messaging.tests import utils as test_utils - -# TODO(kgiusti) Conditionally run these tests only if the necessary -# dependencies are installed. This should be removed once the proton libraries -# are available in the base repos for all supported platforms. -pyngus = importutils.try_import("pyngus") -if pyngus: - import oslo_messaging._drivers.impl_amqp1 as amqp_driver - -# The Cyrus-based SASL tests can only be run if the installed version of proton -# has been built with Cyrus SASL support. -_proton = importutils.try_import("proton") -CYRUS_ENABLED = (pyngus and pyngus.VERSION >= (2, 0, 0) and _proton - and getattr(_proton.SASL, "extended", lambda: False)()) - -LOG = logging.getLogger(__name__) - - -def _wait_until(predicate, timeout): - deadline = timeout + time.time() - while not predicate() and deadline > time.time(): - time.sleep(0.1) - - -class _ListenerThread(threading.Thread): - """Run a blocking listener in a thread.""" - def __init__(self, listener, msg_count): - super(_ListenerThread, self).__init__() - self.listener = listener - self.msg_count = msg_count - self.messages = moves.queue.Queue() - self.daemon = True - self.started = threading.Event() - self.start() - self.started.wait() - - def run(self): - LOG.debug("Listener started") - self.started.set() - while self.msg_count > 0: - in_msg = self.listener.poll()[0] - self.messages.put(in_msg) - self.msg_count -= 1 - if in_msg.message.get('method') == 'echo': - in_msg.reply(reply={'correlation-id': - in_msg.message.get('id')}) - LOG.debug("Listener stopped") - - def get_messages(self): - """Returns a list of all received messages.""" - msgs = [] - try: - while True: - m = self.messages.get(False) - msgs.append(m) - except moves.queue.Empty: - pass - return msgs - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestProtonDriverLoad(test_utils.BaseTestCase): - - def setUp(self): - super(TestProtonDriverLoad, self).setUp() - self.messaging_conf.transport_driver = 'amqp' - - def test_driver_load(self): - transport = oslo_messaging.get_transport(self.conf) - self.assertIsInstance(transport._driver, - amqp_driver.ProtonDriver) - - -class _AmqpBrokerTestCase(test_utils.BaseTestCase): - - @testtools.skipUnless(pyngus, "proton modules not present") - def setUp(self): - super(_AmqpBrokerTestCase, self).setUp() - self._broker = FakeBroker() - self._broker_addr = "amqp://%s:%d" % (self._broker.host, - self._broker.port) - self._broker_url = oslo_messaging.TransportURL.parse( - self.conf, self._broker_addr) - self._broker.start() - - def tearDown(self): - super(_AmqpBrokerTestCase, self).tearDown() - self._broker.stop() - - -class TestAmqpSend(_AmqpBrokerTestCase): - """Test sending and receiving messages.""" - - def test_driver_unconnected_cleanup(self): - """Verify the driver can cleanly shutdown even if never connected.""" - driver 
= amqp_driver.ProtonDriver(self.conf, self._broker_url) - driver.cleanup() - - def test_listener_cleanup(self): - """Verify unused listener can cleanly shutdown.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = driver.listen(target, None, None)._poll_style_listener - self.assertIsInstance(listener, amqp_driver.ProtonListener) - driver.cleanup() - - def test_send_no_reply(self): - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - rc = driver.send(target, {"context": True}, - {"msg": "value"}, wait_for_reply=False) - self.assertIsNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - self.assertEqual({"msg": "value"}, listener.messages.get().message) - driver.cleanup() - - def test_send_exchange_with_reply(self): - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target1 = oslo_messaging.Target(topic="test-topic", exchange="e1") - listener1 = _ListenerThread( - driver.listen(target1, None, None)._poll_style_listener, 1) - target2 = oslo_messaging.Target(topic="test-topic", exchange="e2") - listener2 = _ListenerThread( - driver.listen(target2, None, None)._poll_style_listener, 1) - - rc = driver.send(target1, {"context": "whatever"}, - {"method": "echo", "id": "e1"}, - wait_for_reply=True, - timeout=30) - self.assertIsNotNone(rc) - self.assertEqual('e1', rc.get('correlation-id')) - - rc = driver.send(target2, {"context": "whatever"}, - {"method": "echo", "id": "e2"}, - wait_for_reply=True, - timeout=30) - self.assertIsNotNone(rc) - self.assertEqual('e2', rc.get('correlation-id')) - - listener1.join(timeout=30) - self.assertFalse(listener1.isAlive()) - listener2.join(timeout=30) - self.assertFalse(listener2.isAlive()) - driver.cleanup() - - def test_messaging_patterns(self): - """Verify the direct, shared, and fanout message patterns work.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target1 = oslo_messaging.Target(topic="test-topic", server="server1") - listener1 = _ListenerThread( - driver.listen(target1, None, None)._poll_style_listener, 4) - target2 = oslo_messaging.Target(topic="test-topic", server="server2") - listener2 = _ListenerThread( - driver.listen(target2, None, None)._poll_style_listener, 3) - - shared_target = oslo_messaging.Target(topic="test-topic") - fanout_target = oslo_messaging.Target(topic="test-topic", - fanout=True) - # this should go to only one server: - driver.send(shared_target, {"context": "whatever"}, - {"method": "echo", "id": "either-1"}, - wait_for_reply=True) - self.assertEqual(1, self._broker.topic_count) - self.assertEqual(1, self._broker.direct_count) # reply - - # this should go to the other server: - driver.send(shared_target, {"context": "whatever"}, - {"method": "echo", "id": "either-2"}, - wait_for_reply=True) - self.assertEqual(2, self._broker.topic_count) - self.assertEqual(2, self._broker.direct_count) # reply - - # these should only go to listener1: - driver.send(target1, {"context": "whatever"}, - {"method": "echo", "id": "server1-1"}, - wait_for_reply=True) - - driver.send(target1, {"context": "whatever"}, - {"method": "echo", "id": "server1-2"}, - wait_for_reply=True) - self.assertEqual(6, self._broker.direct_count) # 2X(send+reply) - - # this should only go to listener2: - driver.send(target2, {"context": "whatever"}, - {"method": "echo", 
"id": "server2"}, - wait_for_reply=True) - self.assertEqual(8, self._broker.direct_count) - - # both listeners should get a copy: - driver.send(fanout_target, {"context": "whatever"}, - {"method": "echo", "id": "fanout"}) - - listener1.join(timeout=30) - self.assertFalse(listener1.isAlive()) - listener2.join(timeout=30) - self.assertFalse(listener2.isAlive()) - self.assertEqual(1, self._broker.fanout_count) - - listener1_ids = [x.message.get('id') for x in listener1.get_messages()] - listener2_ids = [x.message.get('id') for x in listener2.get_messages()] - - self.assertTrue('fanout' in listener1_ids and - 'fanout' in listener2_ids) - self.assertTrue('server1-1' in listener1_ids and - 'server1-1' not in listener2_ids) - self.assertTrue('server1-2' in listener1_ids and - 'server1-2' not in listener2_ids) - self.assertTrue('server2' in listener2_ids and - 'server2' not in listener1_ids) - if 'either-1' in listener1_ids: - self.assertTrue('either-2' in listener2_ids and - 'either-2' not in listener1_ids and - 'either-1' not in listener2_ids) - else: - self.assertTrue('either-2' in listener1_ids and - 'either-2' not in listener2_ids and - 'either-1' in listener2_ids) - driver.cleanup() - - def test_send_timeout(self): - """Verify send timeout.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - - # the listener will drop this message: - try: - driver.send(target, - {"context": "whatever"}, - {"method": "drop"}, - wait_for_reply=True, - timeout=1.0) - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex) - else: - self.assertTrue(False, "No Exception raised!") - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - driver.cleanup() - - -class TestAmqpNotification(_AmqpBrokerTestCase): - """Test sending and receiving notifications.""" - - def test_notification(self): - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - notifications = [(oslo_messaging.Target(topic="topic-1"), 'info'), - (oslo_messaging.Target(topic="topic-1"), 'error'), - (oslo_messaging.Target(topic="topic-2"), 'debug')] - nl = driver.listen_for_notifications( - notifications, None, None, None)._poll_style_listener - - # send one for each support version: - msg_count = len(notifications) * 2 - listener = _ListenerThread(nl, msg_count) - targets = ['topic-1.info', - 'topic-1.bad', # will raise MessageDeliveryFailure - 'bad-topic.debug', # will raise MessageDeliveryFailure - 'topic-1.error', - 'topic-2.debug'] - - excepted_targets = [] - for version in (1.0, 2.0): - for t in targets: - try: - driver.send_notification(oslo_messaging.Target(topic=t), - "context", {'target': t}, - version) - except oslo_messaging.MessageDeliveryFailure: - excepted_targets.append(t) - - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - topics = [x.message.get('target') for x in listener.get_messages()] - self.assertEqual(msg_count, len(topics)) - self.assertEqual(2, topics.count('topic-1.info')) - self.assertEqual(2, topics.count('topic-1.error')) - self.assertEqual(2, topics.count('topic-2.debug')) - self.assertEqual(4, self._broker.dropped_count) - self.assertEqual(0, excepted_targets.count('topic-1.bad')) - self.assertEqual(0, excepted_targets.count('bad-topic.debug')) - driver.cleanup() - - -@testtools.skipUnless(pyngus and pyngus.VERSION < (2, 0, 0), - "pyngus module not present") -class 
TestAuthentication(test_utils.BaseTestCase): - """Test user authentication using the old pyngus API""" - def setUp(self): - super(TestAuthentication, self).setUp() - # for simplicity, encode the credentials as they would appear 'on the - # wire' in a SASL frame - username and password prefixed by zero. - user_credentials = ["\0joe\0secret"] - self._broker = FakeBroker(sasl_mechanisms="PLAIN", - user_credentials=user_credentials) - self._broker.start() - - def tearDown(self): - super(TestAuthentication, self).tearDown() - self._broker.stop() - - def test_authentication_ok(self): - """Verify that username and password given in TransportHost are - accepted by the broker. - """ - - addr = "amqp://joe:secret@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - rc = driver.send(target, {"context": True}, - {"method": "echo"}, wait_for_reply=True) - self.assertIsNotNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - driver.cleanup() - - def test_authentication_failure(self): - """Verify that a bad password given in TransportHost is - rejected by the broker. - """ - - addr = "amqp://joe:badpass@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, - target, {"context": True}, - {"method": "echo"}, - wait_for_reply=True, - timeout=2.0) - driver.cleanup() - - -@testtools.skipUnless(CYRUS_ENABLED, "Cyrus SASL not supported") -class TestCyrusAuthentication(test_utils.BaseTestCase): - """Test the driver's Cyrus SASL integration""" - - _conf_dir = None - - # Note: don't add ANONYMOUS or EXTERNAL mechs without updating the - # test_authentication_bad_mechs test below - _mechs = "DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN" - - @classmethod - def setUpClass(cls): - # The Cyrus library can only be initialized once per _process_ - # Create a SASL configuration and user database, - # add a user 'joe' with password 'secret': - cls._conf_dir = "/tmp/amqp1_tests_%s" % os.getpid() - # no, we cannot use tempfile.mkdtemp() as it will 'helpfully' remove - # the temp dir after the first test is run (?why?) 
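# The sasldb is seeded with the stock Cyrus tooling: saslpasswd2 -c creates
# the entry, -p reads the password from stdin, and -f points at the db
# file.  The same command rendered standalone (path illustrative):
#
#     from string import Template
#     Template("echo secret | saslpasswd2 -c -p -f ${db} joe").substitute(
#         db="/tmp/amqp1_tests/openstack.sasldb")
#     # -> 'echo secret | saslpasswd2 -c -p -f /tmp/amqp1_tests/openstack.sasldb joe'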
- os.makedirs(cls._conf_dir) - db = os.path.join(cls._conf_dir, 'openstack.sasldb') - _t = "echo secret | saslpasswd2 -c -p -f ${db} joe" - cmd = Template(_t).substitute(db=db) - try: - subprocess.check_call(args=cmd, shell=True) - except Exception: - shutil.rmtree(cls._conf_dir, ignore_errors=True) - cls._conf_dir = None - return - - # configure the SASL server: - conf = os.path.join(cls._conf_dir, 'openstack.conf') - t = Template("""sasldb_path: ${db} -pwcheck_method: auxprop -auxprop_plugin: sasldb -mech_list: ${mechs} -""") - with open(conf, 'w') as f: - f.write(t.substitute(db=db, mechs=cls._mechs)) - - @classmethod - def tearDownClass(cls): - if cls._conf_dir: - shutil.rmtree(cls._conf_dir, ignore_errors=True) - - def setUp(self): - # fire up a test broker with the SASL config: - super(TestCyrusAuthentication, self).setUp() - if TestCyrusAuthentication._conf_dir is None: - self.skipTest("Cyrus SASL tools not installed") - _mechs = TestCyrusAuthentication._mechs - _dir = TestCyrusAuthentication._conf_dir - self._broker = FakeBroker(sasl_mechanisms=_mechs, - user_credentials=["\0joe\0secret"], - sasl_config_dir=_dir, - sasl_config_name="openstack") - self._broker.start() - self.messaging_conf.transport_driver = 'amqp' - self.conf = self.messaging_conf.conf - - def tearDown(self): - if self._broker: - self._broker.stop() - self._broker = None - super(TestCyrusAuthentication, self).tearDown() - - def test_authentication_ok(self): - """Verify that username and password given in TransportHost are - accepted by the broker. - """ - addr = "amqp://joe:secret@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - rc = driver.send(target, {"context": True}, - {"method": "echo"}, wait_for_reply=True) - self.assertIsNotNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - driver.cleanup() - - def test_authentication_failure(self): - """Verify that a bad password given in TransportHost is - rejected by the broker. - """ - - addr = "amqp://joe:badpass@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, - target, {"context": True}, - {"method": "echo"}, - wait_for_reply=True, - timeout=2.0) - driver.cleanup() - - def test_authentication_bad_mechs(self): - """Verify that the connection fails if the client's SASL mechanisms do - not match the broker's. 
- """ - self.config(sasl_mechanisms="EXTERNAL ANONYMOUS", - group="oslo_messaging_amqp") - addr = "amqp://joe:secret@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, - target, {"context": True}, - {"method": "echo"}, - wait_for_reply=True, - timeout=2.0) - driver.cleanup() - - def test_authentication_default_username(self): - """Verify that a configured username/password is used if none appears - in the URL. - """ - addr = "amqp://%s:%d" % (self._broker.host, self._broker.port) - self.config(username="joe", - password="secret", - group="oslo_messaging_amqp") - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - rc = driver.send(target, {"context": True}, - {"method": "echo"}, wait_for_reply=True) - self.assertIsNotNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - driver.cleanup() - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestFailover(test_utils.BaseTestCase): - - def setUp(self): - super(TestFailover, self).setUp() - self._brokers = [FakeBroker(), FakeBroker()] - self._primary = 0 - self._backup = 1 - hosts = [] - for broker in self._brokers: - hosts.append(oslo_messaging.TransportHost(hostname=broker.host, - port=broker.port)) - self._broker_url = oslo_messaging.TransportURL(self.conf, - transport="amqp", - hosts=hosts) - - def tearDown(self): - super(TestFailover, self).tearDown() - for broker in self._brokers: - if broker.isAlive(): - broker.stop() - - def _failover(self, fail_broker): - self._brokers[0].start() - self._brokers[1].start() - - # self.config(trace=True, group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - - target = oslo_messaging.Target(topic="my-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 2) - - # wait for listener links to come up on either broker - # 4 == 3 links per listener + 1 for the global reply queue - predicate = lambda: ((self._brokers[0].sender_link_count == 4) or - (self._brokers[1].sender_link_count == 4)) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - if self._brokers[1].sender_link_count == 4: - self._primary = 1 - self._backup = 0 - - rc = driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "echo-1"}, - wait_for_reply=True, - timeout=30) - self.assertIsNotNone(rc) - self.assertEqual('echo-1', rc.get('correlation-id')) - - # 1 request msg, 1 response: - self.assertEqual(1, self._brokers[self._primary].topic_count) - self.assertEqual(1, self._brokers[self._primary].direct_count) - - # invoke failover method - fail_broker(self._brokers[self._primary]) - - # wait for listener links to re-establish on broker 1 - # 4 = 3 links per listener + 1 for the global reply queue - predicate = lambda: self._brokers[self._backup].sender_link_count == 4 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - rc = driver.send(target, - {"context": "whatever"}, - {"method": "echo", "id": "echo-2"}, - wait_for_reply=True, - timeout=2) - self.assertIsNotNone(rc) - 
self.assertEqual('echo-2', rc.get('correlation-id')) - - # 1 request msg, 1 response: - self.assertEqual(1, self._brokers[self._backup].topic_count) - self.assertEqual(1, self._brokers[self._backup].direct_count) - - listener.join(timeout=30) - self.assertFalse(listener.isAlive()) - - # note: stopping the broker first tests cleaning up driver without a - # connection active - self._brokers[self._backup].stop() - driver.cleanup() - - def test_broker_crash(self): - """Simulate a failure of one broker.""" - def _meth(broker): - # fail broker: - broker.stop() - time.sleep(0.5) - self._failover(_meth) - - def test_broker_shutdown(self): - """Simulate a normal shutdown of a broker.""" - def _meth(broker): - broker.stop(clean=True) - time.sleep(0.5) - self._failover(_meth) - - def test_heartbeat_failover(self): - """Simulate broker heartbeat timeout.""" - def _meth(broker): - # keep alive heartbeat from primary broker will stop, which should - # force failover to backup broker in about two seconds - broker.pause() - self.config(idle_timeout=2, group="oslo_messaging_amqp") - self._failover(_meth) - self._brokers[self._primary].stop() - - def test_listener_failover(self): - """Verify that Listeners sharing the same topic are re-established - after failover. - """ - self._brokers[0].start() - # self.config(trace=True, group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - - target = oslo_messaging.Target(topic="my-topic") - bcast = oslo_messaging.Target(topic="my-topic", fanout=True) - listener1 = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 2) - listener2 = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 2) - - # wait for 7 sending links to become active on the broker. 
- # 7 = 3 links per Listener + 1 global reply link - predicate = lambda: self._brokers[0].sender_link_count == 7 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.send(bcast, {"context": "whatever"}, - {"method": "ignore", "id": "echo-1"}) - - # 1 message per listener - predicate = lambda: self._brokers[0].fanout_sent_count == 2 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - # start broker 1 then shutdown broker 0: - self._brokers[1].start() - self._brokers[0].stop(clean=True) - - # wait again for 7 sending links to re-establish on broker 1 - predicate = lambda: self._brokers[1].sender_link_count == 7 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.send(bcast, {"context": "whatever"}, - {"method": "ignore", "id": "echo-2"}) - - # 1 message per listener - predicate = lambda: self._brokers[1].fanout_sent_count == 2 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - listener1.join(timeout=30) - listener2.join(timeout=30) - self.assertFalse(listener1.isAlive() or listener2.isAlive()) - - driver.cleanup() - self._brokers[1].stop() - - -class FakeBroker(threading.Thread): - """A test AMQP message 'broker'.""" - - if pyngus: - class Connection(pyngus.ConnectionEventHandler): - """A single AMQP connection.""" - - def __init__(self, server, socket_, name, - sasl_mechanisms, user_credentials, - sasl_config_dir, sasl_config_name): - """Create a Connection using socket_.""" - self.socket = socket_ - self.name = name - self.server = server - self.sasl_mechanisms = sasl_mechanisms - self.user_credentials = user_credentials - properties = {'x-server': True} - if self.sasl_mechanisms: - properties['x-sasl-mechs'] = self.sasl_mechanisms - if "ANONYMOUS" not in self.sasl_mechanisms: - properties['x-require-auth'] = True - if sasl_config_dir: - properties['x-sasl-config-dir'] = sasl_config_dir - if sasl_config_name: - properties['x-sasl-config-name'] = sasl_config_name - - self.connection = server.container.create_connection( - name, self, properties) - self.connection.user_context = self - if pyngus.VERSION < (2, 0, 0): - # older versions of pyngus don't recognize the sasl - # connection properties, so configure them manually: - if sasl_mechanisms: - self.connection.pn_sasl.mechanisms(sasl_mechanisms) - self.connection.pn_sasl.server() - self.connection.open() - self.sender_links = set() - self.receiver_links = set() - self.dead_links = set() - - def destroy(self): - """Destroy the test connection.""" - for link in self.sender_links | self.receiver_links: - link.destroy() - self.sender_links.clear() - self.receiver_links.clear() - self.dead_links.clear() - self.connection.destroy() - self.connection = None - self.socket.close() - self.socket = None - - def fileno(self): - """Allows use of this in a select() call.""" - return self.socket.fileno() - - def process_input(self): - """Called when socket is read-ready.""" - try: - pyngus.read_socket_input(self.connection, self.socket) - self.connection.process(time.time()) - except socket.error: - self._socket_error() - - def send_output(self): - """Called when socket is write-ready.""" - try: - pyngus.write_socket_output(self.connection, - self.socket) - self.connection.process(time.time()) - except socket.error: - self._socket_error() - - def _socket_error(self): - self.connection.close_input() - self.connection.close_output() - # the broker will clean up in its main loop - - # Pyngus ConnectionEventHandler callbacks: - - def connection_active(self, connection): - 
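# Pyngus invokes this callback once the AMQP open handshake completes;
# the fake broker just keeps a running count of live connections.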
self.server.connection_count += 1 - - def connection_remote_closed(self, connection, reason): - """Peer has closed the connection.""" - self.connection.close() - - def connection_closed(self, connection): - """Connection close completed.""" - self.server.connection_count -= 1 - - def connection_failed(self, connection, error): - """Connection failure detected.""" - self.connection_closed(connection) - - def sender_requested(self, connection, link_handle, - name, requested_source, properties): - """Create a new message source.""" - addr = requested_source or "source-" + uuid.uuid4().hex - link = FakeBroker.SenderLink(self.server, self, - link_handle, addr) - self.sender_links.add(link) - - def receiver_requested(self, connection, link_handle, - name, requested_target, properties): - """Create a new message consumer.""" - addr = requested_target or "target-" + uuid.uuid4().hex - FakeBroker.ReceiverLink(self.server, self, - link_handle, addr) - - def sasl_step(self, connection, pn_sasl): - # only called if not using Cyrus SASL - if 'PLAIN' in self.sasl_mechanisms: - credentials = pn_sasl.recv() - if not credentials: - return # wait until some arrives - if credentials not in self.user_credentials: - # failed - return pn_sasl.done(pn_sasl.AUTH) - pn_sasl.done(pn_sasl.OK) - - class SenderLink(pyngus.SenderEventHandler): - """An AMQP sending link.""" - def __init__(self, server, conn, handle, src_addr=None): - self.server = server - self.conn = conn - cnn = conn.connection - self.link = cnn.accept_sender(handle, - source_override=src_addr, - event_handler=self) - conn.sender_links.add(self) - self.link.open() - self.routed = False - - def destroy(self): - """Destroy the link.""" - conn = self.conn - self.conn = None - conn.sender_links.remove(self) - conn.dead_links.discard(self) - if self.link: - self.link.destroy() - self.link = None - - def send_message(self, message): - """Send a message over this link.""" - self.link.send(message) - - def _cleanup(self): - if self.routed: - self.server.remove_route(self.link.source_address, - self) - self.routed = False - self.conn.dead_links.add(self) - - # Pyngus SenderEventHandler callbacks: - - def sender_active(self, sender_link): - self.server.sender_link_count += 1 - self.server.add_route(self.link.source_address, self) - self.routed = True - - def sender_remote_closed(self, sender_link, error): - self.link.close() - - def sender_closed(self, sender_link): - self.server.sender_link_count -= 1 - self._cleanup() - - def sender_failed(self, sender_link, error): - self.sender_closed(sender_link) - - class ReceiverLink(pyngus.ReceiverEventHandler): - """An AMQP Receiving link.""" - def __init__(self, server, conn, handle, addr=None): - self.server = server - self.conn = conn - cnn = conn.connection - self.link = cnn.accept_receiver(handle, - target_override=addr, - event_handler=self) - conn.receiver_links.add(self) - self.link.open() - self.link.add_capacity(10) - - def destroy(self): - """Destroy the link.""" - conn = self.conn - self.conn = None - conn.receiver_links.remove(self) - conn.dead_links.discard(self) - if self.link: - self.link.destroy() - self.link = None - - # ReceiverEventHandler callbacks: - - def receiver_active(self, receiver_link): - self.server.receiver_link_count += 1 - - def receiver_remote_closed(self, receiver_link, error): - self.link.close() - - def receiver_closed(self, receiver_link): - self.server.receiver_link_count -= 1 - self.conn.dead_links.add(self) - - def receiver_failed(self, receiver_link, error): - 
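# A transport-level link failure is handled the same way as an orderly
# close: fall through to receiver_closed() so the link count drops and
# the link is queued on dead_links for cleanup.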
self.receiver_closed(receiver_link) - - def message_received(self, receiver_link, message, handle): - """Forward this message out the proper sending link.""" - if self.server.forward_message(message): - self.link.message_accepted(handle) - else: - self.link.message_rejected(handle) - - if self.link.capacity < 1: - self.link.add_capacity(10) - - def __init__(self, server_prefix="exclusive", - broadcast_prefix="broadcast", - group_prefix="unicast", - address_separator=".", - sock_addr="", sock_port=0, - sasl_mechanisms="ANONYMOUS", - user_credentials=None, - sasl_config_dir=None, - sasl_config_name=None): - """Create a fake broker listening on sock_addr:sock_port.""" - if not pyngus: - raise AssertionError("pyngus module not present") - threading.Thread.__init__(self) - self._server_prefix = server_prefix + address_separator - self._broadcast_prefix = broadcast_prefix + address_separator - self._group_prefix = group_prefix + address_separator - self._address_separator = address_separator - self._sasl_mechanisms = sasl_mechanisms - self._sasl_config_dir = sasl_config_dir - self._sasl_config_name = sasl_config_name - self._user_credentials = user_credentials - self._wakeup_pipe = os.pipe() - self._my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self._my_socket.bind((sock_addr, sock_port)) - self.host, self.port = self._my_socket.getsockname() - self.container = pyngus.Container("test_server_%s:%d" - % (self.host, self.port)) - self._connections = {} - self._sources = {} - self._pause = threading.Event() - # count of messages forwarded, by messaging pattern - self.direct_count = 0 - self.topic_count = 0 - self.fanout_count = 0 - self.fanout_sent_count = 0 - self.dropped_count = 0 - # counts for active links and connections: - self.connection_count = 0 - self.sender_link_count = 0 - self.receiver_link_count = 0 - - def start(self): - """Start the server.""" - LOG.debug("Starting Test Broker on %s:%d", self.host, self.port) - self._shutdown = False - self._closing = False - self.daemon = True - self._pause.set() - self._my_socket.listen(10) - super(FakeBroker, self).start() - - def pause(self): - self._pause.clear() - os.write(self._wakeup_pipe[1], b'!') - - def stop(self, clean=False): - """Stop the server.""" - # If clean is True, attempt a clean shutdown by closing all open - # links/connections first. 
Otherwise force an immediate disconnect - LOG.debug("Stopping test Broker %s:%d", self.host, self.port) - if clean: - self._closing = 1 - else: - self._shutdown = True - self._pause.set() - os.write(self._wakeup_pipe[1], b'!') - self.join() - LOG.debug("Test Broker %s:%d stopped", self.host, self.port) - - def run(self): - """Process I/O and timer events until the broker is stopped.""" - LOG.debug("Test Broker on %s:%d started", self.host, self.port) - while not self._shutdown: - self._pause.wait() - readers, writers, timers = self.container.need_processing() - - # map pyngus Connections back to _TestConnections: - readfd = [c.user_context for c in readers] - readfd.extend([self._my_socket, self._wakeup_pipe[0]]) - writefd = [c.user_context for c in writers] - - timeout = None - if timers: - # [0] == next expiring timer - deadline = timers[0].next_tick - now = time.time() - timeout = 0 if deadline <= now else deadline - now - - readable, writable, ignore = select.select(readfd, - writefd, - [], - timeout) - worked = set() - for r in readable: - if r is self._my_socket: - # new inbound connection request received - sock, addr = self._my_socket.accept() - if not self._closing: - # create a new Connection for it: - name = str(addr) - conn = FakeBroker.Connection(self, sock, name, - self._sasl_mechanisms, - self._user_credentials, - self._sasl_config_dir, - self._sasl_config_name) - self._connections[conn.name] = conn - else: - sock.close() # drop it - elif r is self._wakeup_pipe[0]: - os.read(self._wakeup_pipe[0], 512) - else: - r.process_input() - worked.add(r) - - for t in timers: - now = time.time() - if t.next_tick > now: - break - t.process(now) - conn = t.user_context - worked.add(conn) - - for w in writable: - w.send_output() - worked.add(w) - - # clean up any closed connections or links: - while worked: - conn = worked.pop() - if conn.connection.closed: - del self._connections[conn.name] - conn.destroy() - else: - while conn.dead_links: - conn.dead_links.pop().destroy() - - if self._closing and not self._connections: - self._shutdown = True - elif self._closing == 1: - # start closing connections - self._closing = 2 - for conn in self._connections.values(): - conn.connection.close() - - # Shutting down. Any open links are just disconnected - the peer will - # see a socket close. - self._my_socket.close() - for conn in self._connections.values(): - conn.destroy() - self._connections = None - self.container.destroy() - self.container = None - return 0 - - def add_route(self, address, link): - # route from address -> link[, link ...] 
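# self._sources is the broker's whole routing table: a dict mapping an
# address string to the list of sender links subscribed to it, e.g.
# (illustrative values, using this FakeBroker's default prefixes):
#
#     self._sources = {
#         "unicast.test-topic": [link_a, link_b],    # shared: round-robin
#         "broadcast.test-topic": [link_a, link_b],  # fanout: copy to all
#         "exclusive.server1": [link_a],             # direct: one consumer
#     }
#
# forward_message() below selects the delivery pattern from the prefix.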
- if address not in self._sources: - self._sources[address] = [link] - elif link not in self._sources[address]: - self._sources[address].append(link) - - def remove_route(self, address, link): - if address in self._sources: - if link in self._sources[address]: - self._sources[address].remove(link) - if not self._sources[address]: - del self._sources[address] - - def forward_message(self, message): - # returns True if message was routed - dest = message.address - if dest not in self._sources: - self.dropped_count += 1 - return False - LOG.debug("Forwarding [%s]", dest) - # route "behavior" determined by prefix: - if dest.startswith(self._broadcast_prefix): - self.fanout_count += 1 - for link in self._sources[dest]: - self.fanout_sent_count += 1 - LOG.debug("Broadcast to %s", dest) - link.send_message(message) - elif dest.startswith(self._group_prefix): - # round-robin: - self.topic_count += 1 - link = self._sources[dest].pop(0) - link.send_message(message) - LOG.debug("Send to %s", dest) - self._sources[dest].append(link) - else: - # unicast: - self.direct_count += 1 - LOG.debug("Unicast to %s", dest) - self._sources[dest][0].send_message(message) - return True diff --git a/oslo_messaging/tests/drivers/test_impl_kafka.py b/oslo_messaging/tests/drivers/test_impl_kafka.py deleted file mode 100644 index 4579453..0000000 --- a/oslo_messaging/tests/drivers/test_impl_kafka.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (C) 2015 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import kafka -from kafka.common import KafkaError -import mock -from oslo_serialization import jsonutils -import testscenarios -from testtools.testcase import unittest -import time - -import oslo_messaging -from oslo_messaging._drivers import common as driver_common -from oslo_messaging._drivers import impl_kafka as kafka_driver -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - -KAFKA_BROKER = 'localhost:9092' -KAFKA_BROKER_URL = 'kafka://localhost:9092' - - -def _is_kafka_service_running(): - """Checks whether the Kafka service is running or not""" - kafka_running = True - try: - broker = KAFKA_BROKER - kafka.KafkaClient(broker) - except KafkaError: - # Kafka service is not running. 
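# (The old kafka-python KafkaClient connects eagerly in its constructor,
# so an unreachable broker surfaces as a KafkaError right here, and the
# integration tests below are skipped via their skipUnless guards.)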
- kafka_running = False - return kafka_running - - -class TestKafkaDriverLoad(test_utils.BaseTestCase): - - def setUp(self): - super(TestKafkaDriverLoad, self).setUp() - self.messaging_conf.transport_driver = 'kafka' - - def test_driver_load(self): - transport = oslo_messaging.get_transport(self.conf) - self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver) - - -class TestKafkaTransportURL(test_utils.BaseTestCase): - - scenarios = [ - ('none', dict(url=None, - expected=dict(hostaddrs=['localhost:9092']))), - ('empty', dict(url='kafka:///', - expected=dict(hostaddrs=['localhost:9092']))), - ('host', dict(url='kafka://127.0.0.1', - expected=dict(hostaddrs=['127.0.0.1:9092']))), - ('port', dict(url='kafka://localhost:1234', - expected=dict(hostaddrs=['localhost:1234']))), - ('two', dict(url='kafka://localhost:1234,localhost2:1234', - expected=dict(hostaddrs=['localhost:1234', - 'localhost2:1234']))), - - ] - - def setUp(self): - super(TestKafkaTransportURL, self).setUp() - self.messaging_conf.transport_driver = 'kafka' - - def test_transport_url(self): - transport = oslo_messaging.get_transport(self.conf, self.url) - self.addCleanup(transport.cleanup) - driver = transport._driver - - conn = driver._get_connection(kafka_driver.PURPOSE_SEND) - self.assertEqual(self.expected['hostaddrs'], conn.hostaddrs) - - -class TestKafkaDriver(test_utils.BaseTestCase): - """Unit Test cases to test the kafka driver - """ - - def setUp(self): - super(TestKafkaDriver, self).setUp() - self.messaging_conf.transport_driver = 'kafka' - transport = oslo_messaging.get_transport(self.conf) - self.driver = transport._driver - - def test_send(self): - target = oslo_messaging.Target(topic="topic_test") - self.assertRaises(NotImplementedError, - self.driver.send, target, {}, {}) - - def test_send_notification(self): - target = oslo_messaging.Target(topic="topic_test") - - with mock.patch.object( - kafka_driver.Connection, 'notify_send') as fake_send: - self.driver.send_notification(target, {}, {}, None) - self.assertEqual(1, len(fake_send.mock_calls)) - - def test_listen(self): - target = oslo_messaging.Target(topic="topic_test") - self.assertRaises(NotImplementedError, self.driver.listen, target) - - -class TestKafkaConnection(test_utils.BaseTestCase): - - def setUp(self): - super(TestKafkaConnection, self).setUp() - self.messaging_conf.transport_driver = 'kafka' - transport = oslo_messaging.get_transport(self.conf) - self.driver = transport._driver - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, '_send') - def test_notify(self, fake_send, fake_ensure_connection): - conn = self.driver._get_connection(kafka_driver.PURPOSE_SEND) - conn.notify_send("fake_topic", {"fake_ctxt": "fake_param"}, - {"fake_text": "fake_message_1"}, 10) - self.assertEqual(1, len(fake_send.mock_calls)) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, '_send') - def test_notify_with_retry(self, fake_send, fake_ensure_connection): - conn = self.driver._get_connection(kafka_driver.PURPOSE_SEND) - fake_send.side_effect = KafkaError("fake_exception") - conn.notify_send("fake_topic", {"fake_ctxt": "fake_param"}, - {"fake_text": "fake_message_2"}, 10) - self.assertEqual(10, len(fake_send.mock_calls)) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, '_parse_url') - def test_consume(self, fake_parse_url, fake_ensure_connection): - fake_message = { 
- "context": {"fake": "fake_context_1"}, - "message": {"fake": "fake_message_1"}} - - conn = kafka_driver.Connection( - self.conf, '', kafka_driver.PURPOSE_LISTEN) - - conn.consumer = mock.MagicMock() - conn.consumer.fetch_messages = mock.MagicMock( - return_value=iter([jsonutils.dumps(fake_message)])) - - self.assertEqual(fake_message, jsonutils.loads(conn.consume()[0])) - self.assertEqual(1, len(conn.consumer.fetch_messages.mock_calls)) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, '_parse_url') - def test_consume_timeout(self, fake_parse_url, fake_ensure_connection): - deadline = time.time() + 3 - conn = kafka_driver.Connection( - self.conf, '', kafka_driver.PURPOSE_LISTEN) - - conn.consumer = mock.MagicMock() - conn.consumer.fetch_messages = mock.MagicMock(return_value=iter([])) - - self.assertRaises(driver_common.Timeout, conn.consume, timeout=3) - self.assertEqual(0, int(deadline - time.time())) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, '_parse_url') - def test_consume_with_default_timeout( - self, fake_parse_url, fake_ensure_connection): - deadline = time.time() + 1 - conn = kafka_driver.Connection( - self.conf, '', kafka_driver.PURPOSE_LISTEN) - - conn.consumer = mock.MagicMock() - conn.consumer.fetch_messages = mock.MagicMock(return_value=iter([])) - - self.assertRaises(driver_common.Timeout, conn.consume) - self.assertEqual(0, int(deadline - time.time())) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, '_parse_url') - def test_consume_timeout_without_consumers( - self, fake_parse_url, fake_ensure_connection): - deadline = time.time() + 3 - conn = kafka_driver.Connection( - self.conf, '', kafka_driver.PURPOSE_LISTEN) - conn.consumer = mock.MagicMock(return_value=None) - - self.assertRaises(driver_common.Timeout, conn.consume, timeout=3) - self.assertEqual(0, int(deadline - time.time())) - - -class TestKafkaListener(test_utils.BaseTestCase): - - def setUp(self): - super(TestKafkaListener, self).setUp() - self.messaging_conf.transport_driver = 'kafka' - transport = oslo_messaging.get_transport(self.conf) - self.driver = transport._driver - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, 'declare_topic_consumer') - def test_create_listener(self, fake_consumer, fake_ensure_connection): - fake_target = oslo_messaging.Target(topic='fake_topic') - fake_targets_and_priorities = [(fake_target, 'info')] - self.driver.listen_for_notifications(fake_targets_and_priorities, None, - None, None) - self.assertEqual(1, len(fake_consumer.mock_calls)) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, 'declare_topic_consumer') - def test_converting_targets_to_topics(self, fake_consumer, - fake_ensure_connection): - fake_targets_and_priorities = [ - (oslo_messaging.Target(topic="fake_topic", - exchange="test1"), 'info'), - (oslo_messaging.Target(topic="fake_topic", - exchange="test2"), 'info'), - (oslo_messaging.Target(topic="fake_topic", - exchange="test1"), 'error'), - (oslo_messaging.Target(topic="fake_topic", - exchange="test3"), 'error'), - ] - self.driver.listen_for_notifications(fake_targets_and_priorities, None, - None, None) - self.assertEqual(1, len(fake_consumer.mock_calls)) - fake_consumer.assert_called_once_with(set(['fake_topic.error', - 
'fake_topic.info']), - None) - - @mock.patch.object(kafka_driver.Connection, '_ensure_connection') - @mock.patch.object(kafka_driver.Connection, 'declare_topic_consumer') - def test_stop_listener(self, fake_consumer, fake_client): - fake_target = oslo_messaging.Target(topic='fake_topic') - fake_targets_and_priorities = [(fake_target, 'info')] - listener = self.driver.listen_for_notifications( - fake_targets_and_priorities, None, None, None)._poll_style_listener - listener.conn.consume = mock.MagicMock() - listener.conn.consume.return_value = ( - iter([kafka.common.KafkaMessage( - topic='fake_topic', partition=0, offset=0, - key=None, value='{"message": {"fake": "fake_message_1"},' - '"context": {"fake": "fake_context_1"}}')])) - listener.poll() - self.assertEqual(1, len(listener.conn.consume.mock_calls)) - listener.conn.stop_consuming = mock.MagicMock() - listener.stop() - fake_response = listener.poll() - self.assertEqual(1, len(listener.conn.consume.mock_calls)) - self.assertEqual([], fake_response) - - -class TestWithRealKafkaBroker(test_utils.BaseTestCase): - - def setUp(self): - super(TestWithRealKafkaBroker, self).setUp() - self.messaging_conf.transport_driver = 'kafka' - transport = oslo_messaging.get_transport(self.conf, KAFKA_BROKER_URL) - self.driver = transport._driver - - @unittest.skipUnless( - _is_kafka_service_running(), "Kafka service is not available") - def test_send_and_receive_message(self): - target = oslo_messaging.Target( - topic="fake_topic", exchange='fake_exchange') - targets_and_priorities = [(target, 'fake_info')] - - listener = self.driver.listen_for_notifications( - targets_and_priorities, None, None, None)._poll_style_listener - fake_context = {"fake_context_key": "fake_context_value"} - fake_message = {"fake_message_key": "fake_message_value"} - self.driver.send_notification( - target, fake_context, fake_message, None) - - received_message = listener.poll()[0] - self.assertEqual(fake_context, received_message.ctxt) - self.assertEqual(fake_message, received_message.message) - - @unittest.skipUnless( - _is_kafka_service_running(), "Kafka service is not available") - def test_send_and_receive_message_without_exchange(self): - target = oslo_messaging.Target(topic="fake_no_exchange_topic") - targets_and_priorities = [(target, 'fake_info')] - - listener = self.driver.listen_for_notifications( - targets_and_priorities, None, None, None)._poll_style_listener - fake_context = {"fake_context_key": "fake_context_value"} - fake_message = {"fake_message_key": "fake_message_value"} - self.driver.send_notification( - target, fake_context, fake_message, None) - - received_message = listener.poll()[0] - self.assertEqual(fake_context, received_message.ctxt) - self.assertEqual(fake_message, received_message.message) - - @unittest.skipUnless( - _is_kafka_service_running(), "Kafka service is not available") - def test_receive_message_from_empty_topic_with_timeout(self): - target = oslo_messaging.Target( - topic="fake_empty_topic", exchange='fake_empty_exchange') - targets_and_priorities = [(target, 'fake_info')] - - listener = self.driver.listen_for_notifications( - targets_and_priorities, None, None, None)._poll_style_listener - - deadline = time.time() + 3 - received_message = listener.poll(batch_timeout=3) - self.assertEqual(0, int(deadline - time.time())) - self.assertEqual([], received_message) diff --git a/oslo_messaging/tests/drivers/test_impl_rabbit.py b/oslo_messaging/tests/drivers/test_impl_rabbit.py deleted file mode 100644 index f3ddef6..0000000 --- 
a/oslo_messaging/tests/drivers/test_impl_rabbit.py +++ /dev/null @@ -1,1059 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import ssl -import sys -import threading -import time -import uuid - -import fixtures -import kombu -import kombu.transport.memory -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import versionutils -from oslotest import mockpatch -import pkg_resources -import testscenarios - -import oslo_messaging -from oslo_messaging._drivers import amqpdriver -from oslo_messaging._drivers import common as driver_common -from oslo_messaging._drivers import impl_rabbit as rabbit_driver -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - -load_tests = testscenarios.load_tests_apply_scenarios - - -class TestDeprecatedRabbitDriverLoad(test_utils.BaseTestCase): - - def setUp(self): - super(TestDeprecatedRabbitDriverLoad, self).setUp( - conf=cfg.ConfigOpts()) - self.messaging_conf.transport_driver = 'rabbit' - self.config(fake_rabbit=True, group="oslo_messaging_rabbit") - - def test_driver_load(self): - transport = oslo_messaging.get_transport(self.conf) - self.addCleanup(transport.cleanup) - driver = transport._driver - url = driver._get_connection()._url - - self.assertIsInstance(driver, rabbit_driver.RabbitDriver) - self.assertEqual('memory:////', url) - - -class TestHeartbeat(test_utils.BaseTestCase): - - @mock.patch('oslo_messaging._drivers.impl_rabbit.LOG') - @mock.patch('kombu.connection.Connection.heartbeat_check') - @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.' - '_heartbeat_supported_and_enabled', return_value=True) - @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.' 
-                'ensure_connection')
-    def _do_test_heartbeat_sent(self, fake_ensure_connection,
-                                fake_heartbeat_support, fake_heartbeat,
-                                fake_logger, heartbeat_side_effect=None,
-                                info=None):
-
-        event = threading.Event()
-
-        def heartbeat_check(rate=2):
-            event.set()
-            if heartbeat_side_effect:
-                raise heartbeat_side_effect
-
-        fake_heartbeat.side_effect = heartbeat_check
-
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-        conn = transport._driver._get_connection()
-        conn.ensure(method=lambda: True)
-        event.wait()
-        conn._heartbeat_stop()
-
-        # check that the heartbeat has been called
-        self.assertLess(0, fake_heartbeat.call_count)
-
-        if not heartbeat_side_effect:
-            self.assertEqual(1, fake_ensure_connection.call_count)
-            self.assertEqual(2, fake_logger.debug.call_count)
-            self.assertEqual(0, fake_logger.info.call_count)
-        else:
-            self.assertEqual(2, fake_ensure_connection.call_count)
-            self.assertEqual(2, fake_logger.debug.call_count)
-            self.assertEqual(1, fake_logger.info.call_count)
-            self.assertIn(mock.call(info, mock.ANY),
-                          fake_logger.info.mock_calls)
-
-    def test_test_heartbeat_sent_default(self):
-        self._do_test_heartbeat_sent()
-
-    def test_test_heartbeat_sent_connection_fail(self):
-        self._do_test_heartbeat_sent(
-            heartbeat_side_effect=kombu.exceptions.ConnectionError,
-            info='A recoverable connection/channel error occurred, '
-                 'trying to reconnect: %s')
-
-
-class TestRabbitQos(test_utils.BaseTestCase):
-
-    def connection_with(self, prefetch, purpose):
-        self.config(rabbit_qos_prefetch_count=prefetch,
-                    group="oslo_messaging_rabbit")
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        transport._driver._get_connection(purpose)
-
-    @mock.patch('kombu.transport.memory.Channel.basic_qos')
-    def test_qos_sent_on_listen_connection(self, fake_basic_qos):
-        self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_LISTEN)
-        fake_basic_qos.assert_called_once_with(0, 1, False)
-
-    @mock.patch('kombu.transport.memory.Channel.basic_qos')
-    def test_qos_not_sent_when_cfg_zero(self, fake_basic_qos):
-        self.connection_with(prefetch=0, purpose=driver_common.PURPOSE_LISTEN)
-        fake_basic_qos.assert_not_called()
-
-    @mock.patch('kombu.transport.memory.Channel.basic_qos')
-    def test_qos_not_sent_on_send_connection(self, fake_basic_qos):
-        self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_SEND)
-        fake_basic_qos.assert_not_called()
-
-
-class TestRabbitDriverLoad(test_utils.BaseTestCase):
-
-    scenarios = [
-        ('rabbit', dict(transport_driver='rabbit',
-                        url='amqp://guest:guest@localhost:5672//')),
-        ('kombu', dict(transport_driver='kombu',
-                       url='amqp://guest:guest@localhost:5672//')),
-        ('rabbit+memory', dict(transport_driver='kombu+memory',
-                               url='memory:///'))
-    ]
-
-    @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
-    @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
-    def test_driver_load(self, fake_ensure, fake_reset):
-        self.config(heartbeat_timeout_threshold=60,
-                    group='oslo_messaging_rabbit')
-        self.messaging_conf.transport_driver = self.transport_driver
-        transport = oslo_messaging.get_transport(self.conf)
-        self.addCleanup(transport.cleanup)
-        driver = transport._driver
-        url = driver._get_connection()._url
-
-        self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
-        self.assertEqual(self.url, url)
-
-
-class TestRabbitDriverLoadSSL(test_utils.BaseTestCase):
-    scenarios = [
-        ('no_ssl', dict(options=dict(), expected=False)),
-        ('no_ssl_with_options',
-         dict(options=dict(kombu_ssl_version='TLSv1'),
-              expected=False)),
-        ('just_ssl', dict(options=dict(rabbit_use_ssl=True),
-                          expected=True)),
-        ('ssl_with_options', dict(options=dict(rabbit_use_ssl=True,
-                                               kombu_ssl_version='TLSv1',
-                                               kombu_ssl_keyfile='foo',
-                                               kombu_ssl_certfile='bar',
-                                               kombu_ssl_ca_certs='foobar'),
-                                  expected=dict(ssl_version=3,
-                                                keyfile='foo',
-                                                certfile='bar',
-                                                ca_certs='foobar',
-                                                cert_reqs=ssl.CERT_REQUIRED))),
-    ]
-
-    @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
-    @mock.patch('kombu.connection.Connection')
-    def test_driver_load(self, connection_klass, fake_ensure):
-        self.config(group="oslo_messaging_rabbit", **self.options)
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-
-        connection = transport._driver._get_connection()
-        connection_klass.assert_called_once_with(
-            'memory:///', transport_options={
-                'client_properties': {
-                    'capabilities': {
-                        'connection.blocked': True,
-                        'consumer_cancel_notify': True,
-                        'authentication_failure_close': True,
-                    },
-                    'connection_name': connection.name},
-                'confirm_publish': True,
-                'on_blocked': mock.ANY,
-                'on_unblocked': mock.ANY},
-            ssl=self.expected, login_method='AMQPLAIN',
-            heartbeat=60, failover_strategy='round-robin'
-        )
-
-
-class TestRabbitPublisher(test_utils.BaseTestCase):
-    @mock.patch('kombu.messaging.Producer.publish')
-    def test_send_with_timeout(self, fake_publish):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        exchange_mock = mock.Mock()
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_SEND) as pool_conn:
-            conn = pool_conn.connection
-            conn._publish(exchange_mock, 'msg', routing_key='routing_key',
-                          timeout=1)
-
-        # NOTE(gcb) kombu accepts TTL in seconds instead of milliseconds since
-        # version 3.0.25, so do the conversion according to the kombu version.
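        # A standalone sketch of the same version guard may make the
        # workaround easier to follow; the helper name and its seconds-based
        # argument are illustrative, not part of the driver:
        #
        #     def to_expiration(timeout_seconds):
        #         kombu_version = pkg_resources.get_distribution(
        #             'kombu').version
        #         if versionutils.is_compatible('3.0.25', kombu_version):
        #             return timeout_seconds       # kombu >= 3.0.25: seconds
        #         return timeout_seconds * 1000    # older kombu: milliseconds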
-        # TODO(gcb) remove this workaround once all supported branches
-        # require kombu >= 3.0.25
-        kombu_version = pkg_resources.get_distribution('kombu').version
-        if versionutils.is_compatible('3.0.25', kombu_version):
-            fake_publish.assert_called_with(
-                'msg', expiration=1,
-                exchange=exchange_mock,
-                compression=self.conf.oslo_messaging_rabbit.kombu_compression,
-                routing_key='routing_key')
-        else:
-            fake_publish.assert_called_with(
-                'msg', expiration=1000,
-                exchange=exchange_mock,
-                compression=self.conf.oslo_messaging_rabbit.kombu_compression,
-                routing_key='routing_key')
-
-    @mock.patch('kombu.messaging.Producer.publish')
-    def test_send_no_timeout(self, fake_publish):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        exchange_mock = mock.Mock()
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_SEND) as pool_conn:
-            conn = pool_conn.connection
-            conn._publish(exchange_mock, 'msg', routing_key='routing_key')
-            fake_publish.assert_called_with(
-                'msg', expiration=None,
-                compression=self.conf.oslo_messaging_rabbit.kombu_compression,
-                exchange=exchange_mock,
-                routing_key='routing_key')
-
-    def test_declared_queue_publisher(self):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-
-        e_passive = kombu.entity.Exchange(
-            name='foobar',
-            type='topic',
-            passive=True)
-
-        e_active = kombu.entity.Exchange(
-            name='foobar',
-            type='topic',
-            passive=False)
-
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_SEND) as pool_conn:
-            conn = pool_conn.connection
-            exc = conn.connection.channel_errors[0]
-
-            def try_send(exchange):
-                conn._ensure_publishing(
-                    conn._publish_and_creates_default_queue,
-                    exchange, {}, routing_key='foobar')
-
-            with mock.patch('kombu.transport.virtual.Channel.close'):
-                # Ensure the exchange does not exist yet
-                self.assertRaises(exc, try_send, e_passive)
-                # Create it
-                try_send(e_active)
-                # Ensure it now exists
-                try_send(e_passive)
-
-                with mock.patch('kombu.messaging.Producer.publish',
-                                side_effect=exc):
-                    # Ensure the exchange is already in the cache
-                    self.assertIn('foobar', conn._declared_exchanges)
-                    # Reset the connection
-                    self.assertRaises(exc, try_send, e_passive)
-                    # Ensure the cache is empty
-                    self.assertEqual(0, len(conn._declared_exchanges))
-
-                try_send(e_active)
-                self.assertIn('foobar', conn._declared_exchanges)
-
-
-class TestRabbitConsume(test_utils.BaseTestCase):
-
-    def test_consume_timeout(self):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-        deadline = time.time() + 6
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_LISTEN) as conn:
-            self.assertRaises(driver_common.Timeout,
-                              conn.consume, timeout=3)
-
-            # The kombu memory transport doesn't really raise errors,
-            # so simulate a real driver's behavior
-            conn.connection.connection.recoverable_channel_errors = (IOError,)
-            conn.declare_fanout_consumer("notif.info", lambda msg: True)
-            with mock.patch('kombu.connection.Connection.drain_events',
-                            side_effect=IOError):
-                self.assertRaises(driver_common.Timeout,
-                                  conn.consume, timeout=3)
-
-        self.assertEqual(0, int(deadline - time.time()))
-
-    def test_consume_from_missing_queue(self):
-        transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
-        self.addCleanup(transport.cleanup)
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_LISTEN) as conn:
-            with mock.patch('kombu.Queue.consume') as consume, mock.patch(
-                    'kombu.Queue.declare') as declare:
-                conn.declare_topic_consumer(exchange_name='test',
-                                            topic='test',
-                                            callback=lambda msg: True)
-                import amqp
-                consume.side_effect = [amqp.NotFound, None]
-                conn.connection.connection.recoverable_connection_errors = ()
-                conn.connection.connection.recoverable_channel_errors = ()
-                self.assertEqual(1, declare.call_count)
-                conn.connection.connection.transport.drain_events = mock.Mock()
-                # Ensure that a queue will be re-declared if the consume
-                # method of kombu.Queue raises amqp.NotFound
-                conn.consume()
-                self.assertEqual(2, declare.call_count)
-
-    def test_consume_from_missing_queue_with_io_error_on_redeclaration(self):
-        transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
-        self.addCleanup(transport.cleanup)
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_LISTEN) as conn:
-            with mock.patch('kombu.Queue.consume') as consume, mock.patch(
-                    'kombu.Queue.declare') as declare:
-                conn.declare_topic_consumer(exchange_name='test',
-                                            topic='test',
-                                            callback=lambda msg: True)
-                import amqp
-                consume.side_effect = [amqp.NotFound, None]
-                declare.side_effect = [IOError, None]
-
-                conn.connection.connection.recoverable_connection_errors = (
-                    IOError,)
-                conn.connection.connection.recoverable_channel_errors = ()
-                self.assertEqual(1, declare.call_count)
-                conn.connection.connection.transport.drain_events = mock.Mock()
-                # Ensure that a queue will be re-declared after a
-                # 'queue not found' exception despite the connection error.
-                conn.consume()
-                self.assertEqual(3, declare.call_count)
-
-    def test_connection_ack_have_disconnected_kombu_connection(self):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-        with transport._driver._get_connection(
-                driver_common.PURPOSE_LISTEN) as conn:
-            channel = conn.connection.channel
-            with mock.patch('kombu.connection.Connection.connected',
-                            new_callable=mock.PropertyMock,
-                            return_value=False):
-                self.assertRaises(driver_common.Timeout,
-                                  conn.connection.consume, timeout=0.01)
-                # Ensure a new channel has been set up
-                self.assertNotEqual(channel, conn.connection.channel)
-
-
-class TestRabbitTransportURL(test_utils.BaseTestCase):
-
-    scenarios = [
-        ('none', dict(url=None,
-                      expected=["amqp://guest:guest@localhost:5672//"])),
-        ('memory', dict(url='kombu+memory:////',
-                        expected=["memory:///"])),
-        ('empty',
-         dict(url='rabbit:///',
-              expected=['amqp://guest:guest@localhost:5672/'])),
-        ('localhost',
-         dict(url='rabbit://localhost/',
-              expected=['amqp://:@localhost:5672/'])),
-        ('virtual_host',
-         dict(url='rabbit:///vhost',
-              expected=['amqp://guest:guest@localhost:5672/vhost'])),
-        ('no_creds',
-         dict(url='rabbit://host/virtual_host',
-              expected=['amqp://:@host:5672/virtual_host'])),
-        ('no_port',
-         dict(url='rabbit://user:password@host/virtual_host',
-              expected=['amqp://user:password@host:5672/virtual_host'])),
-        ('full_url',
-         dict(url='rabbit://user:password@host:10/virtual_host',
-              expected=['amqp://user:password@host:10/virtual_host'])),
-        ('full_two_url',
-         dict(url='rabbit://user:password@host:10,'
-                  'user2:password2@host2:12/virtual_host',
-              expected=["amqp://user:password@host:10/virtual_host",
-                        "amqp://user2:password2@host2:12/virtual_host"]
-              )),
-        ('rabbit_ipv6',
-         dict(url='rabbit://u:p@[fd00:beef:dead:55::133]:10/vhost',
-              expected=['amqp://u:p@[fd00:beef:dead:55::133]:10/vhost'])),
-        ('rabbit_ipv4',
-         dict(url='rabbit://user:password@10.20.30.40:10/vhost',
expected=['amqp://user:password@10.20.30.40:10/vhost'])), - ] - - def setUp(self): - super(TestRabbitTransportURL, self).setUp() - self.messaging_conf.transport_driver = 'rabbit' - self.config(heartbeat_timeout_threshold=0, - group='oslo_messaging_rabbit') - - @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure') - @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset') - def test_transport_url(self, fake_reset, fake_ensure): - transport = oslo_messaging.get_transport(self.conf, self.url) - self.addCleanup(transport.cleanup) - driver = transport._driver - - urls = driver._get_connection()._url.split(";") - self.assertEqual(sorted(self.expected), sorted(urls)) - - -class TestSendReceive(test_utils.BaseTestCase): - - _n_senders = [ - ('single_sender', dict(n_senders=1)), - ('multiple_senders', dict(n_senders=10)), - ] - - _context = [ - ('empty_context', dict(ctxt={})), - ('with_context', dict(ctxt={'user': 'mark'})), - ] - - _reply = [ - ('rx_id', dict(rx_id=True, reply=None)), - ('none', dict(rx_id=False, reply=None)), - ('empty_list', dict(rx_id=False, reply=[])), - ('empty_dict', dict(rx_id=False, reply={})), - ('false', dict(rx_id=False, reply=False)), - ('zero', dict(rx_id=False, reply=0)), - ] - - _failure = [ - ('success', dict(failure=False)), - ('failure', dict(failure=True, expected=False)), - ('expected_failure', dict(failure=True, expected=True)), - ] - - _timeout = [ - ('no_timeout', dict(timeout=None)), - ('timeout', dict(timeout=0.01)), # FIXME(markmc): timeout=0 is broken? - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders, - cls._context, - cls._reply, - cls._failure, - cls._timeout) - - def test_send_receive(self): - self.config(kombu_missing_consumer_retry_timeout=0.5, - group="oslo_messaging_rabbit") - self.config(heartbeat_timeout_threshold=0, - group="oslo_messaging_rabbit") - transport = oslo_messaging.get_transport(self.conf, - 'kombu+memory:////') - self.addCleanup(transport.cleanup) - - driver = transport._driver - - target = oslo_messaging.Target(topic='testtopic') - - listener = driver.listen(target, None, None)._poll_style_listener - - senders = [] - replies = [] - msgs = [] - - def send_and_wait_for_reply(i): - try: - - timeout = self.timeout - replies.append(driver.send(target, - self.ctxt, - {'tx_id': i}, - wait_for_reply=True, - timeout=timeout)) - self.assertFalse(self.failure) - self.assertIsNone(self.timeout) - except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e: - replies.append(e) - self.assertTrue(self.failure or self.timeout is not None) - - while len(senders) < self.n_senders: - senders.append(threading.Thread(target=send_and_wait_for_reply, - args=(len(senders), ))) - - for i in range(len(senders)): - senders[i].start() - - received = listener.poll()[0] - self.assertIsNotNone(received) - self.assertEqual(self.ctxt, received.ctxt) - self.assertEqual({'tx_id': i}, received.message) - msgs.append(received) - - # reply in reverse, except reply to the first guy second from last - order = list(range(len(senders) - 1, -1, -1)) - if len(order) > 1: - order[-1], order[-2] = order[-2], order[-1] - - for i in order: - if self.timeout is None: - if self.failure: - try: - raise ZeroDivisionError - except Exception: - failure = sys.exc_info() - msgs[i].reply(failure=failure) - elif self.rx_id: - msgs[i].reply({'rx_id': i}) - else: - msgs[i].reply(self.reply) - senders[i].join() - - self.assertEqual(len(senders), len(replies)) - for i, reply in 
enumerate(replies):
-            if self.timeout is not None:
-                self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
-            elif self.failure:
-                self.assertIsInstance(reply, ZeroDivisionError)
-            elif self.rx_id:
-                self.assertEqual({'rx_id': order[i]}, reply)
-            else:
-                self.assertEqual(self.reply, reply)
-
-
-TestSendReceive.generate_scenarios()
-
-
-class TestPollAsync(test_utils.BaseTestCase):
-
-    def test_poll_timeout(self):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-        driver = transport._driver
-        target = oslo_messaging.Target(topic='testtopic')
-        listener = driver.listen(target, None, None)._poll_style_listener
-        received = listener.poll(timeout=0.050)
-        self.assertEqual([], received)
-
-
-class TestRacyWaitForReply(test_utils.BaseTestCase):
-
-    def test_send_receive(self):
-        transport = oslo_messaging.get_transport(self.conf,
-                                                 'kombu+memory:////')
-        self.addCleanup(transport.cleanup)
-
-        driver = transport._driver
-
-        target = oslo_messaging.Target(topic='testtopic')
-
-        listener = driver.listen(target, None, None)._poll_style_listener
-        senders = []
-        replies = []
-        msgs = []
-
-        wait_conditions = []
-        orig_reply_waiter = amqpdriver.ReplyWaiter.wait
-
-        def reply_waiter(self, msg_id, timeout):
-            if wait_conditions:
-                cond = wait_conditions.pop()
-                with cond:
-                    cond.notify()
-                with cond:
-                    cond.wait()
-            return orig_reply_waiter(self, msg_id, timeout)
-
-        self.stubs.Set(amqpdriver.ReplyWaiter, 'wait', reply_waiter)
-
-        def send_and_wait_for_reply(i, wait_for_reply):
-            replies.append(driver.send(target,
-                                       {},
-                                       {'tx_id': i},
-                                       wait_for_reply=wait_for_reply,
-                                       timeout=None))
-
-        while len(senders) < 2:
-            t = threading.Thread(target=send_and_wait_for_reply,
-                                 args=(len(senders), True))
-            t.daemon = True
-            senders.append(t)
-
-        # test the case when msg_id is not set
-        t = threading.Thread(target=send_and_wait_for_reply,
-                             args=(len(senders), False))
-        t.daemon = True
-        senders.append(t)
-
-        # Start the first guy, receive his message, but delay his polling
-        notify_condition = threading.Condition()
-        wait_conditions.append(notify_condition)
-        with notify_condition:
-            senders[0].start()
-            notify_condition.wait()
-
-        msgs.extend(listener.poll())
-        self.assertEqual({'tx_id': 0}, msgs[-1].message)
-
-        # Start the second guy, receive his message
-        senders[1].start()
-
-        msgs.extend(listener.poll())
-        self.assertEqual({'tx_id': 1}, msgs[-1].message)
-
-        # Reply to both in order, making the second thread queue
-        # the reply meant for the first thread
-        msgs[0].reply({'rx_id': 0})
-        msgs[1].reply({'rx_id': 1})
-
-        # Wait for the second thread to finish
-        senders[1].join()
-
-        # Start the 3rd guy, receive his message
-        senders[2].start()
-
-        msgs.extend(listener.poll())
-        self.assertEqual({'tx_id': 2}, msgs[-1].message)
-
-        # Verify that _send_reply was not invoked by the driver:
-        with mock.patch.object(msgs[2], '_send_reply') as method:
-            msgs[2].reply({'rx_id': 2})
-            self.assertEqual(0, method.call_count)
-
-        # Wait for the 3rd thread to finish
-        senders[2].join()
-
-        # Let the first thread continue
-        with notify_condition:
-            notify_condition.notify()
-
-        # Wait for the first thread to finish
-        senders[0].join()
-
-        # Verify replies were received out of order
-        self.assertEqual(len(senders), len(replies))
-        self.assertEqual({'rx_id': 1}, replies[0])
-        self.assertIsNone(replies[1])
-        self.assertEqual({'rx_id': 0}, replies[2])
-
-
-def _declare_queue(target):
-    connection = kombu.connection.BrokerConnection(transport='memory')
-
-    # Kludge
to speed up tests. - connection.transport.polling_interval = 0.0 - - connection.connect() - channel = connection.channel() - - # work around 'memory' transport bug in 1.1.3 - channel._new_queue('ae.undeliver') - - if target.fanout: - exchange = kombu.entity.Exchange(name=target.topic + '_fanout', - type='fanout', - durable=False, - auto_delete=True) - queue = kombu.entity.Queue(name=target.topic + '_fanout_12345', - channel=channel, - exchange=exchange, - routing_key=target.topic) - elif target.server: - exchange = kombu.entity.Exchange(name='openstack', - type='topic', - durable=False, - auto_delete=False) - topic = '%s.%s' % (target.topic, target.server) - queue = kombu.entity.Queue(name=topic, - channel=channel, - exchange=exchange, - routing_key=topic) - else: - exchange = kombu.entity.Exchange(name='openstack', - type='topic', - durable=False, - auto_delete=False) - queue = kombu.entity.Queue(name=target.topic, - channel=channel, - exchange=exchange, - routing_key=target.topic) - - queue.declare() - - return connection, channel, queue - - -class TestRequestWireFormat(test_utils.BaseTestCase): - - _target = [ - ('topic_target', - dict(topic='testtopic', server=None, fanout=False)), - ('server_target', - dict(topic='testtopic', server='testserver', fanout=False)), - ('fanout_target', - dict(topic='testtopic', server=None, fanout=True)), - ] - - _msg = [ - ('empty_msg', - dict(msg={}, expected={})), - ('primitive_msg', - dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), - ('complex_msg', - dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}}, - expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), - ] - - _context = [ - ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), - ('user_project_ctxt', - dict(ctxt={'user': 'mark', 'project': 'snarkybunch'}, - expected_ctxt={'_context_user': 'mark', - '_context_project': 'snarkybunch'})), - ] - - _compression = [ - ('gzip_compression', dict(compression='gzip')), - ('without_compression', dict(compression=None)) - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._msg, - cls._context, - cls._target, - cls._compression) - - def setUp(self): - super(TestRequestWireFormat, self).setUp() - self.uuids = [] - self.orig_uuid4 = uuid.uuid4 - self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4)) - - def mock_uuid4(self): - self.uuids.append(self.orig_uuid4()) - return self.uuids[-1] - - def test_request_wire_format(self): - self.conf.oslo_messaging_rabbit.kombu_compression = self.compression - transport = oslo_messaging.get_transport(self.conf, - 'kombu+memory:////') - self.addCleanup(transport.cleanup) - - driver = transport._driver - - target = oslo_messaging.Target(topic=self.topic, - server=self.server, - fanout=self.fanout) - - connection, channel, queue = _declare_queue(target) - self.addCleanup(connection.release) - - driver.send(target, self.ctxt, self.msg) - - msgs = [] - - def callback(msg): - msg = channel.message_to_python(msg) - msg.ack() - msgs.append(msg.payload) - - queue.consume(callback=callback, - consumer_tag='1', - nowait=False) - - connection.drain_events() - - self.assertEqual(1, len(msgs)) - self.assertIn('oslo.message', msgs[0]) - - received = msgs[0] - received['oslo.message'] = jsonutils.loads(received['oslo.message']) - - # FIXME(markmc): add _msg_id and _reply_q check - expected_msg = { - '_unique_id': self.uuids[0].hex, - } - expected_msg.update(self.expected) - expected_msg.update(self.expected_ctxt) - - expected = { - 'oslo.version': '2.0', - 
'oslo.message': expected_msg, - } - - self.assertEqual(expected, received) - - -TestRequestWireFormat.generate_scenarios() - - -def _create_producer(target): - connection = kombu.connection.BrokerConnection(transport='memory') - - # Kludge to speed up tests. - connection.transport.polling_interval = 0.0 - - connection.connect() - channel = connection.channel() - - # work around 'memory' transport bug in 1.1.3 - channel._new_queue('ae.undeliver') - - if target.fanout: - exchange = kombu.entity.Exchange(name=target.topic + '_fanout', - type='fanout', - durable=False, - auto_delete=True) - producer = kombu.messaging.Producer(exchange=exchange, - channel=channel, - routing_key=target.topic) - elif target.server: - exchange = kombu.entity.Exchange(name='openstack', - type='topic', - durable=False, - auto_delete=False) - topic = '%s.%s' % (target.topic, target.server) - producer = kombu.messaging.Producer(exchange=exchange, - channel=channel, - routing_key=topic) - else: - exchange = kombu.entity.Exchange(name='openstack', - type='topic', - durable=False, - auto_delete=False) - producer = kombu.messaging.Producer(exchange=exchange, - channel=channel, - routing_key=target.topic) - - return connection, producer - - -class TestReplyWireFormat(test_utils.BaseTestCase): - - _target = [ - ('topic_target', - dict(topic='testtopic', server=None, fanout=False)), - ('server_target', - dict(topic='testtopic', server='testserver', fanout=False)), - ('fanout_target', - dict(topic='testtopic', server=None, fanout=True)), - ] - - _msg = [ - ('empty_msg', - dict(msg={}, expected={})), - ('primitive_msg', - dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), - ('complex_msg', - dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}}, - expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), - ] - - _context = [ - ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), - ('user_project_ctxt', - dict(ctxt={'_context_user': 'mark', - '_context_project': 'snarkybunch'}, - expected_ctxt={'user': 'mark', 'project': 'snarkybunch'})), - ] - - _compression = [ - ('gzip_compression', dict(compression='gzip')), - ('without_compression', dict(compression=None)) - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._msg, - cls._context, - cls._target, - cls._compression) - - def test_reply_wire_format(self): - self.conf.oslo_messaging_rabbit.kombu_compression = self.compression - - transport = oslo_messaging.get_transport(self.conf, - 'kombu+memory:////') - self.addCleanup(transport.cleanup) - - driver = transport._driver - - target = oslo_messaging.Target(topic=self.topic, - server=self.server, - fanout=self.fanout) - - listener = driver.listen(target, None, None)._poll_style_listener - - connection, producer = _create_producer(target) - self.addCleanup(connection.release) - - msg = { - 'oslo.version': '2.0', - 'oslo.message': {} - } - - msg['oslo.message'].update(self.msg) - msg['oslo.message'].update(self.ctxt) - - msg['oslo.message'].update({ - '_msg_id': uuid.uuid4().hex, - '_unique_id': uuid.uuid4().hex, - '_reply_q': 'reply_' + uuid.uuid4().hex, - }) - - msg['oslo.message'] = jsonutils.dumps(msg['oslo.message']) - - producer.publish(msg) - - received = listener.poll()[0] - self.assertIsNotNone(received) - self.assertEqual(self.expected_ctxt, received.ctxt) - self.assertEqual(self.expected, received.message) - - -TestReplyWireFormat.generate_scenarios() - - -class RpcKombuHATestCase(test_utils.BaseTestCase): - - def setUp(self): - super(RpcKombuHATestCase, self).setUp() - 
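        # Context for the assertions below: Connection.ensure re-runs a
        # failing callback with a reconnect between attempts, rotating
        # through the brokers configured next, and eventually gives up by
        # raising MessageDeliveryFailure. A hedged usage sketch reusing the
        # names from these tests (illustrative, not the driver's code):
        #
        #     conn = rabbit_driver.Connection(self.conf, url,
        #                                     driver_common.PURPOSE_SEND)
        #     try:
        #         conn.ensure(mock_callback, retry=4)
        #     except oslo_messaging.MessageDeliveryFailure:
        #         pass  # every broker in the failover list was exhausted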
self.brokers = ['host1', 'host2', 'host3', 'host4', 'host5'] - self.config(rabbit_hosts=self.brokers, - rabbit_retry_interval=0.01, - rabbit_retry_backoff=0.01, - kombu_reconnect_delay=0, - heartbeat_timeout_threshold=0, - group="oslo_messaging_rabbit") - - self.kombu_connect = mock.Mock() - self.useFixture(mockpatch.Patch( - 'kombu.connection.Connection.connect', - side_effect=self.kombu_connect)) - self.useFixture(mockpatch.Patch( - 'kombu.connection.Connection.connection')) - self.useFixture(mockpatch.Patch( - 'kombu.connection.Connection.channel')) - - # starting from the first broker in the list - url = oslo_messaging.TransportURL.parse(self.conf, None) - self.connection = rabbit_driver.Connection(self.conf, url, - driver_common.PURPOSE_SEND) - self.addCleanup(self.connection.close) - - def test_ensure_four_retry(self): - mock_callback = mock.Mock(side_effect=IOError) - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self.connection.ensure, mock_callback, - retry=4) - self.assertEqual(5, self.kombu_connect.call_count) - self.assertEqual(6, mock_callback.call_count) - - def test_ensure_one_retry(self): - mock_callback = mock.Mock(side_effect=IOError) - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self.connection.ensure, mock_callback, - retry=1) - self.assertEqual(2, self.kombu_connect.call_count) - self.assertEqual(3, mock_callback.call_count) - - def test_ensure_no_retry(self): - mock_callback = mock.Mock(side_effect=IOError) - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self.connection.ensure, mock_callback, - retry=0) - self.assertEqual(1, self.kombu_connect.call_count) - self.assertEqual(2, mock_callback.call_count) - - -class ConnectionLockTestCase(test_utils.BaseTestCase): - def _thread(self, lock, sleep, heartbeat=False): - def thread_task(): - if heartbeat: - with lock.for_heartbeat(): - time.sleep(sleep) - else: - with lock: - time.sleep(sleep) - - t = threading.Thread(target=thread_task) - t.daemon = True - t.start() - start = time.time() - - def get_elapsed_time(): - t.join() - return time.time() - start - - return get_elapsed_time - - def test_workers_only(self): - l = rabbit_driver.ConnectionLock() - t1 = self._thread(l, 1) - t2 = self._thread(l, 1) - self.assertAlmostEqual(1, t1(), places=0) - self.assertAlmostEqual(2, t2(), places=0) - - def test_worker_and_heartbeat(self): - l = rabbit_driver.ConnectionLock() - t1 = self._thread(l, 1) - t2 = self._thread(l, 1, heartbeat=True) - self.assertAlmostEqual(1, t1(), places=0) - self.assertAlmostEqual(2, t2(), places=0) - - def test_workers_and_heartbeat(self): - l = rabbit_driver.ConnectionLock() - t1 = self._thread(l, 1) - t2 = self._thread(l, 1) - t3 = self._thread(l, 1) - t4 = self._thread(l, 1, heartbeat=True) - t5 = self._thread(l, 1) - self.assertAlmostEqual(1, t1(), places=0) - self.assertAlmostEqual(2, t4(), places=0) - self.assertAlmostEqual(3, t2(), places=0) - self.assertAlmostEqual(4, t3(), places=0) - self.assertAlmostEqual(5, t5(), places=0) - - def test_heartbeat(self): - l = rabbit_driver.ConnectionLock() - t1 = self._thread(l, 1, heartbeat=True) - t2 = self._thread(l, 1) - self.assertAlmostEqual(1, t1(), places=0) - self.assertAlmostEqual(2, t2(), places=0) diff --git a/oslo_messaging/tests/drivers/test_pool.py b/oslo_messaging/tests/drivers/test_pool.py deleted file mode 100644 index a0b6ab4..0000000 --- a/oslo_messaging/tests/drivers/test_pool.py +++ /dev/null @@ -1,124 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading -import uuid - -import testscenarios - -from oslo_messaging._drivers import pool -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - - -class PoolTestCase(test_utils.BaseTestCase): - - _max_size = [ - ('default_size', dict(max_size=None, n_iters=4)), - ('set_max_size', dict(max_size=10, n_iters=10)), - ] - - _create_error = [ - ('no_create_error', dict(create_error=False)), - ('create_error', dict(create_error=True)), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._max_size, - cls._create_error) - - class TestPool(pool.Pool): - - def create(self): - return uuid.uuid4() - - class ThreadWaitWaiter(object): - - """A gross hack. - - Stub out the condition variable's wait() method and spin until it - has been called by each thread. - """ - - def __init__(self, cond, n_threads, stubs): - self.cond = cond - self.stubs = stubs - self.n_threads = n_threads - self.n_waits = 0 - self.orig_wait = cond.wait - - def count_waits(**kwargs): - self.n_waits += 1 - self.orig_wait(**kwargs) - self.stubs.Set(self.cond, 'wait', count_waits) - - def wait(self): - while self.n_waits < self.n_threads: - pass - self.stubs.Set(self.cond, 'wait', self.orig_wait) - - def test_pool(self): - kwargs = {} - if self.max_size is not None: - kwargs['max_size'] = self.max_size - - p = self.TestPool(**kwargs) - - if self.create_error: - def create_error(): - raise RuntimeError - orig_create = p.create - self.stubs.Set(p, 'create', create_error) - self.assertRaises(RuntimeError, p.get) - self.stubs.Set(p, 'create', orig_create) - - objs = [] - for i in range(self.n_iters): - objs.append(p.get()) - self.assertIsInstance(objs[i], uuid.UUID) - - def wait_for_obj(): - o = p.get() - self.assertIn(o, objs) - - waiter = self.ThreadWaitWaiter(p._cond, self.n_iters, self.stubs) - - threads = [] - for i in range(self.n_iters): - t = threading.Thread(target=wait_for_obj) - t.start() - threads.append(t) - - waiter.wait() - - for o in objs: - p.put(o) - - for t in threads: - t.join() - - for o in objs: - p.put(o) - - for o in p.iter_free(): - self.assertIn(o, objs) - objs.remove(o) - - self.assertEqual([], objs) - - -PoolTestCase.generate_scenarios() diff --git a/oslo_messaging/tests/drivers/zmq/__init__.py b/oslo_messaging/tests/drivers/zmq/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/drivers/zmq/matchmaker/__init__.py b/oslo_messaging/tests/drivers/zmq/matchmaker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/drivers/zmq/matchmaker/test_impl_matchmaker.py b/oslo_messaging/tests/drivers/zmq/matchmaker/test_impl_matchmaker.py deleted file mode 100644 index 2e369f7..0000000 --- a/oslo_messaging/tests/drivers/zmq/matchmaker/test_impl_matchmaker.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2014 Canonical, Ltd. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from fixtures._fixtures import timeout
-import retrying
-from stevedore import driver
-import testscenarios
-import testtools
-
-import oslo_messaging
-from oslo_messaging.tests import utils as test_utils
-from oslo_utils import importutils
-
-redis = importutils.try_import('redis')
-
-
-def redis_available():
-    '''Helper to see if a local redis server is running'''
-    if not redis:
-        return False
-    try:
-        c = redis.StrictRedis(socket_timeout=1)
-        c.ping()
-        return True
-    except redis.exceptions.ConnectionError:
-        return False
-
-
-load_tests = testscenarios.load_tests_apply_scenarios
-
-
-@testtools.skipIf(not redis_available(), "redis unavailable")
-class TestImplMatchmaker(test_utils.BaseTestCase):
-
-    scenarios = [
-        ("dummy", {"rpc_zmq_matchmaker": "dummy"}),
-        ("redis", {"rpc_zmq_matchmaker": "redis"}),
-    ]
-
-    def setUp(self):
-        super(TestImplMatchmaker, self).setUp()
-
-        self.test_matcher = driver.DriverManager(
-            'oslo.messaging.zmq.matchmaker',
-            self.rpc_zmq_matchmaker,
-        ).driver(self.conf)
-
-        if self.rpc_zmq_matchmaker == "redis":
-            self.addCleanup(self.test_matcher._redis.flushdb)
-
-        self.target = oslo_messaging.Target(topic="test_topic")
-        self.host1 = b"test_host1"
-        self.host2 = b"test_host2"
-
-    def test_register(self):
-        self.test_matcher.register(self.target, self.host1, "test")
-
-        self.assertEqual([self.host1],
-                         self.test_matcher.get_hosts(self.target, "test"))
-
-    def test_register_two_hosts(self):
-        self.test_matcher.register(self.target, self.host1, "test")
-        self.test_matcher.register(self.target, self.host2, "test")
-
-        self.assertItemsEqual(self.test_matcher.get_hosts(self.target, "test"),
-                              [self.host1, self.host2])
-
-    def test_register_unsubscribe(self):
-        self.test_matcher.register(self.target, self.host1, "test")
-        self.test_matcher.register(self.target, self.host2, "test")
-
-        self.test_matcher.unregister(self.target, self.host2, "test")
-
-        self.assertItemsEqual(self.test_matcher.get_hosts(self.target, "test"),
-                              [self.host1])
-
-    def test_register_two_same_hosts(self):
-        self.test_matcher.register(self.target, self.host1, "test")
-        self.test_matcher.register(self.target, self.host1, "test")
-
-        self.assertEqual([self.host1],
-                         self.test_matcher.get_hosts(self.target, "test"))
-
-    def test_get_hosts_wrong_topic(self):
-        target = oslo_messaging.Target(topic="no_such_topic")
-        hosts = []
-        try:
-            hosts = self.test_matcher.get_hosts(target, "test")
-        except (timeout.TimeoutException, retrying.RetryError):
-            pass
-        self.assertEqual([], hosts)
diff --git a/oslo_messaging/tests/drivers/zmq/test_impl_zmq.py b/oslo_messaging/tests/drivers/zmq/test_impl_zmq.py
deleted file mode 100644
index 04d86d9..0000000
--- a/oslo_messaging/tests/drivers/zmq/test_impl_zmq.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools -import time - -import oslo_messaging -from oslo_messaging._drivers import impl_zmq -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_socket -from oslo_messaging.tests.drivers.zmq import zmq_common -from oslo_messaging.tests import utils as test_utils - - -zmq = zmq_async.import_zmq() - - -class ZmqTestPortsRange(zmq_common.ZmqBaseTestCase): - - @testtools.skipIf(zmq is None, "zmq not available") - def setUp(self): - super(ZmqTestPortsRange, self).setUp() - - # Set config values - kwargs = {'rpc_zmq_min_port': 5555, - 'rpc_zmq_max_port': 5560} - self.config(group='oslo_messaging_zmq', **kwargs) - - def test_ports_range(self): - listeners = [] - - for i in range(10): - try: - target = oslo_messaging.Target(topic='testtopic_' + str(i)) - new_listener = self.driver.listen(target, None, None) - listeners.append(new_listener) - except zmq_socket.ZmqPortBusy: - pass - - self.assertLessEqual(len(listeners), 5) - - for l in listeners: - l.cleanup() - - -class TestConfZmqDriverLoad(test_utils.BaseTestCase): - - @testtools.skipIf(zmq is None, "zmq not available") - def setUp(self): - super(TestConfZmqDriverLoad, self).setUp() - self.messaging_conf.transport_driver = 'zmq' - - def test_driver_load(self): - transport = oslo_messaging.get_transport(self.conf) - self.assertIsInstance(transport._driver, impl_zmq.ZmqDriver) - - -class TestZmqBasics(zmq_common.ZmqBaseTestCase): - - def test_send_receive_raises(self): - """Call() without method.""" - target = oslo_messaging.Target(topic='testtopic') - self.listener.listen(target) - self.assertRaises( - KeyError, - self.driver.send, - target, {}, {'tx_id': 1}, - wait_for_reply=True, - timeout=60) - - def test_send_receive_topic(self): - """Call() with topic.""" - - target = oslo_messaging.Target(topic='testtopic') - self.listener.listen(target) - result = self.driver.send( - target, {}, - {'method': 'hello-world', 'tx_id': 1}, - wait_for_reply=True, - timeout=60) - self.assertTrue(result) - - def test_send_noreply(self): - """Cast() with topic.""" - - target = oslo_messaging.Target(topic='testtopic', server="my@server") - self.listener.listen(target) - time.sleep(0.01) - result = self.driver.send( - target, {}, - {'method': 'hello-world', 'tx_id': 1}, - wait_for_reply=False) - - self.listener._received.wait(5) - - self.assertIsNone(result) - self.assertTrue(self.listener._received.isSet()) - method = self.listener.message.message[u'method'] - self.assertEqual(u'hello-world', method) - - def test_send_fanout(self): - target = oslo_messaging.Target(topic='testtopic', fanout=True) - - self.listener.listen(target) - - result = self.driver.send( - target, {}, - {'method': 'hello-world', 'tx_id': 1}, - wait_for_reply=False) - - self.listener._received.wait(5) - - self.assertIsNone(result) - self.assertTrue(self.listener._received.isSet()) - method = self.listener.message.message[u'method'] - self.assertEqual(u'hello-world', method) - - def test_send_receive_direct(self): - """Call() without topic.""" - - target = oslo_messaging.Target(server='127.0.0.1') - 
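        # For contrast with this server-only (direct) target, the other
        # addressing modes exercised throughout these tests; plain
        # oslo_messaging Target usage, shown for illustration only:
        #
        #     oslo_messaging.Target(topic='testtopic')                # round-robin
        #     oslo_messaging.Target(topic='testtopic', server='srv')  # one server
        #     oslo_messaging.Target(topic='testtopic', fanout=True)   # broadcast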
self.listener.listen(target) - message = {'method': 'hello-world', 'tx_id': 1} - context = {} - result = self.driver.send(target, context, message, - wait_for_reply=True, - timeout=60) - self.assertTrue(result) - - def test_send_receive_notification(self): - """Notify() test""" - - target = oslo_messaging.Target(topic='t1', - server='notification@server') - self.listener.listen_notifications([(target, 'info')]) - - message = {'method': 'hello-world', 'tx_id': 1} - context = {} - target.topic += '.info' - self.driver.send_notification(target, context, message, '3.0') - self.listener._received.wait(5) - self.assertTrue(self.listener._received.isSet()) diff --git a/oslo_messaging/tests/drivers/zmq/test_pub_sub.py b/oslo_messaging/tests/drivers/zmq/test_pub_sub.py deleted file mode 100644 index 2973521..0000000 --- a/oslo_messaging/tests/drivers/zmq/test_pub_sub.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import time - -import msgpack -import six -import testscenarios - -from oslo_config import cfg - -import oslo_messaging -from oslo_messaging._drivers.zmq_driver.proxy import zmq_proxy -from oslo_messaging._drivers.zmq_driver.proxy import zmq_publisher_proxy -from oslo_messaging._drivers.zmq_driver import zmq_address -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_names -from oslo_messaging.tests.drivers.zmq import zmq_common - -load_tests = testscenarios.load_tests_apply_scenarios - -zmq = zmq_async.import_zmq() - -opt_group = cfg.OptGroup(name='zmq_proxy_opts', - title='ZeroMQ proxy options') -cfg.CONF.register_opts(zmq_proxy.zmq_proxy_opts, group=opt_group) - - -class TestPubSub(zmq_common.ZmqBaseTestCase): - - LISTENERS_COUNT = 3 - - scenarios = [ - ('json', {'serialization': 'json', - 'dumps': lambda obj: six.b(json.dumps(obj))}), - ('msgpack', {'serialization': 'msgpack', - 'dumps': msgpack.dumps}) - ] - - def setUp(self): - super(TestPubSub, self).setUp() - - kwargs = {'use_pub_sub': True, - 'rpc_zmq_serialization': self.serialization} - self.config(group='oslo_messaging_zmq', **kwargs) - - self.config(host="127.0.0.1", group="zmq_proxy_opts") - self.config(publisher_port="0", group="zmq_proxy_opts") - - self.publisher = zmq_publisher_proxy.PublisherProxy( - self.conf, self.driver.matchmaker) - self.driver.matchmaker.register_publisher( - (self.publisher.host, "")) - - self.listeners = [] - for i in range(self.LISTENERS_COUNT): - self.listeners.append(zmq_common.TestServerListener(self.driver)) - - def tearDown(self): - super(TestPubSub, self).tearDown() - self.publisher.cleanup() - for listener in self.listeners: - listener.stop() - - def _send_request(self, target): - # Needed only in test env to give listener a chance to connect - # before request fires - time.sleep(1) - context = {} - message = {'method': 'hello-world'} - - self.publisher.send_request( - [zmq_names.CAST_FANOUT_TYPE, - zmq_address.target_to_subscribe_filter(target), - 
b"message", - b"0000-0000", - self.dumps(context), - self.dumps(message)]) - - def _check_listener(self, listener): - listener._received.wait(timeout=5) - self.assertTrue(listener._received.isSet()) - method = listener.message.message[u'method'] - self.assertEqual(u'hello-world', method) - - def _check_listener_negative(self, listener): - listener._received.wait(timeout=1) - self.assertFalse(listener._received.isSet()) - - def test_single_listener(self): - target = oslo_messaging.Target(topic='testtopic', fanout=True) - self.listener.listen(target) - - self._send_request(target) - - self._check_listener(self.listener) - - def test_all_listeners(self): - target = oslo_messaging.Target(topic='testtopic', fanout=True) - - for listener in self.listeners: - listener.listen(target) - - self._send_request(target) - - for listener in self.listeners: - self._check_listener(listener) - - def test_filtered(self): - target = oslo_messaging.Target(topic='testtopic', fanout=True) - target_wrong = oslo_messaging.Target(topic='wrong', fanout=True) - - self.listeners[0].listen(target) - self.listeners[1].listen(target) - self.listeners[2].listen(target_wrong) - - self._send_request(target) - - self._check_listener(self.listeners[0]) - self._check_listener(self.listeners[1]) - self._check_listener_negative(self.listeners[2]) - - def test_topic_part_matching(self): - target = oslo_messaging.Target(topic='testtopic', server='server') - target_part = oslo_messaging.Target(topic='testtopic', fanout=True) - - self.listeners[0].listen(target) - self.listeners[1].listen(target) - - self._send_request(target_part) - - self._check_listener(self.listeners[0]) - self._check_listener(self.listeners[1]) diff --git a/oslo_messaging/tests/drivers/zmq/test_zmq_async.py b/oslo_messaging/tests/drivers/zmq/test_zmq_async.py deleted file mode 100644 index a2caf12..0000000 --- a/oslo_messaging/tests/drivers/zmq/test_zmq_async.py +++ /dev/null @@ -1,114 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import mock
-import testtools
-
-from oslo_messaging._drivers.zmq_driver.poller import green_poller
-from oslo_messaging._drivers.zmq_driver.poller import threading_poller
-from oslo_messaging._drivers.zmq_driver import zmq_async
-from oslo_messaging.tests import utils as test_utils
-
-zmq = zmq_async.import_zmq()
-
-
-class TestImportZmq(test_utils.BaseTestCase):
-
-    @testtools.skipIf(zmq is None, "zmq not available")
-    def setUp(self):
-        super(TestImportZmq, self).setUp()
-
-    def test_when_eventlet_is_available_then_load_eventlet_green_zmq(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: True
-
-        mock_try_import = mock.Mock()
-        zmq_async.importutils.try_import = mock_try_import
-
-        zmq_async.import_zmq()
-
-        mock_try_import.assert_called_with('eventlet.green.zmq', default=None)
-
-    def test_when_eventlet_is_unavailable_then_load_zmq(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: False
-
-        mock_try_import = mock.Mock()
-        zmq_async.importutils.try_import = mock_try_import
-
-        zmq_async.import_zmq()
-
-        mock_try_import.assert_called_with('zmq', default=None)
-
-
-class TestGetPoller(test_utils.BaseTestCase):
-
-    @testtools.skipIf(zmq is None, "zmq not available")
-    def setUp(self):
-        super(TestGetPoller, self).setUp()
-
-    def test_when_eventlet_is_available_then_return_GreenPoller(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: True
-
-        actual = zmq_async.get_poller()
-
-        self.assertTrue(isinstance(actual, green_poller.GreenPoller))
-
-    def test_when_eventlet_is_unavailable_then_return_ThreadingPoller(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: False
-
-        actual = zmq_async.get_poller()
-
-        self.assertTrue(isinstance(actual, threading_poller.ThreadingPoller))
-
-
-class TestGetReplyPoller(test_utils.BaseTestCase):
-
-    @testtools.skipIf(zmq is None, "zmq not available")
-    def setUp(self):
-        super(TestGetReplyPoller, self).setUp()
-
-    def test_when_eventlet_is_available_then_return_HoldReplyPoller(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: True
-
-        actual = zmq_async.get_poller()
-
-        self.assertTrue(isinstance(actual, green_poller.GreenPoller))
-
-    def test_when_eventlet_is_unavailable_then_return_ThreadingPoller(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: False
-
-        actual = zmq_async.get_poller()
-
-        self.assertTrue(isinstance(actual, threading_poller.ThreadingPoller))
-
-
-class TestGetExecutor(test_utils.BaseTestCase):
-
-    @testtools.skipIf(zmq is None, "zmq not available")
-    def setUp(self):
-        super(TestGetExecutor, self).setUp()
-
-    def test_when_eventlet_module_is_available_then_return_GreenExecutor(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: True
-
-        executor = zmq_async.get_executor('any method')
-
-        self.assertTrue(isinstance(executor, green_poller.GreenExecutor))
-        self.assertEqual('any method', executor._method)
-
-    def test_when_eventlet_is_unavailable_then_return_ThreadingExecutor(self):
-        zmq_async.eventletutils.is_monkey_patched = lambda _: False
-
-        executor = zmq_async.get_executor('any method')
-
-        self.assertTrue(isinstance(executor,
-                                   threading_poller.ThreadingExecutor))
-        self.assertEqual('any method', executor._method)
diff --git a/oslo_messaging/tests/drivers/zmq/test_zmq_transport_url.py b/oslo_messaging/tests/drivers/zmq/test_zmq_transport_url.py
deleted file mode 100644
index f80ee91..0000000
--- a/oslo_messaging/tests/drivers/zmq/test_zmq_transport_url.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -import oslo_messaging -from oslo_messaging._drivers import common -from oslo_messaging._drivers.zmq_driver.matchmaker.base import DummyMatchMaker -from oslo_messaging._drivers.zmq_driver.matchmaker import matchmaker_redis -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging.tests import utils as test_utils - - -zmq = zmq_async.import_zmq() - - -class TestZmqTransportUrl(test_utils.BaseTestCase): - - @testtools.skipIf(zmq is None, "zmq not available") - def setUp(self): - super(TestZmqTransportUrl, self).setUp() - - def setup_url(self, url): - transport = oslo_messaging.get_transport(self.conf, url) - self.addCleanup(transport.cleanup) - driver = transport._driver - return driver, url - - def test_empty_url(self): - driver, url = self.setup_url("zmq:///") - self.assertIs(matchmaker_redis.RedisMatchMaker, - driver.matchmaker.__class__) - self.assertEqual('zmq', driver.matchmaker.url.transport) - - def test_error_name(self): - self.assertRaises(common.RPCException, self.setup_url, "zmq+error:///") - - def test_dummy_url(self): - driver, url = self.setup_url("zmq+dummy:///") - self.assertIs(DummyMatchMaker, - driver.matchmaker.__class__) - self.assertEqual('zmq+dummy', driver.matchmaker.url.transport) - - def test_redis_url(self): - driver, url = self.setup_url("zmq+redis:///") - self.assertIs(matchmaker_redis.RedisMatchMaker, - driver.matchmaker.__class__) - self.assertEqual('zmq+redis', driver.matchmaker.url.transport) - - def test_redis_url_no_creds(self): - driver, url = self.setup_url("zmq+redis://host:65123/") - self.assertIs(matchmaker_redis.RedisMatchMaker, - driver.matchmaker.__class__) - self.assertEqual('zmq+redis', driver.matchmaker.url.transport) - self.assertEqual("host", driver.matchmaker.standalone_redis["host"]) - self.assertEqual(65123, driver.matchmaker.standalone_redis["port"]) - - def test_redis_url_no_port(self): - driver, url = self.setup_url("zmq+redis://:p12@host:65123/") - self.assertIs(matchmaker_redis.RedisMatchMaker, - driver.matchmaker.__class__) - self.assertEqual('zmq+redis', driver.matchmaker.url.transport) - self.assertEqual("host", driver.matchmaker.standalone_redis["host"]) - self.assertEqual(65123, driver.matchmaker.standalone_redis["port"]) - self.assertEqual("p12", driver.matchmaker.standalone_redis["password"]) - - def test_sentinel_multiple_hosts_url(self): - driver, url = self.setup_url( - "zmq+redis://sentinel1:20001,sentinel2:20001,sentinel3:20001/") - self.assertIs(matchmaker_redis.RedisMatchMaker, - driver.matchmaker.__class__) - self.assertEqual('zmq+redis', driver.matchmaker.url.transport) - self.assertEqual(3, len(driver.matchmaker.sentinel_hosts)) - expected = [("sentinel1", 20001), ("sentinel2", 20001), - ("sentinel3", 20001)] - self.assertEqual(expected, driver.matchmaker.sentinel_hosts) diff --git a/oslo_messaging/tests/drivers/zmq/zmq_common.py b/oslo_messaging/tests/drivers/zmq/zmq_common.py deleted file mode 100644 index ff48bfb..0000000 --- 
a/oslo_messaging/tests/drivers/zmq/zmq_common.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import threading - -import fixtures -import testtools - -import oslo_messaging -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging._drivers.zmq_driver import zmq_options -from oslo_messaging._i18n import _LE -from oslo_messaging.tests import utils as test_utils - -LOG = logging.getLogger(__name__) - -zmq = zmq_async.import_zmq() - - -class TestServerListener(object): - - def __init__(self, driver): - self.driver = driver - self.listener = None - self.executor = zmq_async.get_executor(self._run) - self._stop = threading.Event() - self._received = threading.Event() - self.message = None - - def listen(self, target): - self.listener = self.driver.listen(target, None, - None)._poll_style_listener - self.executor.execute() - - def listen_notifications(self, targets_and_priorities): - self.listener = self.driver.listen_for_notifications( - targets_and_priorities, None, None, None)._poll_style_listener - self.executor.execute() - - def _run(self): - try: - messages = self.listener.poll() - if messages: - message = messages[0] - message.acknowledge() - self._received.set() - self.message = message - message.reply(reply=True) - except Exception: - LOG.exception(_LE("Unexpected exception occurred.")) - - def stop(self): - self.executor.stop() - - -class ZmqBaseTestCase(test_utils.BaseTestCase): - """Base test case for all ZMQ tests """ - - @testtools.skipIf(zmq is None, "zmq not available") - def setUp(self): - super(ZmqBaseTestCase, self).setUp() - self.messaging_conf.transport_driver = 'zmq' - zmq_options.register_opts(self.conf) - - # Set config values - self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path - kwargs = {'rpc_zmq_bind_address': '127.0.0.1', - 'rpc_zmq_host': '127.0.0.1', - 'rpc_zmq_ipc_dir': self.internal_ipc_dir, - 'use_pub_sub': False, - 'use_router_proxy': False, - 'rpc_zmq_matchmaker': 'dummy'} - self.config(group='oslo_messaging_zmq', **kwargs) - self.config(rpc_response_timeout=5) - - # Get driver - transport = oslo_messaging.get_transport(self.conf) - self.driver = transport._driver - - self.listener = TestServerListener(self.driver) - - self.addCleanup(StopRpc(self.__dict__)) - - -class StopRpc(object): - def __init__(self, attrs): - self.attrs = attrs - - def __call__(self): - if self.attrs['driver']: - self.attrs['driver'].cleanup() - if self.attrs['listener']: - self.attrs['listener'].stop() diff --git a/oslo_messaging/tests/functional/__init__.py b/oslo_messaging/tests/functional/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/functional/gate/gate_hook.sh b/oslo_messaging/tests/functional/gate/gate_hook.sh deleted file mode 100755 index 581f6a4..0000000 --- a/oslo_messaging/tests/functional/gate/gate_hook.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the 
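``TestServerListener._run`` above is the canonical poll/acknowledge/reply cycle for a poll-style listener. The same shape in isolation, with toy stand-ins for the driver's message and listener types::

    import threading

    class ToyMessage(object):
        def acknowledge(self):
            pass

        def reply(self, reply=None):
            pass

    class ToyListener(object):
        def poll(self):
            return [ToyMessage()]

    def run_once(listener, received):
        # Mirrors _run(): take the first polled message, ack it, record
        # that something arrived, then send a reply back to the caller.
        messages = listener.poll()
        if messages:
            message = messages[0]
            message.acknowledge()
            received.set()
            message.reply(reply=True)

    received = threading.Event()
    run_once(ToyListener(), received)
    assert received.is_set()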
"License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# TODO(sileht): delete once the infra code have been cleanup diff --git a/oslo_messaging/tests/functional/gate/post_test_hook.sh b/oslo_messaging/tests/functional/gate/post_test_hook.sh deleted file mode 100755 index 66cfaf2..0000000 --- a/oslo_messaging/tests/functional/gate/post_test_hook.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside post_test_hook function in devstack gate. - -RPC_BACKEND=$1 -PYTHON=${2:-py27} - -function generate_testr_results { - if [ -f .testrepository/0 ]; then - sudo .tox/${PYTHON}-func-${RPC_BACKEND}/bin/testr last --subunit > $WORKSPACE/testrepository.subunit - sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit - sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html - sudo gzip -9 $BASE/logs/testrepository.subunit - sudo gzip -9 $BASE/logs/testr_results.html - sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - fi -} - -# Allow jenkins to retrieve reports -sudo chown -R jenkins:stack $BASE/new/oslo.messaging - -set +e - -if [ -x "$(command -v yum)" ]; then - sudo yum install -y libuuid-devel swig pkg-config -else - sudo apt-get update -y - sudo apt-get install -y uuid-dev swig pkg-config -fi - -# Install required packages -case $RPC_BACKEND in - zeromq) - sudo apt-get update -y - sudo apt-get install -y redis-server python-redis - ;; - amqp1) - sudo yum install -y qpid-cpp-server qpid-proton-c-devel python-qpid-proton cyrus-sasl-lib cyrus-sasl-plain - ;; - rabbit) - sudo apt-get update -y - sudo apt-get install -y rabbitmq-server - ;; -esac - -# Got to the oslo.messaging dir -cd $BASE/new/oslo.messaging - -# Run tests -echo "Running oslo.messaging functional test suite" -# Preserve env for OS_ credentials -sudo -E -H -u jenkins tox -e ${PYTHON}-func-$RPC_BACKEND -EXIT_CODE=$? 
-set -e - -# Collect and parse result -generate_testr_results -exit $EXIT_CODE diff --git a/oslo_messaging/tests/functional/notify/__init__.py b/oslo_messaging/tests/functional/notify/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/functional/notify/test_logger.py b/oslo_messaging/tests/functional/notify/test_logger.py deleted file mode 100644 index 4716776..0000000 --- a/oslo_messaging/tests/functional/notify/test_logger.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2015 NetEase Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import testscenarios - -import oslo_messaging -from oslo_messaging.tests.functional import utils - -load_tests = testscenarios.load_tests_apply_scenarios - - -class LoggingNotificationHandlerTestCase(utils.SkipIfNoTransportURL): - """Test case for `oslo_messaging.LoggingNotificationHandler` - - Build up a logger using this handler, then test logging under the messaging - and messagingv2 drivers, making sure the expected logging notifications are - received. - """ - - _priority = [ - ('debug', dict(priority='debug')), - ('info', dict(priority='info')), - ('warn', dict(priority='warn')), - ('error', dict(priority='error')), - ('critical', dict(priority='critical')), - ] - - _driver = [ - ('messaging', dict(driver='messaging')), - ('messagingv2', dict(driver='messagingv2')), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._priority, - cls._driver) - - def test_logging(self): - # NOTE(gtt): Use a different topic per scenario to make tests run in - # parallel - topic = 'test_logging_%s_driver_%s' % (self.priority, self.driver) - - self.config(driver=[self.driver], - topics=[topic], - group='oslo_messaging_notifications') - - listener = self.useFixture( - utils.NotificationFixture(self.conf, self.url, [topic])) - - log_notify = oslo_messaging.LoggingNotificationHandler(self.url) - - log = logging.getLogger(topic) - log.setLevel(logging.DEBUG) - log.addHandler(log_notify) - - log_method = getattr(log, self.priority) - log_method('Test logging at priority: %s' % self.priority) - - events = listener.get_events(timeout=1) - self.assertEqual(1, len(events)) - - info_event = events[0] - - self.assertEqual(self.priority, info_event[0]) - self.assertEqual('logrecord', info_event[1]) - - for key in ['name', 'thread', 'extra', 'process', 'funcName', - 'levelno', 'processName', 'pathname', 'lineno', - 'msg', 'exc_info', 'levelname']: - self.assertTrue(key in info_event[2]) - - -LoggingNotificationHandlerTestCase.generate_scenarios() diff --git a/oslo_messaging/tests/functional/test_functional.py b/oslo_messaging/tests/functional/test_functional.py deleted file mode 100644 index 39422f5..0000000 --- a/oslo_messaging/tests/functional/test_functional.py +++ /dev/null @@ -1,346 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
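``generate_scenarios`` above builds the priority/driver cross product with ``testscenarios.multiply_scenarios``. A self-contained illustration of what that call produces::

    import testscenarios

    priorities = [('info', dict(priority='info')),
                  ('error', dict(priority='error'))]
    drivers = [('messaging', dict(driver='messaging')),
               ('messagingv2', dict(driver='messagingv2'))]

    scenarios = testscenarios.multiply_scenarios(priorities, drivers)
    # 2 x 2 = 4 scenarios; names are joined with commas and the keyword
    # dicts are merged, e.g.
    # ('info,messaging', {'priority': 'info', 'driver': 'messaging'}).
    assert len(scenarios) == 4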
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time -import uuid - -import concurrent.futures -from oslo_config import cfg -import six.moves -from testtools import matchers - -import oslo_messaging -from oslo_messaging.tests.functional import utils - - -class CallTestCase(utils.SkipIfNoTransportURL): - - def setUp(self): - super(CallTestCase, self).setUp(conf=cfg.ConfigOpts()) - - self.conf.prog = "test_prog" - self.conf.project = "test_project" - - self.config(heartbeat_timeout_threshold=0, - group='oslo_messaging_rabbit') - - def test_specific_server(self): - group = self.useFixture(utils.RpcServerGroupFixture( - self.conf, self.url) - ) - client = group.client(1) - client.append(text='open') - self.assertEqual('openstack', client.append(text='stack')) - client.add(increment=2) - self.assertEqual(12, client.add(increment=10)) - self.assertEqual(9, client.subtract(increment=3)) - self.assertEqual('openstack', group.servers[1].endpoint.sval) - self.assertEqual(9, group.servers[1].endpoint.ival) - for i in [0, 2]: - self.assertEqual('', group.servers[i].endpoint.sval) - self.assertEqual(0, group.servers[i].endpoint.ival) - - def test_server_in_group(self): - group = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url) - ) - - client = group.client() - data = [c for c in 'abcdefghijklmn'] - for i in data: - client.append(text=i) - - for s in group.servers: - self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) - actual = [[c for c in s.endpoint.sval] for s in group.servers] - self.assertThat(actual, utils.IsValidDistributionOf(data)) - - def test_different_exchanges(self): - # If the different exchanges are not honoured, then the - # teardown may hang unless we broadcast all control messages - # to each server - group1 = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url, - use_fanout_ctrl=True)) - group2 = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url, exchange="a", - use_fanout_ctrl=True)) - group3 = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url, exchange="b", - use_fanout_ctrl=True)) - - client1 = group1.client(1) - data1 = [c for c in 'abcdefghijklmn'] - for i in data1: - client1.append(text=i) - - client2 = group2.client() - data2 = [c for c in 'opqrstuvwxyz'] - for i in data2: - client2.append(text=i) - - actual1 = [[c for c in s.endpoint.sval] for s in group1.servers] - self.assertThat(actual1, utils.IsValidDistributionOf(data1)) - actual1 = [c for c in group1.servers[1].endpoint.sval] - self.assertThat([actual1], utils.IsValidDistributionOf(data1)) - for s in group1.servers: - expected = len(data1) if group1.servers.index(s) == 1 else 0 - self.assertEqual(expected, len(s.endpoint.sval)) - self.assertEqual(0, s.endpoint.ival) - - actual2 = [[c for c in s.endpoint.sval] for s in group2.servers] - for s in group2.servers: - self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) - self.assertEqual(0, s.endpoint.ival) - self.assertThat(actual2, utils.IsValidDistributionOf(data2)) - - for s in group3.servers: - self.assertEqual(0, len(s.endpoint.sval)) - self.assertEqual(0, s.endpoint.ival) - - def test_timeout(self): - 
transport = self.useFixture( - utils.TransportFixture(self.conf, self.url) - ) - target = oslo_messaging.Target(topic="no_such_topic") - c = utils.ClientStub(transport.transport, target, timeout=1) - self.assertThat(c.ping, - matchers.raises(oslo_messaging.MessagingTimeout)) - - def test_exception(self): - group = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url) - ) - client = group.client(1) - client.add(increment=2) - self.assertRaises(ValueError, client.subtract, increment=3) - - def test_timeout_with_concurrently_queues(self): - transport = self.useFixture( - utils.TransportFixture(self.conf, self.url) - ) - target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), - server="server_" + str(uuid.uuid4())) - server = self.useFixture( - utils.RpcServerFixture(self.conf, self.url, target, - executor="threading")) - client = utils.ClientStub(transport.transport, target, - cast=False, timeout=5) - - def short_periodical_tasks(): - for i in range(10): - client.add(increment=1) - time.sleep(1) - - with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: - future = executor.submit(client.long_running_task, seconds=10) - executor.submit(short_periodical_tasks) - self.assertRaises(oslo_messaging.MessagingTimeout, future.result) - - self.assertEqual(10, server.endpoint.ival) - - -class CastTestCase(utils.SkipIfNoTransportURL): - # Note: casts return immediately, so these tests utilise a special - # internal sync() cast to ensure prior casts are complete before - # making the necessary assertions. - - def test_specific_server(self): - group = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url) - ) - client = group.client(1, cast=True) - client.append(text='open') - client.append(text='stack') - client.add(increment=2) - client.add(increment=10) - client.sync() - - group.sync(1) - self.assertIn(group.servers[1].endpoint.sval, - ["openstack", "stackopen"]) - self.assertEqual(12, group.servers[1].endpoint.ival) - for i in [0, 2]: - self.assertEqual('', group.servers[i].endpoint.sval) - self.assertEqual(0, group.servers[i].endpoint.ival) - - def test_server_in_group(self): - if self.url.startswith("amqp:"): - self.skipTest("QPID-6307") - group = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url) - ) - client = group.client(cast=True) - for i in range(20): - client.add(increment=1) - for i in range(len(group.servers)): - # expect each server to get a sync - client.sync() - group.sync(server="all") - total = 0 - for s in group.servers: - ival = s.endpoint.ival - self.assertThat(ival, matchers.GreaterThan(0)) - self.assertThat(ival, matchers.LessThan(20)) - total += ival - self.assertEqual(20, total) - - def test_fanout(self): - group = self.useFixture( - utils.RpcServerGroupFixture(self.conf, self.url) - ) - client = group.client('all', cast=True) - client.append(text='open') - client.append(text='stack') - client.add(increment=2) - client.add(increment=10) - client.sync() - group.sync(server='all') - for s in group.servers: - self.assertIn(s.endpoint.sval, ["openstack", "stackopen"]) - self.assertEqual(12, s.endpoint.ival) - - -class NotifyTestCase(utils.SkipIfNoTransportURL): - # NOTE(sileht): Each test must not use the same topics - # to be run in parallel - - def test_simple(self): - listener = self.useFixture( - utils.NotificationFixture(self.conf, self.url, ['test_simple'])) - notifier = listener.notifier('abc') - - notifier.info({}, 'test', 'Hello World!') - event = listener.events.get(timeout=1) - self.assertEqual('info', 
event[0]) - self.assertEqual('test', event[1]) - self.assertEqual('Hello World!', event[2]) - self.assertEqual('abc', event[3]) - - def test_multiple_topics(self): - listener = self.useFixture( - utils.NotificationFixture(self.conf, self.url, ['a', 'b'])) - a = listener.notifier('pub-a', topic='a') - b = listener.notifier('pub-b', topic='b') - - sent = { - 'pub-a': [a, 'test-a', 'payload-a'], - 'pub-b': [b, 'test-b', 'payload-b'] - } - for e in sent.values(): - e[0].info({}, e[1], e[2]) - - received = {} - while len(received) < len(sent): - e = listener.events.get(timeout=1) - received[e[3]] = e - - for key in received: - actual = received[key] - expected = sent[key] - self.assertEqual('info', actual[0]) - self.assertEqual(expected[1], actual[1]) - self.assertEqual(expected[2], actual[2]) - - def test_multiple_servers(self): - if self.url.startswith("amqp:"): - self.skipTest("QPID-6307") - if self.url.startswith("zmq:"): - self.skipTest("ZeroMQ-PUB-SUB") - - listener_a = self.useFixture( - utils.NotificationFixture(self.conf, self.url, ['test-topic'])) - - listener_b = self.useFixture( - utils.NotificationFixture(self.conf, self.url, ['test-topic'])) - - n = listener_a.notifier('pub') - - events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh'] - for event_type, payload in events_out: - n.info({}, event_type, payload) - - events_in = [[(e[1], e[2]) for e in listener_a.get_events()], - [(e[1], e[2]) for e in listener_b.get_events()]] - - self.assertThat(events_in, utils.IsValidDistributionOf(events_out)) - for stream in events_in: - self.assertThat(len(stream), matchers.GreaterThan(0)) - - def test_independent_topics(self): - listener_a = self.useFixture( - utils.NotificationFixture(self.conf, self.url, ['1'])) - listener_b = self.useFixture( - utils.NotificationFixture(self.conf, self.url, ['2'])) - - a = listener_a.notifier('pub-1', topic='1') - b = listener_b.notifier('pub-2', topic='2') - - a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh'] - for event_type, payload in a_out: - a.info({}, event_type, payload) - - b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop'] - for event_type, payload in b_out: - b.info({}, event_type, payload) - - for expected in a_out: - actual = listener_a.events.get(timeout=0.5) - self.assertEqual('info', actual[0]) - self.assertEqual(expected[0], actual[1]) - self.assertEqual(expected[1], actual[2]) - self.assertEqual('pub-1', actual[3]) - - for expected in b_out: - actual = listener_b.events.get(timeout=0.5) - self.assertEqual('info', actual[0]) - self.assertEqual(expected[0], actual[1]) - self.assertEqual(expected[1], actual[2]) - self.assertEqual('pub-2', actual[3]) - - def test_all_categories(self): - listener = self.useFixture(utils.NotificationFixture( - self.conf, self.url, ['test_all_categories'])) - n = listener.notifier('abc') - - cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical'] - events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats] - for e in events: - e[0]({}, e[2], e[3]) - - # order between events with different categories is not guaranteed - received = {} - for expected in events: - e = listener.events.get(timeout=1) - received[e[0]] = e - - for expected in events: - actual = received[expected[1]] - self.assertEqual(expected[1], actual[0]) - self.assertEqual(expected[2], actual[1]) - self.assertEqual(expected[3], actual[2]) - - def test_simple_batch(self): - listener = self.useFixture( - utils.BatchNotificationFixture(self.conf, self.url, - ['test_simple_batch'], - 
batch_size=100, batch_timeout=2)) - notifier = listener.notifier('abc') - - for i in six.moves.range(0, 205): - notifier.info({}, 'test%s' % i, 'Hello World!') - events = listener.get_events(timeout=3) - self.assertEqual(3, len(events), events) - self.assertEqual(100, len(events[0][1])) - self.assertEqual(100, len(events[1][1])) - self.assertEqual(5, len(events[2][1])) diff --git a/oslo_messaging/tests/functional/test_rabbitmq.py b/oslo_messaging/tests/functional/test_rabbitmq.py deleted file mode 100644 index 9a0427e..0000000 --- a/oslo_messaging/tests/functional/test_rabbitmq.py +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os -import signal -import time - -import fixtures -from pifpaf.drivers import rabbitmq - -from oslo_messaging.tests.functional import utils -from oslo_messaging.tests import utils as test_utils - - -class ConnectedPortMatcher(object): - def __init__(self, port): - self.port = port - - def __eq__(self, data): - return data.get("port") == self.port - - def __repr__(self): - return "<ConnectedPortMatcher port=%d>" % self.port - - -class RabbitMQFailoverTests(test_utils.BaseTestCase): - DRIVERS = [ - "rabbit", - ] - - def test_failover_scenario(self): - # NOTE(sileht): run this test only if the functional suite runs with a - # driver that uses rabbitmq as its backend - self.driver = os.environ.get('TRANSPORT_DRIVER') - if self.driver not in self.DRIVERS: - self.skipTest("TRANSPORT_DRIVER is not set to a rabbit driver") - - # NOTE(sileht): Allow only one response at a time, to - # have only one tcp connection for reply and ensure it will fail over - # correctly - self.config(heartbeat_timeout_threshold=1, - rpc_conn_pool_size=1, - kombu_reconnect_delay=0, - rabbit_retry_interval=0, - rabbit_retry_backoff=0, - group='oslo_messaging_rabbit') - - self.pifpaf = self.useFixture(rabbitmq.RabbitMQDriver(cluster=True, - port=5692)) - - self.url = self.pifpaf.env["PIFPAF_URL"] - self.n1 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME1"] - self.n2 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME2"] - self.n3 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME3"] - - # NOTE(gdavoian): additional tweak for pika driver - if self.driver == "pika": - self.url = self.url.replace("rabbit", "pika") - - # ensure connections will be established to the first node - self.pifpaf.stop_node(self.n2) - self.pifpaf.stop_node(self.n3) - - self.servers = self.useFixture(utils.RpcServerGroupFixture( - self.conf, self.url, endpoint=self, names=["server"])) - - # Don't randomize rabbit hosts - self.useFixture(fixtures.MockPatch( - 'oslo_messaging._drivers.impl_rabbit.random', - side_effect=lambda x: x)) - - # NOTE(sileht): this connects server connections and reply - # connection to nodename n1 - self.client = self.servers.client(0) - self.client.ping() - self._check_ports(self.pifpaf.port) - - # Switch to node n2 - self.pifpaf.start_node(self.n2) - self.assertEqual("callback done", self.client.kill_and_process()) - self.assertEqual("callback done", self.client.just_process()) - self._check_ports(self.pifpaf.get_port(self.n2))
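``ConnectedPortMatcher`` above works because mock compares expected and actual call arguments with ``==``, so any object defining ``__eq__`` can act as a wildcard. The pattern on its own, with a hypothetical matcher name::

    import mock  # unittest.mock on Python 3

    class PortMatcher(object):
        def __init__(self, port):
            self.port = port

        def __eq__(self, data):
            # Match any dict whose "port" entry equals the expected port.
            return data.get("port") == self.port

        def __repr__(self):
            return "<PortMatcher port=%d>" % self.port

    callback = mock.Mock()
    callback({"host": "n1", "port": 5692})
    callback.assert_called_once_with(PortMatcher(5692))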
- - # Switch to node n3 - self.pifpaf.start_node(self.n3) - time.sleep(0.1) - self.pifpaf.kill_node(self.n2, signal=signal.SIGKILL) - time.sleep(0.1) - self.assertEqual("callback done", self.client.just_process()) - self._check_ports(self.pifpaf.get_port(self.n3)) - - self.pifpaf.start_node(self.n1) - time.sleep(0.1) - self.pifpaf.kill_node(self.n3, signal=signal.SIGKILL) - time.sleep(0.1) - self.assertEqual("callback done", self.client.just_process()) - self._check_ports(self.pifpaf.get_port(self.n1)) - - def kill_and_process(self, *args, **kargs): - self.pifpaf.kill_node(self.n1, signal=signal.SIGKILL) - time.sleep(0.1) - return "callback done" - - def just_process(self, *args, **kargs): - return "callback done" - - def _get_log_call_startswith(self, filter): - return [call for call in self.logger.debug.mock_calls - if call[1][0].startswith(filter)] - - def _check_ports(self, port): - getattr(self, '_check_ports_%s_driver' % self.driver)(port) - - def _check_ports_pika_driver(self, port): - rpc_server = self.servers.servers[0].server - # FIXME(sileht): Check other connections - connections = [ - rpc_server.listener._connection - ] - for conn in connections: - self.assertEqual( - port, conn._impl.socket.getpeername()[1]) - - def _check_ports_rabbit_driver(self, port): - rpc_server = self.servers.servers[0].server - connection_contexts = [ - # rpc server - rpc_server.listener._poll_style_listener.conn, - # rpc client - self.client.client.transport._driver._get_connection(), - # rpc client replies waiter - self.client.client.transport._driver._reply_q_conn, - ] - - for cctxt in connection_contexts: - socket = cctxt.connection.channel.connection.sock - self.assertEqual(port, socket.getpeername()[1]) diff --git a/oslo_messaging/tests/functional/utils.py b/oslo_messaging/tests/functional/utils.py deleted file mode 100644 index 1d215a5..0000000 --- a/oslo_messaging/tests/functional/utils.py +++ /dev/null @@ -1,430 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
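The ``_check_ports_*`` helpers above decide which broker node a connection landed on by reading the TCP peer address off the underlying socket. That check in isolation, against a throwaway local socket pair::

    import socket

    server = socket.socket()
    server.bind(("127.0.0.1", 0))   # port 0: let the OS pick a free port
    server.listen(1)
    port = server.getsockname()[1]

    client = socket.socket()
    client.connect(("127.0.0.1", port))
    # getpeername() returns the (host, port) of the remote end, which is
    # exactly what the failover test compares against the broker's port.
    assert client.getpeername()[1] == port
    client.close()
    server.close()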
- -import os -import time -import uuid - -import fixtures -from oslo_config import cfg -from six import moves - -import oslo_messaging -from oslo_messaging.notify import notifier -from oslo_messaging.tests import utils as test_utils - - -class TestServerEndpoint(object): - """This MessagingServer endpoint is used during functional testing.""" - - def __init__(self): - self.ival = 0 - self.sval = '' - - def add(self, ctxt, increment): - self.ival += increment - return self.ival - - def subtract(self, ctxt, increment): - if self.ival < increment: - raise ValueError("ival can't go negative!") - self.ival -= increment - return self.ival - - def append(self, ctxt, text): - self.sval += text - return self.sval - - def long_running_task(self, ctxt, seconds): - time.sleep(seconds) - - -class TransportFixture(fixtures.Fixture): - """Fixture defined to set up the oslo_messaging transport.""" - - def __init__(self, conf, url): - self.conf = conf - self.url = url - - def setUp(self): - super(TransportFixture, self).setUp() - self.transport = oslo_messaging.get_transport(self.conf, url=self.url) - - def cleanUp(self): - try: - self.transport.cleanup() - except fixtures.TimeoutException: - pass - super(TransportFixture, self).cleanUp() - - def wait(self): - # allow time for the server to connect to the broker - time.sleep(0.5) - - -class RpcServerFixture(fixtures.Fixture): - """Fixture to set up the TestServerEndpoint.""" - - def __init__(self, conf, url, target, endpoint=None, ctrl_target=None, - executor='eventlet'): - super(RpcServerFixture, self).__init__() - self.conf = conf - self.url = url - self.target = target - self.endpoint = endpoint or TestServerEndpoint() - self.executor = executor - self.syncq = moves.queue.Queue() - self.ctrl_target = ctrl_target or self.target - - def setUp(self): - super(RpcServerFixture, self).setUp() - endpoints = [self.endpoint, self] - transport = self.useFixture(TransportFixture(self.conf, self.url)) - self.server = oslo_messaging.get_rpc_server( - transport=transport.transport, - target=self.target, - endpoints=endpoints, - executor=self.executor) - self._ctrl = oslo_messaging.RPCClient(transport.transport, - self.ctrl_target) - self._start() - transport.wait() - - def cleanUp(self): - self._stop() - super(RpcServerFixture, self).cleanUp() - - def _start(self): - self.thread = test_utils.ServerThreadHelper(self.server) - self.thread.start() - - def _stop(self): - self.thread.stop() - self._ctrl.cast({}, 'ping') - self.thread.join() - - def ping(self, ctxt): - pass - - def sync(self, ctxt): - self.syncq.put('x') - - -class RpcServerGroupFixture(fixtures.Fixture): - def __init__(self, conf, url, topic=None, names=None, exchange=None, - use_fanout_ctrl=False, endpoint=None): - self.conf = conf - self.url = url - # NOTE(sileht): topic and server_name must be unique - # to be able to run all tests in parallel - self.topic = topic or str(uuid.uuid4()) - self.names = names or ["server_%i_%s" % (i, str(uuid.uuid4())[:8]) - for i in range(3)] - self.exchange = exchange - self.targets = [self._target(server=n) for n in self.names] - self.use_fanout_ctrl = use_fanout_ctrl - self.endpoint = endpoint - - def setUp(self): - super(RpcServerGroupFixture, self).setUp() - self.servers = [self.useFixture(self._server(t)) for t in self.targets] - - def _target(self, server=None, fanout=False): - t = oslo_messaging.Target(exchange=self.exchange, topic=self.topic) - t.server = server - t.fanout = fanout - return t - - def _server(self, target): - ctrl = None - if self.use_fanout_ctrl: -
ctrl = self._target(fanout=True) - server = RpcServerFixture(self.conf, self.url, target, - endpoint=self.endpoint, - ctrl_target=ctrl) - return server - - def client(self, server=None, cast=False): - if server is None: - target = self._target() - else: - if server == 'all': - target = self._target(fanout=True) - elif server >= 0 and server < len(self.targets): - target = self.targets[server] - else: - raise ValueError("Invalid value for server: %r" % server) - - transport = self.useFixture(TransportFixture(self.conf, self.url)) - client = ClientStub(transport.transport, target, cast=cast, - timeout=5) - transport.wait() - return client - - def sync(self, server=None): - if server is None: - for i in range(len(self.servers)): - self.client(i).ping() - else: - if server == 'all': - for s in self.servers: - s.syncq.get(timeout=5) - elif server >= 0 and server < len(self.targets): - self.servers[server].syncq.get(timeout=5) - else: - raise ValueError("Invalid value for server: %r" % server) - - -class RpcCall(object): - def __init__(self, client, method, context): - self.client = client - self.method = method - self.context = context - - def __call__(self, **kwargs): - self.context['time'] = time.ctime() - self.context['cast'] = False - result = self.client.call(self.context, self.method, **kwargs) - return result - - -class RpcCast(RpcCall): - def __call__(self, **kwargs): - self.context['time'] = time.ctime() - self.context['cast'] = True - self.client.cast(self.context, self.method, **kwargs) - - -class ClientStub(object): - def __init__(self, transport, target, cast=False, name=None, **kwargs): - self.name = name or "functional-tests" - self.cast = cast - self.client = oslo_messaging.RPCClient(transport, target, **kwargs) - - def __getattr__(self, name): - context = {"application": self.name} - if self.cast: - return RpcCast(self.client, name, context) - else: - return RpcCall(self.client, name, context) - - -class InvalidDistribution(object): - def __init__(self, original, received): - self.original = original - self.received = received - self.missing = [] - self.extra = [] - self.wrong_order = [] - - def describe(self): - text = "Sent %s, got %s; " % (self.original, self.received) - e1 = ["%r was missing" % m for m in self.missing] - e2 = ["%r was not expected" % m for m in self.extra] - e3 = ["%r expected before %r" % (m[0], m[1]) for m in self.wrong_order] - return text + ", ".join(e1 + e2 + e3) - - def __len__(self): - return len(self.extra) + len(self.missing) + len(self.wrong_order) - - def get_details(self): - return {} - - -class IsValidDistributionOf(object): - """Test whether a given list can be split into particular - sub-lists. All items in the original list must be in exactly one - sub-list, and must appear in that sub-list in the same order with - respect to any other items as in the original list. 
- """ - def __init__(self, original): - self.original = original - - def __str__(self): - return 'IsValidDistribution(%s)' % self.original - - def match(self, actual): - errors = InvalidDistribution(self.original, actual) - received = [[i for i in l] for l in actual] - - def _remove(obj, lists): - for l in lists: - if obj in l: - front = l[0] - l.remove(obj) - return front - return None - - for item in self.original: - o = _remove(item, received) - if not o: - errors.missing += item - elif item != o: - errors.wrong_order.append([item, o]) - for l in received: - errors.extra += l - return errors or None - - -class SkipIfNoTransportURL(test_utils.BaseTestCase): - def setUp(self, conf=cfg.CONF): - super(SkipIfNoTransportURL, self).setUp(conf=conf) - - driver = os.environ.get("TRANSPORT_DRIVER") - if driver: - self.url = os.environ.get('PIFPAF_URL') - if driver == "pika" and self.url: - self.url = self.url.replace("rabbit://", "pika://") - else: - self.url = os.environ.get('TRANSPORT_URL') - - if not self.url: - self.skipTest("No transport url configured") - - zmq_matchmaker = os.environ.get('ZMQ_MATCHMAKER') - if zmq_matchmaker: - self.config(rpc_zmq_matchmaker=zmq_matchmaker, - group="oslo_messaging_zmq") - zmq_ipc_dir = os.environ.get('ZMQ_IPC_DIR') - if zmq_ipc_dir: - self.config(group="oslo_messaging_zmq", - rpc_zmq_ipc_dir=zmq_ipc_dir) - zmq_redis_port = os.environ.get('ZMQ_REDIS_PORT') - if zmq_redis_port: - self.config(port=zmq_redis_port, group="matchmaker_redis") - self.config(check_timeout=10000, group="matchmaker_redis") - self.config(wait_timeout=1000, group="matchmaker_redis") - zmq_use_pub_sub = os.environ.get('ZMQ_USE_PUB_SUB') - if zmq_use_pub_sub: - self.config(use_pub_sub=zmq_use_pub_sub, - group='oslo_messaging_zmq') - zmq_use_router_proxy = os.environ.get('ZMQ_USE_ROUTER_PROXY') - if zmq_use_router_proxy: - self.config(use_router_proxy=zmq_use_router_proxy, - group='oslo_messaging_zmq') - - -class NotificationFixture(fixtures.Fixture): - def __init__(self, conf, url, topics, batch=None): - super(NotificationFixture, self).__init__() - self.conf = conf - self.url = url - self.topics = topics - self.events = moves.queue.Queue() - self.name = str(id(self)) - self.batch = batch - - def setUp(self): - super(NotificationFixture, self).setUp() - targets = [oslo_messaging.Target(topic=t) for t in self.topics] - # add a special topic for internal notifications - targets.append(oslo_messaging.Target(topic=self.name)) - transport = self.useFixture(TransportFixture(self.conf, self.url)) - self.server = self._get_server(transport, targets) - self._ctrl = self.notifier('internal', topic=self.name) - self._start() - transport.wait() - - def cleanUp(self): - self._stop() - super(NotificationFixture, self).cleanUp() - - def _get_server(self, transport, targets): - return oslo_messaging.get_notification_listener( - transport.transport, - targets, - [self], 'eventlet') - - def _start(self): - self.thread = test_utils.ServerThreadHelper(self.server) - self.thread.start() - - def _stop(self): - self.thread.stop() - self._ctrl.sample({}, 'shutdown', 'shutdown') - self.thread.join() - - def notifier(self, publisher, topic=None): - transport = self.useFixture(TransportFixture(self.conf, self.url)) - n = notifier.Notifier(transport.transport, - publisher, - driver='messaging', - topic=topic or self.topics[0]) - transport.wait() - return n - - def debug(self, ctxt, publisher, event_type, payload, metadata): - self.events.put(['debug', event_type, payload, publisher]) - - def audit(self, ctxt, 
publisher, event_type, payload, metadata): - self.events.put(['audit', event_type, payload, publisher]) - - def info(self, ctxt, publisher, event_type, payload, metadata): - self.events.put(['info', event_type, payload, publisher]) - - def warn(self, ctxt, publisher, event_type, payload, metadata): - self.events.put(['warn', event_type, payload, publisher]) - - def error(self, ctxt, publisher, event_type, payload, metadata): - self.events.put(['error', event_type, payload, publisher]) - - def critical(self, ctxt, publisher, event_type, payload, metadata): - self.events.put(['critical', event_type, payload, publisher]) - - def sample(self, ctxt, publisher, event_type, payload, metadata): - pass # Just used for internal shutdown control - - def get_events(self, timeout=0.5): - results = [] - try: - while True: - results.append(self.events.get(timeout=timeout)) - except moves.queue.Empty: - pass - return results - - -class BatchNotificationFixture(NotificationFixture): - def __init__(self, conf, url, topics, batch_size=5, batch_timeout=2): - super(BatchNotificationFixture, self).__init__(conf, url, topics) - self.batch_size = batch_size - self.batch_timeout = batch_timeout - - def _get_server(self, transport, targets): - return oslo_messaging.get_batch_notification_listener( - transport.transport, - targets, - [self], 'eventlet', - batch_timeout=self.batch_timeout, - batch_size=self.batch_size) - - def debug(self, messages): - self.events.put(['debug', messages]) - - def audit(self, messages): - self.events.put(['audit', messages]) - - def info(self, messages): - self.events.put(['info', messages]) - - def warn(self, messages): - self.events.put(['warn', messages]) - - def error(self, messages): - self.events.put(['error', messages]) - - def critical(self, messages): - self.events.put(['critical', messages]) - - def sample(self, messages): - pass # Just used for internal shutdown control diff --git a/oslo_messaging/tests/functional/zmq/__init__.py b/oslo_messaging/tests/functional/zmq/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/functional/zmq/multiproc_utils.py b/oslo_messaging/tests/functional/zmq/multiproc_utils.py deleted file mode 100644 index 4a1498a..0000000 --- a/oslo_messaging/tests/functional/zmq/multiproc_utils.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import logging.handlers -import multiprocessing -import os -import sys -import threading -import time -import uuid - -from oslo_config import cfg - -import oslo_messaging -from oslo_messaging._drivers.zmq_driver import zmq_async -from oslo_messaging.tests.functional import utils - - -zmq = zmq_async.import_zmq() - -LOG = logging.getLogger(__name__) - - -class QueueHandler(logging.Handler): - """This is a logging handler which sends events to a multiprocessing queue. - - The plan is to add it to Python 3.2, but this can be copy pasted into - user code for use with earlier Python versions. 
- """ - - def __init__(self, queue): - """Initialise an instance, using the passed queue.""" - logging.Handler.__init__(self) - self.queue = queue - - def emit(self, record): - """Emit a record. - - Writes the LogRecord to the queue. - """ - try: - ei = record.exc_info - if ei: - # just to get traceback text into record.exc_text - dummy = self.format(record) # noqa - record.exc_info = None # not needed any more - self.queue.put_nowait(record) - except (KeyboardInterrupt, SystemExit): - raise - except Exception: - self.handleError(record) - - -def listener_configurer(conf): - root = logging.getLogger() - h = logging.StreamHandler(sys.stdout) - f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s ' - '%(levelname)-8s %(message)s') - h.setFormatter(f) - root.addHandler(h) - log_path = conf.oslo_messaging_zmq.rpc_zmq_ipc_dir + \ - "/" + "zmq_multiproc.log" - file_handler = logging.StreamHandler(open(log_path, 'w')) - file_handler.setFormatter(f) - root.addHandler(file_handler) - - -def server_configurer(queue): - h = QueueHandler(queue) - root = logging.getLogger() - root.addHandler(h) - root.setLevel(logging.DEBUG) - - -def listener_thread(queue, configurer, conf): - configurer(conf) - while True: - time.sleep(0.3) - try: - record = queue.get() - if record is None: - break - logger = logging.getLogger(record.name) - logger.handle(record) - except (KeyboardInterrupt, SystemExit): - raise - - -class Client(oslo_messaging.RPCClient): - - def __init__(self, transport, topic): - super(Client, self).__init__( - transport=transport, target=oslo_messaging.Target(topic=topic)) - self.replies = [] - - def call_a(self): - LOG.warning("call_a - client side") - rep = self.call({}, 'call_a') - LOG.warning("after call_a - client side") - self.replies.append(rep) - return rep - - -class ReplyServerEndpoint(object): - - def call_a(self, *args, **kwargs): - LOG.warning("call_a - Server endpoint reached!") - return "OK" - - -class Server(object): - - def __init__(self, conf, log_queue, transport_url, name, topic=None): - self.conf = conf - self.log_queue = log_queue - self.transport_url = transport_url - self.name = name - self.topic = topic or str(uuid.uuid4()) - self.ready = multiprocessing.Value('b', False) - self._stop = multiprocessing.Event() - - def start(self): - self.process = multiprocessing.Process(target=self._run_server, - name=self.name, - args=(self.conf, - self.transport_url, - self.log_queue, - self.ready)) - self.process.start() - LOG.debug("Server process started: pid: %d", self.process.pid) - - def _run_server(self, conf, url, log_queue, ready): - server_configurer(log_queue) - LOG.debug("Starting RPC server") - - transport = oslo_messaging.get_transport(conf, url=url) - target = oslo_messaging.Target(topic=self.topic, server=self.name) - self.rpc_server = oslo_messaging.get_rpc_server( - transport=transport, target=target, - endpoints=[ReplyServerEndpoint()], - executor='eventlet') - self.rpc_server.start() - ready.value = True - LOG.debug("RPC server being started") - while not self._stop.is_set(): - LOG.debug("Waiting for the stop signal ...") - time.sleep(1) - self.rpc_server.stop() - LOG.debug("Leaving process T:%s Pid:%d", str(target), os.getpid()) - - def cleanup(self): - LOG.debug("Stopping server") - self.shutdown() - - def shutdown(self): - self._stop.set() - - def restart(self, time_for_restart=1): - pass - - def hang(self): - pass - - def crash(self): - pass - - def ping(self): - pass - - -class MutliprocTestCase(utils.SkipIfNoTransportURL): - - def setUp(self): - 
super(MutliprocTestCase, self).setUp(conf=cfg.ConfigOpts()) - - if not self.url.startswith("zmq:"): - self.skipTest("ZeroMQ specific skipped ...") - - self.transport = oslo_messaging.get_transport(self.conf, url=self.url) - - LOG.debug("Start log queue") - - self.log_queue = multiprocessing.Queue() - self.log_listener = threading.Thread(target=listener_thread, - args=(self.log_queue, - listener_configurer, - self.conf)) - self.log_listener.start() - self.spawned = [] - - self.conf.prog = "test_prog" - self.conf.project = "test_project" - - def tearDown(self): - super(MutliprocTestCase, self).tearDown() - for process in self.spawned: - process.cleanup() - - def get_client(self, topic): - return Client(self.transport, topic) - - def spawn_server(self, name, wait_for_server=False, topic=None): - srv = Server(self.conf, self.log_queue, self.url, name, topic) - LOG.debug("[SPAWN] %s (starting)...", srv.name) - srv.start() - if wait_for_server: - while not srv.ready.value: - LOG.debug("[SPAWN] %s (waiting for server ready)...", - srv.name) - time.sleep(1) - LOG.debug("[SPAWN] Server %s:%d started.", srv.name, srv.process.pid) - self.spawned.append(srv) - return srv - - def spawn_servers(self, number, wait_for_server=False, random_topic=True): - common_topic = str(uuid.uuid4()) if random_topic else None - names = ["server_%i_%s" % (i, str(uuid.uuid4())[:8]) - for i in range(number)] - for name in names: - # spawn_server() already records the new server in self.spawned - self.spawn_server(name, wait_for_server, common_topic) diff --git a/oslo_messaging/tests/functional/zmq/test_startup.py b/oslo_messaging/tests/functional/zmq/test_startup.py deleted file mode 100644 index f1b89b0..0000000 --- a/oslo_messaging/tests/functional/zmq/test_startup.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
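The ``Server`` fixture above hands a ``multiprocessing.Value`` to the child so the parent can block until the RPC server is actually listening. The ready-flag handshake reduced to its essentials (the sleep stands in for real server startup)::

    import multiprocessing
    import time

    def worker(ready):
        # ... start the server here, then advertise readiness ...
        ready.value = True
        time.sleep(0.1)

    if __name__ == '__main__':   # required on spawn-based platforms
        ready = multiprocessing.Value('b', False)
        proc = multiprocessing.Process(target=worker, args=(ready,))
        proc.start()
        while not ready.value:
            time.sleep(0.01)     # parent waits for the child to flip the flag
        proc.join()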
- -import logging -import os -import sys - -from oslo_messaging.tests.functional.zmq import multiproc_utils - - -LOG = logging.getLogger(__name__) - - -class StartupOrderTestCase(multiproc_utils.MutliprocTestCase): - - def setUp(self): - super(StartupOrderTestCase, self).setUp() - - self.conf.prog = "test_prog" - self.conf.project = "test_project" - - self.config(rpc_response_timeout=30) - - log_path = os.path.join(self.conf.oslo_messaging_zmq.rpc_zmq_ipc_dir, - str(os.getpid()) + ".log") - sys.stdout = open(log_path, "w", buffering=0) - - def test_call_server_before_client(self): - self.spawn_servers(3, wait_for_server=True, random_topic=False) - servers = self.spawned - client = self.get_client(servers[0].topic) - for i in range(3): - reply = client.call_a() - self.assertIsNotNone(reply) - self.assertEqual(3, len(client.replies)) - - def test_call_client_dont_wait_for_server(self): - self.spawn_servers(3, wait_for_server=False, random_topic=False) - servers = self.spawned - client = self.get_client(servers[0].topic) - for i in range(3): - reply = client.call_a() - self.assertIsNotNone(reply) - self.assertEqual(3, len(client.replies)) diff --git a/oslo_messaging/tests/notify/__init__.py b/oslo_messaging/tests/notify/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/notify/test_dispatcher.py b/oslo_messaging/tests/notify/test_dispatcher.py deleted file mode 100644 index 18744fa..0000000 --- a/oslo_messaging/tests/notify/test_dispatcher.py +++ /dev/null @@ -1,239 +0,0 @@ - -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
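The dispatcher tests that follow all exercise one rule: the notification's ``priority`` field selects the endpoint method to invoke, and unknown priorities are dropped with a warning. A stripped-down sketch of that rule (not oslo's real dispatcher; names are illustrative)::

    class MiniDispatcher(object):
        PRIORITIES = ('audit', 'debug', 'info', 'warn', 'error', 'critical')

        def __init__(self, endpoints):
            self.endpoints = endpoints

        def dispatch(self, message):
            priority = message.get('priority')
            if priority not in self.PRIORITIES:
                return None  # mirrors the "unknown priority" path below
            for endpoint in self.endpoints:
                method = getattr(endpoint, priority, None)
                if method is not None:
                    return method(message['payload'])

    class Collector(object):
        def __init__(self):
            self.seen = []

        def info(self, payload):
            self.seen.append(payload)

    collector = Collector()
    MiniDispatcher([collector]).dispatch({'priority': 'info', 'payload': 'x'})
    assert collector.seen == ['x']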
- -from oslo_utils import timeutils -import testscenarios - -import oslo_messaging -from oslo_messaging.notify import dispatcher as notify_dispatcher -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - -load_tests = testscenarios.load_tests_apply_scenarios - -notification_msg = dict( - publisher_id="publisher_id", - event_type="compute.start", - payload={"info": "fuu"}, - message_id="uuid", - timestamp=str(timeutils.utcnow()) -) - - -class TestDispatcher(test_utils.BaseTestCase): - - scenarios = [ - ('no_endpoints', - dict(endpoints=[], - endpoints_expect_calls=[], - priority='info', - ex=None, - return_value=oslo_messaging.NotificationResult.HANDLED)), - ('one_endpoints', - dict(endpoints=[['warn']], - endpoints_expect_calls=['warn'], - priority='warn', - ex=None, - return_value=oslo_messaging.NotificationResult.HANDLED)), - ('two_endpoints_only_one_match', - dict(endpoints=[['warn'], ['info']], - endpoints_expect_calls=[None, 'info'], - priority='info', - ex=None, - return_value=oslo_messaging.NotificationResult.HANDLED)), - ('two_endpoints_both_match', - dict(endpoints=[['debug', 'info'], ['info', 'debug']], - endpoints_expect_calls=['debug', 'debug'], - priority='debug', - ex=None, - return_value=oslo_messaging.NotificationResult.HANDLED)), - ('no_return_value', - dict(endpoints=[['warn']], - endpoints_expect_calls=['warn'], - priority='warn', - ex=None, return_value=None)), - ('requeue', - dict(endpoints=[['debug', 'warn']], - endpoints_expect_calls=['debug'], - priority='debug', msg=notification_msg, - ex=None, - return_value=oslo_messaging.NotificationResult.REQUEUE)), - ('exception', - dict(endpoints=[['debug', 'warn']], - endpoints_expect_calls=['debug'], - priority='debug', msg=notification_msg, - ex=Exception, - return_value=oslo_messaging.NotificationResult.HANDLED)), - ] - - def test_dispatcher(self): - endpoints = [] - for endpoint_methods in self.endpoints: - e = mock.Mock(spec=endpoint_methods) - endpoints.append(e) - for m in endpoint_methods: - method = getattr(e, m) - if self.ex: - method.side_effect = self.ex() - else: - method.return_value = self.return_value - - msg = notification_msg.copy() - msg['priority'] = self.priority - - dispatcher = notify_dispatcher.NotificationDispatcher(endpoints, None) - - incoming = mock.Mock(ctxt={}, message=msg) - - res = dispatcher.dispatch(incoming) - - expected_res = ( - notify_dispatcher.NotificationResult.REQUEUE - if (self.return_value == - notify_dispatcher.NotificationResult.REQUEUE or - self.ex is not None) - else notify_dispatcher.NotificationResult.HANDLED - ) - - self.assertEqual(expected_res, res) - - # check endpoint callbacks are called or not - for i, endpoint_methods in enumerate(self.endpoints): - for m in endpoint_methods: - if m == self.endpoints_expect_calls[i]: - method = getattr(endpoints[i], m) - method.assert_called_once_with( - {}, - msg['publisher_id'], - msg['event_type'], - msg['payload'], { - 'timestamp': mock.ANY, - 'message_id': mock.ANY - }) - else: - self.assertEqual(0, endpoints[i].call_count) - - @mock.patch('oslo_messaging.notify.dispatcher.LOG') - def test_dispatcher_unknown_prio(self, mylog): - msg = notification_msg.copy() - msg['priority'] = 'what???' 
- dispatcher = notify_dispatcher.NotificationDispatcher( - [mock.Mock()], None) - res = dispatcher.dispatch(mock.Mock(ctxt={}, message=msg)) - self.assertIsNone(res) - mylog.warning.assert_called_once_with('Unknown priority "%s"', - 'what???') - - -class TestDispatcherFilter(test_utils.BaseTestCase): - scenarios = [ - ('publisher_id_match', - dict(filter_rule=dict(publisher_id='^compute.*'), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=True)), - ('publisher_id_nomatch', - dict(filter_rule=dict(publisher_id='^compute.*'), - publisher_id='network01.manager', - event_type='instance.create.start', - context={}, - match=False)), - ('event_type_match', - dict(filter_rule=dict(event_type='^instance\.create'), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=True)), - ('event_type_nomatch', - dict(filter_rule=dict(event_type='^instance\.delete'), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=False)), - ('context_match', - dict(filter_rule=dict(context={'user': '^adm'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={'user': 'admin'}, - match=True)), - ('context_key_missing', - dict(filter_rule=dict(context={'user': '^adm'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={'project': 'admin'}, - metadata={}, - match=False)), - ('metadata_match', - dict(filter_rule=dict(metadata={'message_id': '^99'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=True)), - ('metadata_key_missing', - dict(filter_rule=dict(metadata={'user': '^adm'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=False)), - ('payload_match', - dict(filter_rule=dict(payload={'state': '^active$'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=True)), - ('payload_no_match', - dict(filter_rule=dict(payload={'state': '^deleted$'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=False)), - ('payload_key_missing', - dict(filter_rule=dict(payload={'user': '^adm'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={}, - match=False)), - ('mix_match', - dict(filter_rule=dict(event_type='^instance\.create', - publisher_id='^compute', - context={'user': '^adm'}), - publisher_id='compute01.manager', - event_type='instance.create.start', - context={'user': 'admin'}, - match=True)), - - ] - - def test_filters(self): - notification_filter = oslo_messaging.NotificationFilter( - **self.filter_rule) - endpoint = mock.Mock(spec=['info'], filter_rule=notification_filter) - - dispatcher = notify_dispatcher.NotificationDispatcher( - [endpoint], serializer=None) - message = {'payload': {'state': 'active'}, - 'priority': 'info', - 'publisher_id': self.publisher_id, - 'event_type': self.event_type, - 'timestamp': '2014-03-03 18:21:04.369234', - 'message_id': '99863dda-97f0-443a-a0c1-6ed317b7fd45'} - incoming = mock.Mock(ctxt=self.context, message=message) - dispatcher.dispatch(incoming) - - if self.match: - self.assertEqual(1, endpoint.info.call_count) - else: - self.assertEqual(0, endpoint.info.call_count) diff --git a/oslo_messaging/tests/notify/test_impl_messaging.py b/oslo_messaging/tests/notify/test_impl_messaging.py deleted file mode 100644 index 3a8eacb..0000000 --- 
a/oslo_messaging/tests/notify/test_impl_messaging.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_messaging.tests import utils as test_utils - - -class TestDeprecationWarning(test_utils.BaseTestCase): - - @mock.patch('warnings.warn') - def test_impl_messaging_deprecation_warning(self, mock_warn): - # Tests that we get a deprecation warning when loading a messaging - # driver out of oslo_messaging.notify._impl_messaging. - from oslo_messaging.notify import _impl_messaging as messaging - driver = messaging.MessagingV2Driver( # noqa - conf={}, topics=['notifications'], transport='rpc') - # Make sure we got a deprecation warning by loading from the alias - self.assertEqual(1, mock_warn.call_count) diff --git a/oslo_messaging/tests/notify/test_listener.py b/oslo_messaging/tests/notify/test_listener.py deleted file mode 100644 index 8300e84..0000000 --- a/oslo_messaging/tests/notify/test_listener.py +++ /dev/null @@ -1,502 +0,0 @@ - -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
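The filter scenarios above all reduce to regex matching against fields of the notification. The core of that check, reimplemented in a few lines to make "match" concrete (the real ``NotificationFilter`` also matches into the context, metadata and payload dicts)::

    import re

    def matches(filter_rule, message):
        # Every rule entry must match; a missing field is a non-match.
        for field, pattern in filter_rule.items():
            value = message.get(field)
            if value is None or not re.match(pattern, value):
                return False
        return True

    rule = {'publisher_id': '^compute.*',
            'event_type': r'^instance\.create'}
    assert matches(rule, {'publisher_id': 'compute01.manager',
                          'event_type': 'instance.create.start'})
    assert not matches(rule, {'publisher_id': 'network01.manager',
                              'event_type': 'instance.create.start'})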
- -import threading - -from oslo_config import cfg -import testscenarios - -import oslo_messaging -from oslo_messaging.notify import dispatcher -from oslo_messaging.notify import notifier as msg_notifier -from oslo_messaging.tests import utils as test_utils -import six -from six.moves import mock - -load_tests = testscenarios.load_tests_apply_scenarios - - -class RestartableServerThread(object): - def __init__(self, server): - self.server = server - self.thread = None - - def start(self): - if self.thread is None: - self.thread = test_utils.ServerThreadHelper(self.server) - self.thread.start() - - def stop(self): - if self.thread is not None: - self.thread.stop() - self.thread.join(timeout=15) - ret = self.thread.isAlive() - self.thread = None - return ret - return True - - -class ListenerSetupMixin(object): - - class ThreadTracker(object): - def __init__(self): - self._received_msgs = 0 - self.threads = [] - self.lock = threading.Condition() - - def info(self, *args, **kwargs): - # NOTE(sileht): this runs in another thread - with self.lock: - self._received_msgs += 1 - self.lock.notify_all() - - def wait_for_messages(self, expect_messages): - with self.lock: - while self._received_msgs < expect_messages: - self.lock.wait() - - def stop(self): - for thread in self.threads: - thread.stop() - self.threads = [] - - def start(self, thread): - self.threads.append(thread) - thread.start() - - def setUp(self): - self.trackers = {} - self.addCleanup(self._stop_trackers) - - def _stop_trackers(self): - for pool in self.trackers: - self.trackers[pool].stop() - self.trackers = {} - - def _setup_listener(self, transport, endpoints, - targets=None, pool=None, batch=False): - - if pool is None: - tracker_name = '__default__' - else: - tracker_name = pool - - if targets is None: - targets = [oslo_messaging.Target(topic='testtopic')] - - tracker = self.trackers.setdefault( - tracker_name, self.ThreadTracker()) - if batch: - listener = oslo_messaging.get_batch_notification_listener( - transport, targets=targets, endpoints=[tracker] + endpoints, - allow_requeue=True, pool=pool, executor='eventlet', - batch_size=batch[0], batch_timeout=batch[1]) - else: - listener = oslo_messaging.get_notification_listener( - transport, targets=targets, endpoints=[tracker] + endpoints, - allow_requeue=True, pool=pool, executor='eventlet') - - thread = RestartableServerThread(listener) - tracker.start(thread) - return thread - - def wait_for_messages(self, expect_messages, tracker_name='__default__'): - self.trackers[tracker_name].wait_for_messages(expect_messages) - - def _setup_notifier(self, transport, topic='testtopic', - publisher_id='testpublisher'): - return oslo_messaging.Notifier(transport, topic=topic, - driver='messaging', - publisher_id=publisher_id) - - -class TestNotifyListener(test_utils.BaseTestCase, ListenerSetupMixin): - - def __init__(self, *args): - super(TestNotifyListener, self).__init__(*args) - ListenerSetupMixin.__init__(self) - - def setUp(self): - super(TestNotifyListener, self).setUp(conf=cfg.ConfigOpts()) - ListenerSetupMixin.setUp(self) - - def test_constructor(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - target = oslo_messaging.Target(topic='foo') - endpoints = [object()] - - listener = oslo_messaging.get_notification_listener( - transport, [target], endpoints) - - self.assertIs(listener.conf, self.conf) - self.assertIs(listener.transport, transport) - self.assertIsInstance(listener.dispatcher, - dispatcher.NotificationDispatcher) -
self.assertIs(listener.dispatcher.endpoints, endpoints) - self.assertEqual('blocking', listener.executor_type) - - def test_no_target_topic(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - listener = oslo_messaging.get_notification_listener( - transport, - [oslo_messaging.Target()], - [mock.Mock()]) - try: - listener.start() - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) - else: - self.assertTrue(False) - - def test_unknown_executor(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - try: - oslo_messaging.get_notification_listener(transport, [], [], - executor='foo') - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure) - self.assertEqual('foo', ex.executor) - else: - self.assertTrue(False) - - def test_batch_timeout(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - endpoint = mock.Mock() - endpoint.info.return_value = None - listener_thread = self._setup_listener(transport, [endpoint], - batch=(5, 1)) - - notifier = self._setup_notifier(transport) - for i in six.moves.range(12): - notifier.info({}, 'an_event.start', 'test message') - - self.wait_for_messages(3) - self.assertFalse(listener_thread.stop()) - - messages = [dict(ctxt={}, - publisher_id='testpublisher', - event_type='an_event.start', - payload='test message', - metadata={'message_id': mock.ANY, - 'timestamp': mock.ANY})] - - endpoint.info.assert_has_calls([mock.call(messages * 5), - mock.call(messages * 5), - mock.call(messages * 2)]) - - def test_batch_size(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - endpoint = mock.Mock() - endpoint.info.return_value = None - listener_thread = self._setup_listener(transport, [endpoint], - batch=(5, None)) - - notifier = self._setup_notifier(transport) - for i in six.moves.range(10): - notifier.info({}, 'an_event.start', 'test message') - - self.wait_for_messages(2) - self.assertFalse(listener_thread.stop()) - - messages = [dict(ctxt={}, - publisher_id='testpublisher', - event_type='an_event.start', - payload='test message', - metadata={'message_id': mock.ANY, - 'timestamp': mock.ANY})] - - endpoint.info.assert_has_calls([mock.call(messages * 5), - mock.call(messages * 5)]) - - def test_batch_size_exception_path(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - endpoint = mock.Mock() - endpoint.info.side_effect = [None, Exception('boom!')] - listener_thread = self._setup_listener(transport, [endpoint], - batch=(5, None)) - - notifier = self._setup_notifier(transport) - for i in six.moves.range(10): - notifier.info({}, 'an_event.start', 'test message') - - self.wait_for_messages(2) - self.assertFalse(listener_thread.stop()) - - messages = [dict(ctxt={}, - publisher_id='testpublisher', - event_type='an_event.start', - payload='test message', - metadata={'message_id': mock.ANY, - 'timestamp': mock.ANY})] - - endpoint.info.assert_has_calls([mock.call(messages * 5)]) - - def test_one_topic(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - endpoint = mock.Mock() - endpoint.info.return_value = None - listener_thread = self._setup_listener(transport, [endpoint]) - - notifier = self._setup_notifier(transport) - notifier.info({}, 'an_event.start', 'test message') - - self.wait_for_messages(1) - self.assertFalse(listener_thread.stop()) - - endpoint.info.assert_called_once_with( - {}, 'testpublisher', 'an_event.start', 
'test message', - {'message_id': mock.ANY, 'timestamp': mock.ANY}) - - def test_two_topics(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - endpoint = mock.Mock() - endpoint.info.return_value = None - targets = [oslo_messaging.Target(topic="topic1"), - oslo_messaging.Target(topic="topic2")] - listener_thread = self._setup_listener(transport, [endpoint], - targets=targets) - notifier = self._setup_notifier(transport, topic='topic1') - notifier.info({'ctxt': '1'}, 'an_event.start1', 'test') - notifier = self._setup_notifier(transport, topic='topic2') - notifier.info({'ctxt': '2'}, 'an_event.start2', 'test') - - self.wait_for_messages(2) - self.assertFalse(listener_thread.stop()) - - endpoint.info.assert_has_calls([ - mock.call({'ctxt': '1'}, 'testpublisher', - 'an_event.start1', 'test', - {'timestamp': mock.ANY, 'message_id': mock.ANY}), - mock.call({'ctxt': '2'}, 'testpublisher', - 'an_event.start2', 'test', - {'timestamp': mock.ANY, 'message_id': mock.ANY})], - any_order=True) - - def test_two_exchanges(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - endpoint = mock.Mock() - endpoint.info.return_value = None - targets = [oslo_messaging.Target(topic="topic", - exchange="exchange1"), - oslo_messaging.Target(topic="topic", - exchange="exchange2")] - listener_thread = self._setup_listener(transport, [endpoint], - targets=targets) - - notifier = self._setup_notifier(transport, topic="topic") - - def mock_notifier_exchange(name): - def side_effect(target, ctxt, message, version, retry): - target.exchange = name - return transport._driver.send_notification(target, ctxt, - message, version, - retry=retry) - transport._send_notification = mock.MagicMock( - side_effect=side_effect) - - notifier.info({'ctxt': '0'}, - 'an_event.start', 'test message default exchange') - mock_notifier_exchange('exchange1') - notifier.info({'ctxt': '1'}, - 'an_event.start', 'test message exchange1') - mock_notifier_exchange('exchange2') - notifier.info({'ctxt': '2'}, - 'an_event.start', 'test message exchange2') - - self.wait_for_messages(2) - self.assertFalse(listener_thread.stop()) - - endpoint.info.assert_has_calls([ - mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start', - 'test message exchange1', - {'timestamp': mock.ANY, 'message_id': mock.ANY}), - mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start', - 'test message exchange2', - {'timestamp': mock.ANY, 'message_id': mock.ANY})], - any_order=True) - - def test_two_endpoints(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - endpoint1 = mock.Mock() - endpoint1.info.return_value = None - endpoint2 = mock.Mock() - endpoint2.info.return_value = oslo_messaging.NotificationResult.HANDLED - listener_thread = self._setup_listener(transport, - [endpoint1, endpoint2]) - notifier = self._setup_notifier(transport) - notifier.info({}, 'an_event.start', 'test') - - self.wait_for_messages(1) - self.assertFalse(listener_thread.stop()) - - endpoint1.info.assert_called_once_with( - {}, 'testpublisher', 'an_event.start', 'test', { - 'timestamp': mock.ANY, - 'message_id': mock.ANY}) - - endpoint2.info.assert_called_once_with( - {}, 'testpublisher', 'an_event.start', 'test', { - 'timestamp': mock.ANY, - 'message_id': mock.ANY}) - - def test_requeue(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - endpoint = mock.Mock() - endpoint.info = mock.Mock() - - def side_effect_requeue(*args, **kwargs): - if 
endpoint.info.call_count == 1: - return oslo_messaging.NotificationResult.REQUEUE - return oslo_messaging.NotificationResult.HANDLED - - endpoint.info.side_effect = side_effect_requeue - listener_thread = self._setup_listener(transport, [endpoint]) - notifier = self._setup_notifier(transport) - notifier.info({}, 'an_event.start', 'test') - - self.wait_for_messages(2) - self.assertFalse(listener_thread.stop()) - - endpoint.info.assert_has_calls([ - mock.call({}, 'testpublisher', 'an_event.start', 'test', - {'timestamp': mock.ANY, 'message_id': mock.ANY}), - mock.call({}, 'testpublisher', 'an_event.start', 'test', - {'timestamp': mock.ANY, 'message_id': mock.ANY})]) - - def test_two_pools(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - endpoint1 = mock.Mock() - endpoint1.info.return_value = None - endpoint2 = mock.Mock() - endpoint2.info.return_value = None - - targets = [oslo_messaging.Target(topic="topic")] - listener1_thread = self._setup_listener(transport, [endpoint1], - targets=targets, pool="pool1") - listener2_thread = self._setup_listener(transport, [endpoint2], - targets=targets, pool="pool2") - - notifier = self._setup_notifier(transport, topic="topic") - notifier.info({'ctxt': '0'}, 'an_event.start', 'test message0') - notifier.info({'ctxt': '1'}, 'an_event.start', 'test message1') - - self.wait_for_messages(2, "pool1") - self.wait_for_messages(2, "pool2") - self.assertFalse(listener2_thread.stop()) - self.assertFalse(listener1_thread.stop()) - - def mocked_endpoint_call(i): - return mock.call({'ctxt': '%d' % i}, 'testpublisher', - 'an_event.start', 'test message%d' % i, - {'timestamp': mock.ANY, 'message_id': mock.ANY}) - - endpoint1.info.assert_has_calls([mocked_endpoint_call(0), - mocked_endpoint_call(1)]) - endpoint2.info.assert_has_calls([mocked_endpoint_call(0), - mocked_endpoint_call(1)]) - - def test_two_pools_three_listener(self): - transport = msg_notifier.get_notification_transport( - self.conf, url='fake:') - - endpoint1 = mock.Mock() - endpoint1.info.return_value = None - endpoint2 = mock.Mock() - endpoint2.info.return_value = None - endpoint3 = mock.Mock() - endpoint3.info.return_value = None - - targets = [oslo_messaging.Target(topic="topic")] - listener1_thread = self._setup_listener(transport, [endpoint1], - targets=targets, pool="pool1") - listener2_thread = self._setup_listener(transport, [endpoint2], - targets=targets, pool="pool2") - listener3_thread = self._setup_listener(transport, [endpoint3], - targets=targets, pool="pool2") - - def mocked_endpoint_call(i): - return mock.call({'ctxt': '%d' % i}, 'testpublisher', - 'an_event.start', 'test message%d' % i, - {'timestamp': mock.ANY, 'message_id': mock.ANY}) - - notifier = self._setup_notifier(transport, topic="topic") - mocked_endpoint1_calls = [] - for i in range(0, 25): - notifier.info({'ctxt': '%d' % i}, 'an_event.start', - 'test message%d' % i) - mocked_endpoint1_calls.append(mocked_endpoint_call(i)) - - self.wait_for_messages(25, 'pool2') - listener2_thread.stop() - - for i in range(0, 25): - notifier.info({'ctxt': '%d' % i}, 'an_event.start', - 'test message%d' % i) - mocked_endpoint1_calls.append(mocked_endpoint_call(i)) - - self.wait_for_messages(50, 'pool2') - listener2_thread.start() - listener3_thread.stop() - - for i in range(0, 25): - notifier.info({'ctxt': '%d' % i}, 'an_event.start', - 'test message%d' % i) - mocked_endpoint1_calls.append(mocked_endpoint_call(i)) - - self.wait_for_messages(75, 'pool2') - listener3_thread.start() - - for i in range(0, 
25): - notifier.info({'ctxt': '%d' % i}, 'an_event.start', - 'test message%d' % i) - mocked_endpoint1_calls.append(mocked_endpoint_call(i)) - - self.wait_for_messages(100, 'pool1') - self.wait_for_messages(100, 'pool2') - - self.assertFalse(listener3_thread.stop()) - self.assertFalse(listener2_thread.stop()) - self.assertFalse(listener1_thread.stop()) - - self.assertEqual(100, endpoint1.info.call_count) - endpoint1.info.assert_has_calls(mocked_endpoint1_calls) - - self.assertLessEqual(25, endpoint2.info.call_count) - self.assertLessEqual(25, endpoint3.info.call_count) - - self.assertEqual(100, endpoint2.info.call_count + - endpoint3.info.call_count) - for call in mocked_endpoint1_calls: - self.assertIn(call, endpoint2.info.mock_calls + - endpoint3.info.mock_calls) diff --git a/oslo_messaging/tests/notify/test_log_handler.py b/oslo_messaging/tests/notify/test_log_handler.py deleted file mode 100644 index 1851321..0000000 --- a/oslo_messaging/tests/notify/test_log_handler.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import oslo_messaging -from oslo_messaging.notify import log_handler -from oslo_messaging.tests.notify import test_notifier -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - - -class PublishErrorsHandlerTestCase(test_utils.BaseTestCase): - """Tests for log.PublishErrorsHandler""" - def setUp(self): - super(PublishErrorsHandlerTestCase, self).setUp() - self.publisherrorshandler = (log_handler. - PublishErrorsHandler(logging.ERROR)) - - def test_emit_cfg_log_notifier_in_notifier_drivers(self): - drivers = ['messaging', 'log'] - self.config(driver=drivers, - group='oslo_messaging_notifications') - self.stub_flg = True - - transport = test_notifier._FakeTransport(self.conf) - notifier = oslo_messaging.Notifier(transport) - - def fake_notifier(*args, **kwargs): - self.stub_flg = False - - self.stubs.Set(notifier, 'error', fake_notifier) - - logrecord = logging.LogRecord(name='name', level='WARN', - pathname='/tmp', lineno=1, msg='Message', - args=None, exc_info=None) - self.publisherrorshandler.emit(logrecord) - self.assertTrue(self.stub_flg) - - @mock.patch('oslo_messaging.notify.notifier.Notifier._notify') - def test_emit_notification(self, mock_notify): - logrecord = logging.LogRecord(name='name', level='ERROR', - pathname='/tmp', lineno=1, msg='Message', - args=None, exc_info=None) - self.publisherrorshandler.emit(logrecord) - self.assertEqual('error.publisher', - self.publisherrorshandler._notifier.publisher_id) - mock_notify.assert_called_with({}, - 'error_notification', - {'error': 'Message'}, 'ERROR') diff --git a/oslo_messaging/tests/notify/test_logger.py b/oslo_messaging/tests/notify/test_logger.py deleted file mode 100644 index bf7e2d9..0000000 --- a/oslo_messaging/tests/notify/test_logger.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import logging -import logging.config -import os -import sys - -from oslo_utils import timeutils -import testscenarios - -import oslo_messaging -from oslo_messaging.tests.notify import test_notifier -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - - -load_tests = testscenarios.load_tests_apply_scenarios - -# Stolen from openstack.common.logging -logging.AUDIT = logging.INFO + 1 -logging.addLevelName(logging.AUDIT, 'AUDIT') - - -class TestLogNotifier(test_utils.BaseTestCase): - - scenarios = [ - ('debug', dict(priority='debug')), - ('info', dict(priority='info')), - ('warning', dict(priority='warning', queue='WARN')), - ('warn', dict(priority='warn')), - ('error', dict(priority='error')), - ('critical', dict(priority='critical')), - ('audit', dict(priority='audit')), - ] - - def setUp(self): - super(TestLogNotifier, self).setUp() - self.addCleanup(oslo_messaging.notify._impl_test.reset) - self.config(driver=['test'], - group='oslo_messaging_notifications') - # NOTE(jamespage) disable thread information logging for testing - # as this causes test failures when zmq tests monkey_patch via - # eventlet - logging.logThreads = 0 - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_logger(self, mock_utcnow): - with mock.patch('oslo_messaging.transport.get_transport', - return_value=test_notifier._FakeTransport(self.conf)): - self.logger = oslo_messaging.LoggingNotificationHandler('test://') - - mock_utcnow.return_value = datetime.datetime.utcnow() - - levelno = getattr(logging, self.priority.upper(), 42) - - record = logging.LogRecord('foo', - levelno, - '/foo/bar', - 42, - 'Something happened', - None, - None) - - self.logger.emit(record) - - context = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][0] - self.assertEqual({}, context) - - n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1] - self.assertEqual(getattr(self, 'queue', self.priority.upper()), - n['priority']) - self.assertEqual('logrecord', n['event_type']) - self.assertEqual(str(timeutils.utcnow()), n['timestamp']) - self.assertIsNone(n['publisher_id']) - self.assertEqual( - {'process': os.getpid(), - 'funcName': None, - 'name': 'foo', - 'thread': None, - 'levelno': levelno, - 'processName': 'MainProcess', - 'pathname': '/foo/bar', - 'lineno': 42, - 'msg': 'Something happened', - 'exc_info': None, - 'levelname': logging.getLevelName(levelno), - 'extra': None}, - n['payload']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_logging_conf(self, mock_utcnow): - with mock.patch('oslo_messaging.transport.get_transport', - return_value=test_notifier._FakeTransport(self.conf)): - logging.config.dictConfig({ - 'version': 1, - 'handlers': { - 'notification': { - 'class': 'oslo_messaging.LoggingNotificationHandler', - 'level': self.priority.upper(), - 'url': 'test://', - }, - }, - 'loggers': { - 'default': { - 'handlers': ['notification'], - 'level': self.priority.upper(), - }, - }, - }) - - mock_utcnow.return_value = datetime.datetime.utcnow() - - levelno = getattr(logging, self.priority.upper()) - - logger = logging.getLogger('default') - lineno = 
sys._getframe().f_lineno + 1 - logger.log(levelno, 'foobar') - - n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1] - self.assertEqual(getattr(self, 'queue', self.priority.upper()), - n['priority']) - self.assertEqual('logrecord', n['event_type']) - self.assertEqual(str(timeutils.utcnow()), n['timestamp']) - self.assertIsNone(n['publisher_id']) - pathname = __file__ - if pathname.endswith(('.pyc', '.pyo')): - pathname = pathname[:-1] - self.assertDictEqual( - n['payload'], - {'process': os.getpid(), - 'funcName': 'test_logging_conf', - 'name': 'default', - 'thread': None, - 'levelno': levelno, - 'processName': 'MainProcess', - 'pathname': pathname, - 'lineno': lineno, - 'msg': 'foobar', - 'exc_info': None, - 'levelname': logging.getLevelName(levelno), - 'extra': None}) diff --git a/oslo_messaging/tests/notify/test_middleware.py b/oslo_messaging/tests/notify/test_middleware.py deleted file mode 100644 index f5deef3..0000000 --- a/oslo_messaging/tests/notify/test_middleware.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2013-2014 eNovance -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -import webob - -from oslo_messaging.notify import middleware -from oslo_messaging.tests import utils -from six.moves import mock - - -class FakeApp(object): - def __call__(self, env, start_response): - body = 'Some response' - start_response('200 OK', [ - ('Content-Type', 'text/plain'), - ('Content-Length', str(sum(map(len, body)))) - ]) - return [body] - - -class FakeFailingApp(object): - def __call__(self, env, start_response): - raise Exception("It happens!") - - -class NotifierMiddlewareTest(utils.BaseTestCase): - - def test_notification(self): - m = middleware.RequestNotifier(FakeApp()) - req = webob.Request.blank('/foo/bar', - environ={'REQUEST_METHOD': 'GET', - 'HTTP_X_AUTH_TOKEN': uuid.uuid4()}) - with mock.patch( - 'oslo_messaging.notify.notifier.Notifier._notify') as notify: - m(req) - # Check first notification with only 'request' - call_args = notify.call_args_list[0][0] - self.assertEqual('http.request', call_args[1]) - self.assertEqual('INFO', call_args[3]) - self.assertEqual(set(['request']), - set(call_args[2].keys())) - - request = call_args[2]['request'] - self.assertEqual('/foo/bar', request['PATH_INFO']) - self.assertEqual('GET', request['REQUEST_METHOD']) - self.assertIn('HTTP_X_SERVICE_NAME', request) - self.assertNotIn('HTTP_X_AUTH_TOKEN', request) - self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), - request.keys())), - "WSGI fields are filtered out") - - # Check second notification with request + response - call_args = notify.call_args_list[1][0] - self.assertEqual('http.response', call_args[1]) - self.assertEqual('INFO', call_args[3]) - self.assertEqual(set(['request', 'response']), - set(call_args[2].keys())) - - request = call_args[2]['request'] - self.assertEqual('/foo/bar', request['PATH_INFO']) - self.assertEqual('GET', request['REQUEST_METHOD']) - self.assertIn('HTTP_X_SERVICE_NAME', request) - 
self.assertNotIn('HTTP_X_AUTH_TOKEN', request) - self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), - request.keys())), - "WSGI fields are filtered out") - - response = call_args[2]['response'] - self.assertEqual('200 OK', response['status']) - self.assertEqual('13', response['headers']['content-length']) - - def test_notification_response_failure(self): - m = middleware.RequestNotifier(FakeFailingApp()) - req = webob.Request.blank('/foo/bar', - environ={'REQUEST_METHOD': 'GET', - 'HTTP_X_AUTH_TOKEN': uuid.uuid4()}) - with mock.patch( - 'oslo_messaging.notify.notifier.Notifier._notify') as notify: - try: - m(req) - self.fail("Application exception has not been re-raised") - except Exception: - pass - # Check first notification with only 'request' - call_args = notify.call_args_list[0][0] - self.assertEqual('http.request', call_args[1]) - self.assertEqual('INFO', call_args[3]) - self.assertEqual(set(['request']), - set(call_args[2].keys())) - - request = call_args[2]['request'] - self.assertEqual('/foo/bar', request['PATH_INFO']) - self.assertEqual('GET', request['REQUEST_METHOD']) - self.assertIn('HTTP_X_SERVICE_NAME', request) - self.assertNotIn('HTTP_X_AUTH_TOKEN', request) - self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), - request.keys())), - "WSGI fields are filtered out") - - # Check second notification with 'request' and 'exception' - call_args = notify.call_args_list[1][0] - self.assertEqual('http.response', call_args[1]) - self.assertEqual('INFO', call_args[3]) - self.assertEqual(set(['request', 'exception']), - set(call_args[2].keys())) - - request = call_args[2]['request'] - self.assertEqual('/foo/bar', request['PATH_INFO']) - self.assertEqual('GET', request['REQUEST_METHOD']) - self.assertIn('HTTP_X_SERVICE_NAME', request) - self.assertNotIn('HTTP_X_AUTH_TOKEN', request) - self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), - request.keys())), - "WSGI fields are filtered out") - - exception = call_args[2]['exception'] - self.assertIn('middleware.py', exception['traceback'][0]) - self.assertIn('It happens!', exception['traceback'][-1]) - self.assertEqual("Exception('It happens!',)", exception['value']) - - def test_process_request_fail(self): - def notify_error(context, publisher_id, event_type, - priority, payload): - raise Exception('error') - with mock.patch('oslo_messaging.notify.notifier.Notifier._notify', - notify_error): - m = middleware.RequestNotifier(FakeApp()) - req = webob.Request.blank('/foo/bar', - environ={'REQUEST_METHOD': 'GET'}) - m.process_request(req) - - def test_process_response_fail(self): - def notify_error(context, publisher_id, event_type, - priority, payload): - raise Exception('error') - with mock.patch('oslo_messaging.notify.notifier.Notifier._notify', - notify_error): - m = middleware.RequestNotifier(FakeApp()) - req = webob.Request.blank('/foo/bar', - environ={'REQUEST_METHOD': 'GET'}) - m.process_response(req, webob.response.Response()) - - def test_ignore_req_opt(self): - m = middleware.RequestNotifier(FakeApp(), - ignore_req_list='get, PUT') - req = webob.Request.blank('/skip/foo', - environ={'REQUEST_METHOD': 'GET'}) - req1 = webob.Request.blank('/skip/foo', - environ={'REQUEST_METHOD': 'PUT'}) - req2 = webob.Request.blank('/accept/foo', - environ={'REQUEST_METHOD': 'POST'}) - with mock.patch( - 'oslo_messaging.notify.notifier.Notifier._notify') as notify: - # Check GET request does not send notification - m(req) - m(req1) - self.assertEqual(0, len(notify.call_args_list)) - - # Check non-GET request does send notification 
- m(req2) - self.assertEqual(2, len(notify.call_args_list)) - call_args = notify.call_args_list[0][0] - self.assertEqual('http.request', call_args[1]) - self.assertEqual('INFO', call_args[3]) - self.assertEqual(set(['request']), - set(call_args[2].keys())) - - request = call_args[2]['request'] - self.assertEqual('/accept/foo', request['PATH_INFO']) - self.assertEqual('POST', request['REQUEST_METHOD']) - - call_args = notify.call_args_list[1][0] - self.assertEqual('http.response', call_args[1]) - self.assertEqual('INFO', call_args[3]) - self.assertEqual(set(['request', 'response']), - set(call_args[2].keys())) diff --git a/oslo_messaging/tests/notify/test_notifier.py b/oslo_messaging/tests/notify/test_notifier.py deleted file mode 100644 index 92e8f64..0000000 --- a/oslo_messaging/tests/notify/test_notifier.py +++ /dev/null @@ -1,586 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import logging -import sys -import uuid - -import fixtures -from oslo_serialization import jsonutils -from oslo_utils import strutils -from oslo_utils import timeutils -from stevedore import dispatch -from stevedore import extension -import testscenarios -import yaml - -import oslo_messaging -from oslo_messaging.notify import _impl_log -from oslo_messaging.notify import _impl_test -from oslo_messaging.notify import messaging -from oslo_messaging.notify import notifier as msg_notifier -from oslo_messaging import serializer as msg_serializer -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - -load_tests = testscenarios.load_tests_apply_scenarios - - -class _FakeTransport(object): - - def __init__(self, conf): - self.conf = conf - - def _send_notification(self, target, ctxt, message, version, retry=None): - pass - - -class _ReRaiseLoggedExceptionsFixture(fixtures.Fixture): - - """Record logged exceptions and re-raise in cleanup. - - The notifier just logs notification send errors so, for the sake of - debugging test failures, we record any exceptions logged and re-raise them - during cleanup. 
- """ - - class FakeLogger(object): - - def __init__(self): - self.exceptions = [] - - def exception(self, msg, *args, **kwargs): - self.exceptions.append(sys.exc_info()[1]) - - def setUp(self): - super(_ReRaiseLoggedExceptionsFixture, self).setUp() - - self.logger = self.FakeLogger() - - def reraise_exceptions(): - for ex in self.logger.exceptions: - raise ex - - self.addCleanup(reraise_exceptions) - - -class TestMessagingNotifier(test_utils.BaseTestCase): - - _v1 = [ - ('v1', dict(v1=True)), - ('not_v1', dict(v1=False)), - ] - - _v2 = [ - ('v2', dict(v2=True)), - ('not_v2', dict(v2=False)), - ] - - _publisher_id = [ - ('ctor_pub_id', dict(ctor_pub_id='test', - expected_pub_id='test')), - ('prep_pub_id', dict(prep_pub_id='test.localhost', - expected_pub_id='test.localhost')), - ('override', dict(ctor_pub_id='test', - prep_pub_id='test.localhost', - expected_pub_id='test.localhost')), - ] - - _topics = [ - ('no_topics', dict(topics=[])), - ('single_topic', dict(topics=['notifications'])), - ('multiple_topic2', dict(topics=['foo', 'bar'])), - ] - - _priority = [ - ('audit', dict(priority='audit')), - ('debug', dict(priority='debug')), - ('info', dict(priority='info')), - ('warn', dict(priority='warn')), - ('error', dict(priority='error')), - ('sample', dict(priority='sample')), - ('critical', dict(priority='critical')), - ] - - _payload = [ - ('payload', dict(payload={'foo': 'bar'})), - ] - - _context = [ - ('ctxt', dict(ctxt={'user': 'bob'})), - ] - - _retry = [ - ('unconfigured', dict()), - ('None', dict(retry=None)), - ('0', dict(retry=0)), - ('5', dict(retry=5)), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._v1, - cls._v2, - cls._publisher_id, - cls._topics, - cls._priority, - cls._payload, - cls._context, - cls._retry) - - def setUp(self): - super(TestMessagingNotifier, self).setUp() - - self.logger = self.useFixture(_ReRaiseLoggedExceptionsFixture()).logger - self.stubs.Set(messaging, 'LOG', self.logger) - self.stubs.Set(msg_notifier, '_LOG', self.logger) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_notifier(self, mock_utcnow): - drivers = [] - if self.v1: - drivers.append('messaging') - if self.v2: - drivers.append('messagingv2') - - self.config(driver=drivers, - topics=self.topics, - group='oslo_messaging_notifications') - - transport = _FakeTransport(self.conf) - - if hasattr(self, 'ctor_pub_id'): - notifier = oslo_messaging.Notifier(transport, - publisher_id=self.ctor_pub_id) - else: - notifier = oslo_messaging.Notifier(transport) - - prepare_kwds = {} - if hasattr(self, 'retry'): - prepare_kwds['retry'] = self.retry - if hasattr(self, 'prep_pub_id'): - prepare_kwds['publisher_id'] = self.prep_pub_id - if prepare_kwds: - notifier = notifier.prepare(**prepare_kwds) - - self.mox.StubOutWithMock(transport, '_send_notification') - - message_id = uuid.uuid4() - self.mox.StubOutWithMock(uuid, 'uuid4') - uuid.uuid4().AndReturn(message_id) - - mock_utcnow.return_value = datetime.datetime.utcnow() - - message = { - 'message_id': str(message_id), - 'publisher_id': self.expected_pub_id, - 'event_type': 'test.notify', - 'priority': self.priority.upper(), - 'payload': self.payload, - 'timestamp': str(timeutils.utcnow()), - } - - sends = [] - if self.v1: - sends.append(dict(version=1.0)) - if self.v2: - sends.append(dict(version=2.0)) - - for send_kwargs in sends: - for topic in self.topics: - if hasattr(self, 'retry'): - send_kwargs['retry'] = self.retry - else: - send_kwargs['retry'] = None - target = 
oslo_messaging.Target(topic='%s.%s' % (topic, - self.priority)) - transport._send_notification(target, self.ctxt, message, - **send_kwargs).InAnyOrder() - - self.mox.ReplayAll() - - method = getattr(notifier, self.priority) - method(self.ctxt, 'test.notify', self.payload) - - -TestMessagingNotifier.generate_scenarios() - - -class TestSerializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestSerializer, self).setUp() - self.addCleanup(_impl_test.reset) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_serializer(self, mock_utcnow): - transport = _FakeTransport(self.conf) - - serializer = msg_serializer.NoOpSerializer() - - notifier = oslo_messaging.Notifier(transport, - 'test.localhost', - driver='test', - topic='test', - serializer=serializer) - - message_id = uuid.uuid4() - self.mox.StubOutWithMock(uuid, 'uuid4') - uuid.uuid4().AndReturn(message_id) - - mock_utcnow.return_value = datetime.datetime.utcnow() - - self.mox.StubOutWithMock(serializer, 'serialize_context') - self.mox.StubOutWithMock(serializer, 'serialize_entity') - serializer.serialize_context(dict(user='bob')).\ - AndReturn(dict(user='alice')) - serializer.serialize_entity(dict(user='bob'), 'bar').AndReturn('sbar') - - self.mox.ReplayAll() - - notifier.info(dict(user='bob'), 'test.notify', 'bar') - - message = { - 'message_id': str(message_id), - 'publisher_id': 'test.localhost', - 'event_type': 'test.notify', - 'priority': 'INFO', - 'payload': 'sbar', - 'timestamp': str(timeutils.utcnow()), - } - - self.assertEqual([(dict(user='alice'), message, 'INFO', None)], - _impl_test.NOTIFICATIONS) - - -class TestNotifierTopics(test_utils.BaseTestCase): - - def test_topics_from_config(self): - self.config(driver=['log'], - group='oslo_messaging_notifications') - self.config(topics=['topic1', 'topic2'], - group='oslo_messaging_notifications') - transport = _FakeTransport(self.conf) - - notifier = oslo_messaging.Notifier(transport, 'test.localhost') - self.assertEqual(['topic1', 'topic2'], notifier._topics) - - def test_topics_from_kwargs(self): - self.config(driver=['log'], - group='oslo_messaging_notifications') - transport = _FakeTransport(self.conf) - - notifier = oslo_messaging.Notifier(transport, 'test.localhost', - topic='topic1') - self.assertEqual(['topic1'], notifier._topics) - notifier = oslo_messaging.Notifier(transport, 'test.localhost', - topics=['topic1', 'topic2']) - self.assertEqual(['topic1', 'topic2'], notifier._topics) - - -class TestLogNotifier(test_utils.BaseTestCase): - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_notifier(self, mock_utcnow): - self.config(driver=['log'], - group='oslo_messaging_notifications') - - transport = _FakeTransport(self.conf) - - notifier = oslo_messaging.Notifier(transport, 'test.localhost') - - message_id = uuid.uuid4() - self.mox.StubOutWithMock(uuid, 'uuid4') - uuid.uuid4().AndReturn(message_id) - - mock_utcnow.return_value = datetime.datetime.utcnow() - - message = { - 'message_id': str(message_id), - 'publisher_id': 'test.localhost', - 'event_type': 'test.notify', - 'priority': 'INFO', - 'payload': 'bar', - 'timestamp': str(timeutils.utcnow()), - } - - logger = self.mox.CreateMockAnything() - - self.mox.StubOutWithMock(logging, 'getLogger') - logging.getLogger('oslo.messaging.notification.test.notify').\ - AndReturn(logger) - - logger.info(jsonutils.dumps(message)) - - self.mox.ReplayAll() - - notifier.info({}, 'test.notify', 'bar') - - def test_sample_priority(self): - # Ensure logger drops sample-level notifications. 
- driver = _impl_log.LogDriver(None, None, None) - - logger = self.mox.CreateMock( - logging.getLogger('oslo.messaging.notification.foo')) - logger.sample = None - self.mox.StubOutWithMock(logging, 'getLogger') - logging.getLogger('oslo.messaging.notification.foo').\ - AndReturn(logger) - - self.mox.ReplayAll() - - msg = {'event_type': 'foo'} - driver.notify(None, msg, "sample", None) - - def test_mask_passwords(self): - # Ensure that passwords are masked with notifications - driver = _impl_log.LogDriver(None, None, None) - logger = mock.MagicMock() - logger.info = mock.MagicMock() - message = {'password': 'passw0rd', 'event_type': 'foo'} - mask_str = jsonutils.dumps(strutils.mask_dict_password(message)) - - with mock.patch.object(logging, 'getLogger') as gl: - gl.return_value = logger - driver.notify(None, message, 'info', 0) - - logger.info.assert_called_once_with(mask_str) - - -class TestRoutingNotifier(test_utils.BaseTestCase): - def setUp(self): - super(TestRoutingNotifier, self).setUp() - self.config(driver=['routing'], - group='oslo_messaging_notifications') - - transport = _FakeTransport(self.conf) - self.notifier = oslo_messaging.Notifier(transport) - self.router = self.notifier._driver_mgr['routing'].obj - - def _fake_extension_manager(self, ext): - return extension.ExtensionManager.make_test_instance( - [extension.Extension('test', None, None, ext), ]) - - def _empty_extension_manager(self): - return extension.ExtensionManager.make_test_instance([]) - - def test_should_load_plugin(self): - self.router.used_drivers = set(["zoo", "blah"]) - ext = mock.MagicMock() - ext.name = "foo" - self.assertFalse(self.router._should_load_plugin(ext)) - ext.name = "zoo" - self.assertTrue(self.router._should_load_plugin(ext)) - - def test_load_notifiers_no_config(self): - # default routing_config="" - self.router._load_notifiers() - self.assertEqual({}, self.router.routing_groups) - self.assertEqual(0, len(self.router.used_drivers)) - - def test_load_notifiers_no_extensions(self): - self.config(routing_config="routing_notifier.yaml", - group='oslo_messaging_notifications') - routing_config = r"" - config_file = mock.MagicMock() - config_file.return_value = routing_config - - with mock.patch.object(self.router, '_get_notifier_config_file', - config_file): - with mock.patch('stevedore.dispatch.DispatchExtensionManager', - return_value=self._empty_extension_manager()): - with mock.patch('oslo_messaging.notify.' - '_impl_routing.LOG') as mylog: - self.router._load_notifiers() - self.assertFalse(mylog.debug.called) - self.assertEqual({}, self.router.routing_groups) - - def test_load_notifiers_config(self): - self.config(routing_config="routing_notifier.yaml", - group='oslo_messaging_notifications') - routing_config = r""" -group_1: - rpc : foo -group_2: - rpc : blah - """ - - config_file = mock.MagicMock() - config_file.return_value = routing_config - - with mock.patch.object(self.router, '_get_notifier_config_file', - config_file): - with mock.patch('stevedore.dispatch.DispatchExtensionManager', - return_value=self._fake_extension_manager( - mock.MagicMock())): - self.router._load_notifiers() - groups = list(self.router.routing_groups.keys()) - groups.sort() - self.assertEqual(['group_1', 'group_2'], groups) - - def test_get_drivers_for_message_accepted_events(self): - config = r""" -group_1: - rpc: - accepted_events: - - foo.* - - blah.zoo.* - - zip - """ - groups = yaml.safe_load(config) - group = groups['group_1'] - - # No matching event ... 
- self.assertEqual([], - self.router._get_drivers_for_message( - group, "unknown", "info")) - - # Child of foo ... - self.assertEqual(['rpc'], - self.router._get_drivers_for_message( - group, "foo.1", "info")) - - # Foo itself ... - self.assertEqual([], - self.router._get_drivers_for_message( - group, "foo", "info")) - - # Child of blah.zoo - self.assertEqual(['rpc'], - self.router._get_drivers_for_message( - group, "blah.zoo.zing", "info")) - - def test_get_drivers_for_message_accepted_priorities(self): - config = r""" -group_1: - rpc: - accepted_priorities: - - info - - error - """ - groups = yaml.safe_load(config) - group = groups['group_1'] - - # No matching priority - self.assertEqual([], - self.router._get_drivers_for_message( - group, None, "unknown")) - - # Info ... - self.assertEqual(['rpc'], - self.router._get_drivers_for_message( - group, None, "info")) - - # Error (to make sure the list is getting processed) ... - self.assertEqual(['rpc'], - self.router._get_drivers_for_message( - group, None, "error")) - - def test_get_drivers_for_message_both(self): - config = r""" -group_1: - rpc: - accepted_priorities: - - info - accepted_events: - - foo.* - driver_1: - accepted_priorities: - - info - driver_2: - accepted_events: - - foo.* - """ - groups = yaml.safe_load(config) - group = groups['group_1'] - - # Valid event, but no matching priority - self.assertEqual(['driver_2'], - self.router._get_drivers_for_message( - group, 'foo.blah', "unknown")) - - # Valid priority, but no matching event - self.assertEqual(['driver_1'], - self.router._get_drivers_for_message( - group, 'unknown', "info")) - - # Happy day ... - x = self.router._get_drivers_for_message(group, 'foo.blah', "info") - x.sort() - self.assertEqual(['driver_1', 'driver_2', 'rpc'], x) - - def test_filter_func(self): - ext = mock.MagicMock() - ext.name = "rpc" - - # Good ... 
- self.assertTrue(self.router._filter_func(ext, {}, {}, 'info', - None, ['foo', 'rpc'])) - - # Bad - self.assertFalse(self.router._filter_func(ext, {}, {}, 'info', - None, ['foo'])) - - def test_notify(self): - self.router.routing_groups = {'group_1': None, 'group_2': None} - drivers_mock = mock.MagicMock() - drivers_mock.side_effect = [['rpc'], ['foo']] - - with mock.patch.object(self.router, 'plugin_manager') as pm: - with mock.patch.object(self.router, '_get_drivers_for_message', - drivers_mock): - self.notifier.info({}, 'my_event', {}) - self.assertEqual(sorted(['rpc', 'foo']), - sorted(pm.map.call_args[0][6])) - - def test_notify_filtered(self): - self.config(routing_config="routing_notifier.yaml", - group='oslo_messaging_notifications') - routing_config = r""" -group_1: - rpc: - accepted_events: - - my_event - rpc2: - accepted_priorities: - - info - bar: - accepted_events: - - nothing - """ - config_file = mock.MagicMock() - config_file.return_value = routing_config - - rpc_driver = mock.Mock() - rpc2_driver = mock.Mock() - bar_driver = mock.Mock() - - pm = dispatch.DispatchExtensionManager.make_test_instance( - [extension.Extension('rpc', None, None, rpc_driver), - extension.Extension('rpc2', None, None, rpc2_driver), - extension.Extension('bar', None, None, bar_driver)], - ) - - with mock.patch.object(self.router, '_get_notifier_config_file', - config_file): - with mock.patch('stevedore.dispatch.DispatchExtensionManager', - return_value=pm): - self.notifier.info({}, 'my_event', {}) - self.assertFalse(bar_driver.info.called) - rpc_driver.notify.assert_called_once_with( - {}, mock.ANY, 'INFO', None) - rpc2_driver.notify.assert_called_once_with( - {}, mock.ANY, 'INFO', None) diff --git a/oslo_messaging/tests/rpc/__init__.py b/oslo_messaging/tests/rpc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_messaging/tests/rpc/test_client.py b/oslo_messaging/tests/rpc/test_client.py deleted file mode 100644 index 4a527bc..0000000 --- a/oslo_messaging/tests/rpc/test_client.py +++ /dev/null @@ -1,556 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
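[Reviewer note, not part of the diff: the RPC client tests that follow, like most files in this series, multiply test cases with testscenarios (a module-level load_tests hook plus a class-level scenarios list). A minimal self-contained sketch of that pattern, assuming the testscenarios and testtools packages are installed:

import testscenarios
import testtools

load_tests = testscenarios.load_tests_apply_scenarios


class TestAdd(testtools.TestCase):
    # Each (name, attrs) pair becomes its own test case; the attrs are
    # bound onto the instance before the test method runs.
    scenarios = [
        ('small', dict(a=1, b=2, expected=3)),
        ('large', dict(a=10, b=20, expected=30)),
    ]

    def test_add(self):
        self.assertEqual(self.expected, self.a + self.b)

End of reviewer note.]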
- -from oslo_config import cfg -import testscenarios - -import oslo_messaging -from oslo_messaging import exceptions -from oslo_messaging import serializer as msg_serializer -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - - -class _FakeTransport(object): - - def __init__(self, conf): - self.conf = conf - - def _send(self, *args, **kwargs): - pass - - -class TestCastCall(test_utils.BaseTestCase): - - scenarios = [ - ('cast_no_ctxt_no_args', dict(call=False, ctxt={}, args={})), - ('call_no_ctxt_no_args', dict(call=True, ctxt={}, args={})), - ('cast_ctxt_and_args', - dict(call=False, - ctxt=dict(user='testuser', project='testtenant'), - args=dict(bar='blaa', foobar=11.01))), - ('call_ctxt_and_args', - dict(call=True, - ctxt=dict(user='testuser', project='testtenant'), - args=dict(bar='blaa', foobar=11.01))), - ] - - def test_cast_call(self): - self.config(rpc_response_timeout=None) - - transport = _FakeTransport(self.conf) - client = oslo_messaging.RPCClient(transport, oslo_messaging.Target()) - - self.mox.StubOutWithMock(transport, '_send') - - msg = dict(method='foo', args=self.args) - kwargs = {'retry': None} - if self.call: - kwargs['wait_for_reply'] = True - kwargs['timeout'] = None - - transport._send(oslo_messaging.Target(), self.ctxt, msg, **kwargs) - self.mox.ReplayAll() - - method = client.call if self.call else client.cast - method(self.ctxt, 'foo', **self.args) - - -class TestCastToTarget(test_utils.BaseTestCase): - - _base = [ - ('all_none', dict(ctor={}, prepare={}, expect={})), - ('ctor_exchange', - dict(ctor=dict(exchange='testexchange'), - prepare={}, - expect=dict(exchange='testexchange'))), - ('prepare_exchange', - dict(ctor={}, - prepare=dict(exchange='testexchange'), - expect=dict(exchange='testexchange'))), - ('prepare_exchange_none', - dict(ctor=dict(exchange='testexchange'), - prepare=dict(exchange=None), - expect={})), - ('both_exchange', - dict(ctor=dict(exchange='ctorexchange'), - prepare=dict(exchange='testexchange'), - expect=dict(exchange='testexchange'))), - ('ctor_topic', - dict(ctor=dict(topic='testtopic'), - prepare={}, - expect=dict(topic='testtopic'))), - ('prepare_topic', - dict(ctor={}, - prepare=dict(topic='testtopic'), - expect=dict(topic='testtopic'))), - ('prepare_topic_none', - dict(ctor=dict(topic='testtopic'), - prepare=dict(topic=None), - expect={})), - ('both_topic', - dict(ctor=dict(topic='ctortopic'), - prepare=dict(topic='testtopic'), - expect=dict(topic='testtopic'))), - ('ctor_namespace', - dict(ctor=dict(namespace='testnamespace'), - prepare={}, - expect=dict(namespace='testnamespace'))), - ('prepare_namespace', - dict(ctor={}, - prepare=dict(namespace='testnamespace'), - expect=dict(namespace='testnamespace'))), - ('prepare_namespace_none', - dict(ctor=dict(namespace='testnamespace'), - prepare=dict(namespace=None), - expect={})), - ('both_namespace', - dict(ctor=dict(namespace='ctornamespace'), - prepare=dict(namespace='testnamespace'), - expect=dict(namespace='testnamespace'))), - ('ctor_version', - dict(ctor=dict(version='1.1'), - prepare={}, - expect=dict(version='1.1'))), - ('prepare_version', - dict(ctor={}, - prepare=dict(version='1.1'), - expect=dict(version='1.1'))), - ('prepare_version_none', - dict(ctor=dict(version='1.1'), - prepare=dict(version=None), - expect={})), - ('both_version', - dict(ctor=dict(version='ctorversion'), - prepare=dict(version='1.1'), - expect=dict(version='1.1'))), - ('ctor_server', - dict(ctor=dict(server='testserver'), - prepare={}, - 
expect=dict(server='testserver'))), - ('prepare_server', - dict(ctor={}, - prepare=dict(server='testserver'), - expect=dict(server='testserver'))), - ('prepare_server_none', - dict(ctor=dict(server='testserver'), - prepare=dict(server=None), - expect={})), - ('both_server', - dict(ctor=dict(server='ctorserver'), - prepare=dict(server='testserver'), - expect=dict(server='testserver'))), - ('ctor_fanout', - dict(ctor=dict(fanout=True), - prepare={}, - expect=dict(fanout=True))), - ('prepare_fanout', - dict(ctor={}, - prepare=dict(fanout=True), - expect=dict(fanout=True))), - ('prepare_fanout_none', - dict(ctor=dict(fanout=True), - prepare=dict(fanout=None), - expect={})), - ('both_fanout', - dict(ctor=dict(fanout=True), - prepare=dict(fanout=False), - expect=dict(fanout=False))), - ] - - _prepare = [ - ('single_prepare', dict(double_prepare=False)), - ('double_prepare', dict(double_prepare=True)), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._base, - cls._prepare) - - def setUp(self): - super(TestCastToTarget, self).setUp(conf=cfg.ConfigOpts()) - - def test_cast_to_target(self): - target = oslo_messaging.Target(**self.ctor) - expect_target = oslo_messaging.Target(**self.expect) - - transport = _FakeTransport(self.conf) - client = oslo_messaging.RPCClient(transport, target) - - self.mox.StubOutWithMock(transport, '_send') - - msg = dict(method='foo', args={}) - if 'namespace' in self.expect: - msg['namespace'] = self.expect['namespace'] - if 'version' in self.expect: - msg['version'] = self.expect['version'] - transport._send(expect_target, {}, msg, retry=None) - - self.mox.ReplayAll() - - if self.prepare: - client = client.prepare(**self.prepare) - if self.double_prepare: - client = client.prepare(**self.prepare) - client.cast({}, 'foo') - - -TestCastToTarget.generate_scenarios() - - -_notset = object() - - -class TestCallTimeout(test_utils.BaseTestCase): - - scenarios = [ - ('all_none', - dict(confval=None, ctor=None, prepare=_notset, expect=None)), - ('confval', - dict(confval=21, ctor=None, prepare=_notset, expect=21)), - ('ctor', - dict(confval=None, ctor=21.1, prepare=_notset, expect=21.1)), - ('ctor_zero', - dict(confval=None, ctor=0, prepare=_notset, expect=0)), - ('prepare', - dict(confval=None, ctor=None, prepare=21.1, expect=21.1)), - ('prepare_override', - dict(confval=None, ctor=10.1, prepare=21.1, expect=21.1)), - ('prepare_zero', - dict(confval=None, ctor=None, prepare=0, expect=0)), - ] - - def test_call_timeout(self): - self.config(rpc_response_timeout=self.confval) - - transport = _FakeTransport(self.conf) - client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(), - timeout=self.ctor) - - self.mox.StubOutWithMock(transport, '_send') - - msg = dict(method='foo', args={}) - kwargs = dict(wait_for_reply=True, timeout=self.expect, retry=None) - transport._send(oslo_messaging.Target(), {}, msg, **kwargs) - - self.mox.ReplayAll() - - if self.prepare is not _notset: - client = client.prepare(timeout=self.prepare) - client.call({}, 'foo') - - -class TestCallRetry(test_utils.BaseTestCase): - - scenarios = [ - ('all_none', dict(ctor=None, prepare=_notset, expect=None)), - ('ctor', dict(ctor=21, prepare=_notset, expect=21)), - ('ctor_zero', dict(ctor=0, prepare=_notset, expect=0)), - ('prepare', dict(ctor=None, prepare=21, expect=21)), - ('prepare_override', dict(ctor=10, prepare=21, expect=21)), - ('prepare_zero', dict(ctor=None, prepare=0, expect=0)), - ] - - def test_call_retry(self): - transport = 
_FakeTransport(self.conf) - client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(), - retry=self.ctor) - - self.mox.StubOutWithMock(transport, '_send') - - msg = dict(method='foo', args={}) - kwargs = dict(wait_for_reply=True, timeout=60, - retry=self.expect) - transport._send(oslo_messaging.Target(), {}, msg, **kwargs) - - self.mox.ReplayAll() - - if self.prepare is not _notset: - client = client.prepare(retry=self.prepare) - client.call({}, 'foo') - - -class TestCallFanout(test_utils.BaseTestCase): - - scenarios = [ - ('target', dict(prepare=_notset, target={'fanout': True})), - ('prepare', dict(prepare={'fanout': True}, target={})), - ('both', dict(prepare={'fanout': True}, target={'fanout': True})), - ] - - def test_call_fanout(self): - transport = _FakeTransport(self.conf) - client = oslo_messaging.RPCClient(transport, - oslo_messaging.Target(**self.target)) - - if self.prepare is not _notset: - client = client.prepare(**self.prepare) - - self.assertRaises(exceptions.InvalidTarget, - client.call, {}, 'foo') - - -class TestSerializer(test_utils.BaseTestCase): - - scenarios = [ - ('cast', - dict(call=False, - ctxt=dict(user='bob'), - args=dict(a='a', b='b', c='c'), - retval=None)), - ('call', - dict(call=True, - ctxt=dict(user='bob'), - args=dict(a='a', b='b', c='c'), - retval='d')), - ] - - def test_call_serializer(self): - self.config(rpc_response_timeout=None) - - transport = _FakeTransport(self.conf) - serializer = msg_serializer.NoOpSerializer() - - client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(), - serializer=serializer) - - self.mox.StubOutWithMock(transport, '_send') - - msg = dict(method='foo', - args=dict([(k, 's' + v) for k, v in self.args.items()])) - kwargs = dict(wait_for_reply=True, timeout=None) if self.call else {} - kwargs['retry'] = None - transport._send(oslo_messaging.Target(), - dict(user='alice'), - msg, - **kwargs).AndReturn(self.retval) - - self.mox.StubOutWithMock(serializer, 'serialize_entity') - self.mox.StubOutWithMock(serializer, 'deserialize_entity') - self.mox.StubOutWithMock(serializer, 'serialize_context') - - for arg in self.args: - serializer.serialize_entity(self.ctxt, arg).AndReturn('s' + arg) - - if self.call: - serializer.deserialize_entity(self.ctxt, self.retval).\ - AndReturn('d' + self.retval) - - serializer.serialize_context(self.ctxt).AndReturn(dict(user='alice')) - - self.mox.ReplayAll() - - method = client.call if self.call else client.cast - retval = method(self.ctxt, 'foo', **self.args) - if self.retval is not None: - self.assertEqual('d' + self.retval, retval) - - -class TestVersionCap(test_utils.BaseTestCase): - - _call_vs_cast = [ - ('call', dict(call=True)), - ('cast', dict(call=False)), - ] - - _cap_scenarios = [ - ('all_none', - dict(cap=None, prepare_cap=_notset, - version=None, prepare_version=_notset, - success=True)), - ('ctor_cap_ok', - dict(cap='1.1', prepare_cap=_notset, - version='1.0', prepare_version=_notset, - success=True)), - ('ctor_cap_override_ok', - dict(cap='2.0', prepare_cap='1.1', - version='1.0', prepare_version='1.0', - success=True)), - ('ctor_cap_override_none_ok', - dict(cap='1.1', prepare_cap=None, - version='1.0', prepare_version=_notset, - success=True)), - ('ctor_cap_minor_fail', - dict(cap='1.0', prepare_cap=_notset, - version='1.1', prepare_version=_notset, - success=False)), - ('ctor_cap_major_fail', - dict(cap='2.0', prepare_cap=_notset, - version=None, prepare_version='1.0', - success=False)), - ('ctor_cap_none_version_ok', - dict(cap=None, prepare_cap=_notset, - 
version='1.0', prepare_version=_notset, - success=True)), - ('ctor_cap_version_none_fail', - dict(cap='1.0', prepare_cap=_notset, - version=None, prepare_version=_notset, - success=False)), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = ( - testscenarios.multiply_scenarios(cls._call_vs_cast, - cls._cap_scenarios)) - - def test_version_cap(self): - self.config(rpc_response_timeout=None) - - transport = _FakeTransport(self.conf) - - target = oslo_messaging.Target(version=self.version) - client = oslo_messaging.RPCClient(transport, target, - version_cap=self.cap) - - if self.success: - self.mox.StubOutWithMock(transport, '_send') - - if self.prepare_version is not _notset: - target = target(version=self.prepare_version) - - msg = dict(method='foo', args={}) - if target.version is not None: - msg['version'] = target.version - - kwargs = {'retry': None} - if self.call: - kwargs['wait_for_reply'] = True - kwargs['timeout'] = None - - transport._send(target, {}, msg, **kwargs) - - self.mox.ReplayAll() - - prep_kwargs = {} - if self.prepare_cap is not _notset: - prep_kwargs['version_cap'] = self.prepare_cap - if self.prepare_version is not _notset: - prep_kwargs['version'] = self.prepare_version - if prep_kwargs: - client = client.prepare(**prep_kwargs) - - method = client.call if self.call else client.cast - try: - method({}, 'foo') - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.RPCVersionCapError, ex) - self.assertFalse(self.success) - else: - self.assertTrue(self.success) - - -TestVersionCap.generate_scenarios() - - -class TestCanSendVersion(test_utils.BaseTestCase): - - scenarios = [ - ('all_none', - dict(cap=None, prepare_cap=_notset, - version=None, prepare_version=_notset, - can_send_version=_notset, - can_send=True)), - ('ctor_cap_ok', - dict(cap='1.1', prepare_cap=_notset, - version='1.0', prepare_version=_notset, - can_send_version=_notset, - can_send=True)), - ('ctor_cap_override_ok', - dict(cap='2.0', prepare_cap='1.1', - version='1.0', prepare_version='1.0', - can_send_version=_notset, - can_send=True)), - ('ctor_cap_override_none_ok', - dict(cap='1.1', prepare_cap=None, - version='1.0', prepare_version=_notset, - can_send_version=_notset, - can_send=True)), - ('ctor_cap_can_send_ok', - dict(cap='1.1', prepare_cap=None, - version='1.0', prepare_version=_notset, - can_send_version='1.1', - can_send=True)), - ('ctor_cap_can_send_none_ok', - dict(cap='1.1', prepare_cap=None, - version='1.0', prepare_version=_notset, - can_send_version=None, - can_send=True)), - ('ctor_cap_minor_fail', - dict(cap='1.0', prepare_cap=_notset, - version='1.1', prepare_version=_notset, - can_send_version=_notset, - can_send=False)), - ('ctor_cap_major_fail', - dict(cap='2.0', prepare_cap=_notset, - version=None, prepare_version='1.0', - can_send_version=_notset, - can_send=False)), - ('ctor_cap_none_version_ok', - dict(cap=None, prepare_cap=_notset, - version='1.0', prepare_version=_notset, - can_send_version=_notset, - can_send=True)), - ('ctor_cap_version_none_fail', - dict(cap='1.0', prepare_cap=_notset, - version=None, prepare_version=_notset, - can_send_version=_notset, - can_send=False)), - ('ctor_cap_version_can_send_none_fail', - dict(cap='1.0', prepare_cap=_notset, - version='1.0', prepare_version=_notset, - can_send_version=None, - can_send=False)), - ] - - def test_version_cap(self): - self.config(rpc_response_timeout=None) - - transport = _FakeTransport(self.conf) - - target = oslo_messaging.Target(version=self.version) - client = 
oslo_messaging.RPCClient(transport, target, - version_cap=self.cap) - - prep_kwargs = {} - if self.prepare_cap is not _notset: - prep_kwargs['version_cap'] = self.prepare_cap - if self.prepare_version is not _notset: - prep_kwargs['version'] = self.prepare_version - if prep_kwargs: - client = client.prepare(**prep_kwargs) - - if self.can_send_version is not _notset: - can_send = client.can_send_version(version=self.can_send_version) - call_context_can_send = client.prepare().can_send_version( - version=self.can_send_version) - self.assertEqual(can_send, call_context_can_send) - else: - can_send = client.can_send_version() - - self.assertEqual(self.can_send, can_send) - - def test_invalid_version_type(self): - target = oslo_messaging.Target(topic='sometopic') - transport = _FakeTransport(self.conf) - client = oslo_messaging.RPCClient(transport, target) - self.assertRaises(exceptions.MessagingException, - client.prepare, version='5') - self.assertRaises(exceptions.MessagingException, - client.prepare, version='5.a') - self.assertRaises(exceptions.MessagingException, - client.prepare, version='5.5.a') diff --git a/oslo_messaging/tests/rpc/test_dispatcher.py b/oslo_messaging/tests/rpc/test_dispatcher.py deleted file mode 100644 index 2a2f7b4..0000000 --- a/oslo_messaging/tests/rpc/test_dispatcher.py +++ /dev/null @@ -1,187 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
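The deleted client tests above centre on version negotiation: a Target carries the version a caller requests, RPCClient's version_cap bounds what may actually be sent, and prepare() derives a narrowed copy of the client with per-call overrides. A minimal illustrative sketch of that flow, assuming the in-tree 'fake' driver; the topic and version numbers here are made up for the example:

    from oslo_config import cfg

    import oslo_messaging

    conf = cfg.ConfigOpts()
    transport = oslo_messaging.get_transport(conf, url='fake:')

    # version is what the caller asks for; version_cap is the ceiling the
    # deployment allows (e.g. while older services are still running).
    target = oslo_messaging.Target(topic='demo', version='1.0')
    client = oslo_messaging.RPCClient(transport, target, version_cap='1.1')

    assert client.can_send_version()               # 1.0 is under the 1.1 cap
    assert client.can_send_version(version='1.1')

    # prepare() returns a copy with overrides; 2.0 exceeds the cap, so a
    # call() or cast() through it would raise RPCVersionCapError.
    newer = client.prepare(version='2.0')
    assert not newer.can_send_version()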
- -import testscenarios - -import oslo_messaging -from oslo_messaging import serializer as msg_serializer -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - -load_tests = testscenarios.load_tests_apply_scenarios - - -class _FakeEndpoint(object): - - def __init__(self, target=None): - self.target = target - - def foo(self, ctxt, **kwargs): - pass - - def bar(self, ctxt, **kwargs): - pass - - -class TestDispatcher(test_utils.BaseTestCase): - - scenarios = [ - ('no_endpoints', - dict(endpoints=[], - dispatch_to=None, - ctxt={}, msg=dict(method='foo'), - success=False, ex=oslo_messaging.UnsupportedVersion)), - ('default_target', - dict(endpoints=[{}], - dispatch_to=dict(endpoint=0, method='foo'), - ctxt={}, msg=dict(method='foo'), - success=True, ex=None)), - ('default_target_ctxt_and_args', - dict(endpoints=[{}], - dispatch_to=dict(endpoint=0, method='bar'), - ctxt=dict(user='bob'), msg=dict(method='bar', - args=dict(blaa=True)), - success=True, ex=None)), - ('default_target_namespace', - dict(endpoints=[{}], - dispatch_to=dict(endpoint=0, method='foo'), - ctxt={}, msg=dict(method='foo', namespace=None), - success=True, ex=None)), - ('default_target_version', - dict(endpoints=[{}], - dispatch_to=dict(endpoint=0, method='foo'), - ctxt={}, msg=dict(method='foo', version='1.0'), - success=True, ex=None)), - ('default_target_no_such_method', - dict(endpoints=[{}], - dispatch_to=None, - ctxt={}, msg=dict(method='foobar'), - success=False, ex=oslo_messaging.NoSuchMethod)), - ('namespace', - dict(endpoints=[{}, dict(namespace='testns')], - dispatch_to=dict(endpoint=1, method='foo'), - ctxt={}, msg=dict(method='foo', namespace='testns'), - success=True, ex=None)), - ('namespace_mismatch', - dict(endpoints=[{}, dict(namespace='testns')], - dispatch_to=None, - ctxt={}, msg=dict(method='foo', namespace='nstest'), - success=False, ex=oslo_messaging.UnsupportedVersion)), - ('version', - dict(endpoints=[dict(version='1.5'), dict(version='3.4')], - dispatch_to=dict(endpoint=1, method='foo'), - ctxt={}, msg=dict(method='foo', version='3.2'), - success=True, ex=None)), - ('version_mismatch', - dict(endpoints=[dict(version='1.5'), dict(version='3.0')], - dispatch_to=None, - ctxt={}, msg=dict(method='foo', version='3.2'), - success=False, ex=oslo_messaging.UnsupportedVersion)), - ('message_in_null_namespace_with_multiple_namespaces', - dict(endpoints=[dict(namespace='testns', - legacy_namespaces=[None])], - dispatch_to=dict(endpoint=0, method='foo'), - ctxt={}, msg=dict(method='foo', namespace=None), - success=True, ex=None)), - ('message_in_wrong_namespace_with_multiple_namespaces', - dict(endpoints=[dict(namespace='testns', - legacy_namespaces=['second', None])], - dispatch_to=None, - ctxt={}, msg=dict(method='foo', namespace='wrong'), - success=False, ex=oslo_messaging.UnsupportedVersion)), - ] - - def test_dispatcher(self): - endpoints = [mock.Mock(spec=_FakeEndpoint, - target=oslo_messaging.Target(**e)) - for e in self.endpoints] - - serializer = None - dispatcher = oslo_messaging.RPCDispatcher(endpoints, serializer) - - incoming = mock.Mock(ctxt=self.ctxt, message=self.msg) - - res = None - - try: - res = dispatcher.dispatch(incoming) - except Exception as ex: - self.assertFalse(self.success, ex) - self.assertIsNotNone(self.ex, ex) - self.assertIsInstance(ex, self.ex, ex) - if isinstance(ex, oslo_messaging.NoSuchMethod): - self.assertEqual(self.msg.get('method'), ex.method) - elif isinstance(ex, oslo_messaging.UnsupportedVersion): - self.assertEqual(self.msg.get('version', 
'1.0'), - ex.version) - if ex.method: - self.assertEqual(self.msg.get('method'), ex.method) - else: - self.assertTrue(self.success, - "Unexpected success of operation during testing") - self.assertIsNotNone(res) - - for n, endpoint in enumerate(endpoints): - for method_name in ['foo', 'bar']: - method = getattr(endpoint, method_name) - if self.dispatch_to and n == self.dispatch_to['endpoint'] and \ - method_name == self.dispatch_to['method']: - method.assert_called_once_with( - self.ctxt, **self.msg.get('args', {})) - else: - self.assertEqual(0, method.call_count) - - -class TestSerializer(test_utils.BaseTestCase): - - scenarios = [ - ('no_args_or_retval', - dict(ctxt={}, dctxt={}, args={}, retval=None)), - ('args_and_retval', - dict(ctxt=dict(user='bob'), - dctxt=dict(user='alice'), - args=dict(a='a', b='b', c='c'), - retval='d')), - ] - - def test_serializer(self): - endpoint = _FakeEndpoint() - serializer = msg_serializer.NoOpSerializer() - dispatcher = oslo_messaging.RPCDispatcher([endpoint], serializer) - - self.mox.StubOutWithMock(endpoint, 'foo') - args = dict([(k, 'd' + v) for k, v in self.args.items()]) - endpoint.foo(self.dctxt, **args).AndReturn(self.retval) - - self.mox.StubOutWithMock(serializer, 'serialize_entity') - self.mox.StubOutWithMock(serializer, 'deserialize_entity') - self.mox.StubOutWithMock(serializer, 'deserialize_context') - - serializer.deserialize_context(self.ctxt).AndReturn(self.dctxt) - - for arg in self.args: - serializer.deserialize_entity(self.dctxt, arg).AndReturn('d' + arg) - - serializer.serialize_entity(self.dctxt, self.retval).\ - AndReturn('s' + self.retval if self.retval else None) - - self.mox.ReplayAll() - - incoming = mock.Mock() - incoming.ctxt = self.ctxt - incoming.message = dict(method='foo', args=self.args) - retval = dispatcher.dispatch(incoming) - if self.retval is not None: - self.assertEqual('s' + self.retval, retval) diff --git a/oslo_messaging/tests/rpc/test_server.py b/oslo_messaging/tests/rpc/test_server.py deleted file mode 100644 index 03e46c8..0000000 --- a/oslo_messaging/tests/rpc/test_server.py +++ /dev/null @@ -1,863 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
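The dispatcher tests deleted above pin down how RPCDispatcher routes an incoming message: the method name must exist on an endpoint, the message namespace must match the endpoint target's namespace (or one of its legacy_namespaces), and the message version must be compatible (same major version, requested minor no greater than the endpoint's). A minimal sketch of that selection in use, assuming the public API exercised by those tests; the endpoint class and message values are illustrative:

    from six.moves import mock

    import oslo_messaging

    class Worker(object):
        # An endpoint advertises the namespace/version it serves via
        # its target attribute.
        target = oslo_messaging.Target(namespace='testns', version='2.5')

        def foo(self, ctxt, **kwargs):
            return 'handled'

    # A falsy serializer makes the dispatcher fall back to a no-op one,
    # as the tests above do by passing None.
    dispatcher = oslo_messaging.RPCDispatcher([Worker()], serializer=None)

    # Namespace matches and 2.1 is compatible with 2.5, so this is
    # dispatched to Worker.foo(); a 3.x version would raise
    # UnsupportedVersion instead.
    incoming = mock.Mock(ctxt={}, message=dict(method='foo',
                                               namespace='testns',
                                               version='2.1'))
    print(dispatcher.dispatch(incoming))  # -> 'handled'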
- -import eventlet -import threading - -from oslo_config import cfg -import testscenarios - -import mock -import oslo_messaging -from oslo_messaging.rpc import server as rpc_server_module -from oslo_messaging import server as server_module -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - - -class ServerSetupMixin(object): - - class Server(object): - def __init__(self, transport, topic, server, endpoint, serializer): - self.controller = ServerSetupMixin.ServerController() - target = oslo_messaging.Target(topic=topic, server=server) - self.server = oslo_messaging.get_rpc_server(transport, - target, - [endpoint, - self.controller], - serializer=serializer) - - def wait(self): - # Wait for the executor to process the stop message, indicating all - # test messages have been processed - self.controller.stopped.wait() - - # Check start() does nothing with a running server - self.server.start() - self.server.stop() - self.server.wait() - - def start(self): - self.server.start() - - class ServerController(object): - def __init__(self): - self.stopped = threading.Event() - - def stop(self, ctxt): - self.stopped.set() - - class TestSerializer(object): - - def serialize_entity(self, ctxt, entity): - return ('s' + entity) if entity else entity - - def deserialize_entity(self, ctxt, entity): - return ('d' + entity) if entity else entity - - def serialize_context(self, ctxt): - return dict([(k, 's' + v) for k, v in ctxt.items()]) - - def deserialize_context(self, ctxt): - return dict([(k, 'd' + v) for k, v in ctxt.items()]) - - def __init__(self): - self.serializer = self.TestSerializer() - - def _setup_server(self, transport, endpoint, topic=None, server=None): - server = self.Server(transport, - topic=topic or 'testtopic', - server=server or 'testserver', - endpoint=endpoint, - serializer=self.serializer) - - server.start() - return server - - def _stop_server(self, client, server, topic=None): - if topic is not None: - client = client.prepare(topic=topic) - client.cast({}, 'stop') - server.wait() - - def _setup_client(self, transport, topic='testtopic'): - return oslo_messaging.RPCClient(transport, - oslo_messaging.Target(topic=topic), - serializer=self.serializer) - - -class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin): - - def __init__(self, *args): - super(TestRPCServer, self).__init__(*args) - ServerSetupMixin.__init__(self) - - def setUp(self): - super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts()) - - def test_constructor(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - target = oslo_messaging.Target(topic='foo', server='bar') - endpoints = [object()] - serializer = object() - - server = oslo_messaging.get_rpc_server(transport, target, endpoints, - serializer=serializer) - - self.assertIs(server.conf, self.conf) - self.assertIs(server.transport, transport) - self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher) - self.assertIs(server.dispatcher.endpoints, endpoints) - self.assertIs(server.dispatcher.serializer, serializer) - self.assertEqual('blocking', server.executor_type) - - def test_server_wait_method(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - target = oslo_messaging.Target(topic='foo', server='bar') - endpoints = [object()] - serializer = object() - - class MagicMockIgnoreArgs(mock.MagicMock): - """MagicMock ignores arguments. - - A MagicMock which can never misinterpret the arguments passed to - it during construction. 
- """ - - def __init__(self, *args, **kwargs): - super(MagicMockIgnoreArgs, self).__init__() - - server = oslo_messaging.get_rpc_server(transport, target, endpoints, - serializer=serializer) - # Mocking executor - server._executor_cls = MagicMockIgnoreArgs - server._create_listener = MagicMockIgnoreArgs() - server.dispatcher = MagicMockIgnoreArgs() - # Here assigning executor's listener object to listener variable - # before calling wait method, because in wait method we are - # setting executor to None. - server.start() - listener = server.listener - server.stop() - # call server wait method - server.wait() - self.assertEqual(1, listener.cleanup.call_count) - - def test_no_target_server(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - server = oslo_messaging.get_rpc_server( - transport, - oslo_messaging.Target(topic='testtopic'), - []) - try: - server.start() - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) - self.assertEqual('testtopic', ex.target.topic) - else: - self.assertTrue(False) - - def test_no_server_topic(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - target = oslo_messaging.Target(server='testserver') - server = oslo_messaging.get_rpc_server(transport, target, []) - try: - server.start() - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) - self.assertEqual('testserver', ex.target.server) - else: - self.assertTrue(False) - - def _test_no_client_topic(self, call=True): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - client = self._setup_client(transport, topic=None) - - method = client.call if call else client.cast - - try: - method({}, 'ping', arg='foo') - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) - self.assertIsNotNone(ex.target) - else: - self.assertTrue(False) - - def test_no_client_topic_call(self): - self._test_no_client_topic(call=True) - - def test_no_client_topic_cast(self): - self._test_no_client_topic(call=False) - - def test_client_call_timeout(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - finished = False - wait = threading.Condition() - - class TestEndpoint(object): - def ping(self, ctxt, arg): - with wait: - if not finished: - wait.wait() - - server_thread = self._setup_server(transport, TestEndpoint()) - client = self._setup_client(transport) - - try: - client.prepare(timeout=0).call({}, 'ping', arg='foo') - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex) - else: - self.assertTrue(False) - - with wait: - finished = True - wait.notify() - - self._stop_server(client, server_thread) - - def test_unknown_executor(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - try: - oslo_messaging.get_rpc_server(transport, None, [], executor='foo') - except Exception as ex: - self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure) - self.assertEqual('foo', ex.executor) - else: - self.assertTrue(False) - - def test_cast(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - class TestEndpoint(object): - def __init__(self): - self.pings = [] - - def ping(self, ctxt, arg): - self.pings.append(arg) - - endpoint = TestEndpoint() - server_thread = self._setup_server(transport, endpoint) - client = self._setup_client(transport) - - client.cast({}, 'ping', arg='foo') - client.cast({}, 'ping', arg='bar') - - self._stop_server(client, server_thread) - - 
self.assertEqual(['dsfoo', 'dsbar'], endpoint.pings) - - def test_call(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - class TestEndpoint(object): - def ping(self, ctxt, arg): - return arg - - server_thread = self._setup_server(transport, TestEndpoint()) - client = self._setup_client(transport) - - self.assertIsNone(client.call({}, 'ping', arg=None)) - self.assertEqual(0, client.call({}, 'ping', arg=0)) - self.assertFalse(client.call({}, 'ping', arg=False)) - self.assertEqual([], client.call({}, 'ping', arg=[])) - self.assertEqual({}, client.call({}, 'ping', arg={})) - self.assertEqual('dsdsfoo', client.call({}, 'ping', arg='foo')) - - self._stop_server(client, server_thread) - - def test_direct_call(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - class TestEndpoint(object): - def ping(self, ctxt, arg): - return arg - - server_thread = self._setup_server(transport, TestEndpoint()) - client = self._setup_client(transport) - - direct = client.prepare(server='testserver') - self.assertIsNone(direct.call({}, 'ping', arg=None)) - self.assertEqual(0, client.call({}, 'ping', arg=0)) - self.assertFalse(client.call({}, 'ping', arg=False)) - self.assertEqual([], client.call({}, 'ping', arg=[])) - self.assertEqual({}, client.call({}, 'ping', arg={})) - self.assertEqual('dsdsfoo', direct.call({}, 'ping', arg='foo')) - - self._stop_server(client, server_thread) - - def test_context(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - class TestEndpoint(object): - def ctxt_check(self, ctxt, key): - return ctxt[key] - - server_thread = self._setup_server(transport, TestEndpoint()) - client = self._setup_client(transport) - - self.assertEqual('dsdsb', - client.call({'dsa': 'b'}, - 'ctxt_check', - key='a')) - - self._stop_server(client, server_thread) - - def test_failure(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - class TestEndpoint(object): - def ping(self, ctxt, arg): - raise ValueError(arg) - - debugs = [] - errors = [] - - def stub_debug(msg, *a, **kw): - if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): - a = a[0] - debugs.append(str(msg) % a) - - def stub_error(msg, *a, **kw): - if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): - a = a[0] - errors.append(str(msg) % a) - - self.stubs.Set(rpc_server_module.LOG, 'debug', stub_debug) - self.stubs.Set(rpc_server_module.LOG, 'error', stub_error) - - server_thread = self._setup_server(transport, TestEndpoint()) - client = self._setup_client(transport) - - try: - client.call({}, 'ping', arg='foo') - except Exception as ex: - self.assertIsInstance(ex, ValueError) - self.assertEqual('dsfoo', str(ex)) - self.assertTrue(len(debugs) == 0) - self.assertTrue(len(errors) > 0) - else: - self.assertTrue(False) - - self._stop_server(client, server_thread) - - def test_expected_failure(self): - transport = oslo_messaging.get_transport(self.conf, url='fake:') - - debugs = [] - errors = [] - - def stub_debug(msg, *a, **kw): - if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): - a = a[0] - debugs.append(str(msg) % a) - - def stub_error(msg, *a, **kw): - if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): - a = a[0] - errors.append(str(msg) % a) - - self.stubs.Set(rpc_server_module.LOG, 'debug', stub_debug) - self.stubs.Set(rpc_server_module.LOG, 'error', stub_error) - - class TestEndpoint(object): - @oslo_messaging.expected_exceptions(ValueError) - def ping(self, ctxt, arg): - raise ValueError(arg) - - server_thread = 
self._setup_server(transport, TestEndpoint()) - client = self._setup_client(transport) - - try: - client.call({}, 'ping', arg='foo') - except Exception as ex: - self.assertIsInstance(ex, ValueError) - self.assertEqual('dsfoo', str(ex)) - self.assertTrue(len(debugs) > 0) - self.assertTrue(len(errors) == 0) - else: - self.assertTrue(False) - - self._stop_server(client, server_thread) - - -class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin): - - _exchanges = [ - ('same_exchange', dict(exchange1=None, exchange2=None)), - ('diff_exchange', dict(exchange1='x1', exchange2='x2')), - ] - - _topics = [ - ('same_topic', dict(topic1='t', topic2='t')), - ('diff_topic', dict(topic1='t1', topic2='t2')), - ] - - _server = [ - ('same_server', dict(server1=None, server2=None)), - ('diff_server', dict(server1='s1', server2='s2')), - ] - - _fanout = [ - ('not_fanout', dict(fanout1=None, fanout2=None)), - ('fanout', dict(fanout1=True, fanout2=True)), - ] - - _method = [ - ('call', dict(call1=True, call2=True)), - ('cast', dict(call1=False, call2=False)), - ] - - _endpoints = [ - ('one_endpoint', - dict(multi_endpoints=False, - expect1=['ds1', 'ds2'], - expect2=['ds1', 'ds2'])), - ('two_endpoints', - dict(multi_endpoints=True, - expect1=['ds1'], - expect2=['ds2'])), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges, - cls._topics, - cls._server, - cls._fanout, - cls._method, - cls._endpoints) - - # fanout call not supported - def filter_fanout_call(scenario): - params = scenario[1] - fanout = params['fanout1'] or params['fanout2'] - call = params['call1'] or params['call2'] - return not (call and fanout) - - # listening multiple times on same topic/server pair not supported - def filter_same_topic_and_server(scenario): - params = scenario[1] - single_topic = params['topic1'] == params['topic2'] - single_server = params['server1'] == params['server2'] - return not (single_topic and single_server) - - # fanout to multiple servers on same topic and exchange - # each endpoint will receive both messages - def fanout_to_servers(scenario): - params = scenario[1] - fanout = params['fanout1'] or params['fanout2'] - single_exchange = params['exchange1'] == params['exchange2'] - single_topic = params['topic1'] == params['topic2'] - multi_servers = params['server1'] != params['server2'] - if fanout and single_exchange and single_topic and multi_servers: - params['expect1'] = params['expect1'][:] + params['expect1'] - params['expect2'] = params['expect2'][:] + params['expect2'] - return scenario - - # multiple endpoints on same topic and exchange - # either endpoint can get either message - def single_topic_multi_endpoints(scenario): - params = scenario[1] - single_exchange = params['exchange1'] == params['exchange2'] - single_topic = params['topic1'] == params['topic2'] - if single_topic and single_exchange and params['multi_endpoints']: - params['expect_either'] = (params['expect1'] + - params['expect2']) - params['expect1'] = params['expect2'] = [] - else: - params['expect_either'] = [] - return scenario - - for f in [filter_fanout_call, filter_same_topic_and_server]: - cls.scenarios = [i for i in cls.scenarios if f(i)] - for m in [fanout_to_servers, single_topic_multi_endpoints]: - cls.scenarios = [m(i) for i in cls.scenarios] - - def __init__(self, *args): - super(TestMultipleServers, self).__init__(*args) - ServerSetupMixin.__init__(self) - - def setUp(self): - super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts()) - - def 
test_multiple_servers(self): - url1 = 'fake:///' + (self.exchange1 or '') - url2 = 'fake:///' + (self.exchange2 or '') - - transport1 = oslo_messaging.get_transport(self.conf, url=url1) - if url1 != url2: - transport2 = oslo_messaging.get_transport(self.conf, url=url2) - else: - transport2 = transport1 - - class TestEndpoint(object): - def __init__(self): - self.pings = [] - - def ping(self, ctxt, arg): - self.pings.append(arg) - - def alive(self, ctxt): - return 'alive' - - if self.multi_endpoints: - endpoint1, endpoint2 = TestEndpoint(), TestEndpoint() - else: - endpoint1 = endpoint2 = TestEndpoint() - - server1 = self._setup_server(transport1, endpoint1, - topic=self.topic1, server=self.server1) - server2 = self._setup_server(transport2, endpoint2, - topic=self.topic2, server=self.server2) - - client1 = self._setup_client(transport1, topic=self.topic1) - client2 = self._setup_client(transport2, topic=self.topic2) - - client1 = client1.prepare(server=self.server1) - client2 = client2.prepare(server=self.server2) - - if self.fanout1: - client1.call({}, 'alive') - client1 = client1.prepare(fanout=True) - if self.fanout2: - client2.call({}, 'alive') - client2 = client2.prepare(fanout=True) - - (client1.call if self.call1 else client1.cast)({}, 'ping', arg='1') - (client2.call if self.call2 else client2.cast)({}, 'ping', arg='2') - - self._stop_server(client1.prepare(fanout=None), - server1, topic=self.topic1) - self._stop_server(client2.prepare(fanout=None), - server2, topic=self.topic2) - - def check(pings, expect): - self.assertEqual(len(expect), len(pings)) - for a in expect: - self.assertIn(a, pings) - - if self.expect_either: - check(endpoint1.pings + endpoint2.pings, self.expect_either) - else: - check(endpoint1.pings, self.expect1) - check(endpoint2.pings, self.expect2) - - -TestMultipleServers.generate_scenarios() - - -class TestServerLocking(test_utils.BaseTestCase): - def setUp(self): - super(TestServerLocking, self).setUp(conf=cfg.ConfigOpts()) - - def _logmethod(name): - def method(self, *args, **kwargs): - with self._lock: - self._calls.append(name) - return method - - executors = [] - - class FakeExecutor(object): - def __init__(self, *args, **kwargs): - self._lock = threading.Lock() - self._calls = [] - executors.append(self) - - submit = _logmethod('submit') - shutdown = _logmethod('shutdown') - - self.executors = executors - - class MessageHandlingServerImpl(oslo_messaging.MessageHandlingServer): - def _create_listener(self): - return mock.Mock() - - def _process_incoming(self, incoming): - pass - - self.server = MessageHandlingServerImpl(mock.Mock(), mock.Mock()) - self.server._executor_cls = FakeExecutor - - def test_start_stop_wait(self): - # Test a simple execution of start, stop, wait in order - - eventlet.spawn(self.server.start) - self.server.stop() - self.server.wait() - - self.assertEqual(1, len(self.executors)) - self.assertEqual(['shutdown'], self.executors[0]._calls) - self.assertTrue(self.server.listener.cleanup.called) - - def test_reversed_order(self): - # Test that if we call wait, stop, start, these will be correctly - # reordered - - eventlet.spawn(self.server.wait) - # This is non-deterministic, but there's not a great deal we can do - # about that - eventlet.sleep(0) - - eventlet.spawn(self.server.stop) - eventlet.sleep(0) - - eventlet.spawn(self.server.start) - - self.server.wait() - - self.assertEqual(1, len(self.executors)) - self.assertEqual(['shutdown'], self.executors[0]._calls) - - def test_wait_for_running_task(self): - # Test that if 2 threads
call a method simultaneously, both will wait, - # but only 1 will call the underlying executor method. - - start_event = threading.Event() - finish_event = threading.Event() - - running_event = threading.Event() - done_event = threading.Event() - - _runner = [None] - - class SteppingFakeExecutor(self.server._executor_cls): - def __init__(self, *args, **kwargs): - # Tell the test which thread won the race - _runner[0] = eventlet.getcurrent() - running_event.set() - - start_event.wait() - super(SteppingFakeExecutor, self).__init__(*args, **kwargs) - done_event.set() - - finish_event.wait() - - self.server._executor_cls = SteppingFakeExecutor - - start1 = eventlet.spawn(self.server.start) - start2 = eventlet.spawn(self.server.start) - - # Wait until one of the threads starts running - running_event.wait() - runner = _runner[0] - waiter = start2 if runner == start1 else start1 - - waiter_finished = threading.Event() - waiter.link(lambda _: waiter_finished.set()) - - # At this point, runner is running start(), and waiter is waiting for - # it to complete. runner has not yet logged anything. - self.assertEqual(0, len(self.executors)) - self.assertFalse(waiter_finished.is_set()) - - # Let the runner log the call - start_event.set() - done_event.wait() - - # We haven't signalled completion yet, so submit shouldn't have run - self.assertEqual(1, len(self.executors)) - self.assertEqual([], self.executors[0]._calls) - self.assertFalse(waiter_finished.is_set()) - - # Let the runner complete - finish_event.set() - waiter.wait() - runner.wait() - - # Check that both threads have finished, the executor was only - # created once, and nothing was submitted to it - self.assertTrue(waiter_finished.is_set()) - self.assertEqual(1, len(self.executors)) - self.assertEqual([], self.executors[0]._calls) - - def test_start_stop_wait_stop_wait(self): - # Test that we behave correctly when calling stop/wait more than once. - # Subsequent calls should be noops.
- - self.server.start() - self.server.stop() - self.server.wait() - self.server.stop() - self.server.wait() - - self.assertEqual(1, len(self.executors)) - self.assertEqual(['shutdown'], self.executors[0]._calls) - self.assertTrue(self.server.listener.cleanup.called) - - def test_state_wrapping(self): - # Test that we behave correctly if a thread waits, and the server state - # has wrapped when it is next scheduled - - # Ensure that if 2 threads wait for the completion of 'start', the - # first will wait until complete_event is signalled, but the second - # will continue - complete_event = threading.Event() - complete_waiting_callback = threading.Event() - - start_state = self.server._states['start'] - old_wait_for_completion = start_state.wait_for_completion - waited = [False] - - def new_wait_for_completion(*args, **kwargs): - if not waited[0]: - waited[0] = True - complete_waiting_callback.set() - complete_event.wait() - old_wait_for_completion(*args, **kwargs) - - start_state.wait_for_completion = new_wait_for_completion - - # thread1 will wait for start to complete until we signal it - thread1 = eventlet.spawn(self.server.stop) - thread1_finished = threading.Event() - thread1.link(lambda _: thread1_finished.set()) - - self.server.start() - complete_waiting_callback.wait() - - # The server should have started, but stop should not have been called - self.assertEqual(1, len(self.executors)) - self.assertEqual([], self.executors[0]._calls) - self.assertFalse(thread1_finished.is_set()) - - self.server.stop() - self.server.wait() - - # We should have gone through all the states, and thread1 should still - # be waiting - self.assertEqual(1, len(self.executors)) - self.assertEqual(['shutdown'], self.executors[0]._calls) - self.assertFalse(thread1_finished.is_set()) - - # Start again - self.server.start() - - # We should now have two executors, one for each start - self.assertEqual(2, len(self.executors)) - self.assertEqual(['shutdown'], self.executors[0]._calls) - self.assertEqual([], self.executors[1]._calls) - self.assertFalse(thread1_finished.is_set()) - - # Allow thread1 to complete - complete_event.set() - thread1_finished.wait() - - # thread1 should now have finished, and stop should not have been - # called again on either the first or second executor - self.assertEqual(2, len(self.executors)) - self.assertEqual(['shutdown'], self.executors[0]._calls) - self.assertEqual([], self.executors[1]._calls) - self.assertTrue(thread1_finished.is_set()) - - @mock.patch.object(server_module, 'DEFAULT_LOG_AFTER', 1) - @mock.patch.object(server_module, 'LOG') - def test_logging(self, mock_log): - # Test that we generate a log message if we wait longer than - # DEFAULT_LOG_AFTER - - log_event = threading.Event() - mock_log.warning.side_effect = lambda _, __: log_event.set() - - # Call stop without calling start. We should log a wait after 1 second - thread = eventlet.spawn(self.server.stop) - log_event.wait() - - # Redundant given that we already waited, but it's nice to assert - self.assertTrue(mock_log.warning.called) - thread.kill() - - @mock.patch.object(server_module, 'LOG') - def test_logging_explicit_wait(self, mock_log): - # Test that we generate a log message if we wait longer than - # the number of seconds passed to log_after - - log_event = threading.Event() - mock_log.warning.side_effect = lambda _, __: log_event.set() - - # Call stop without calling start.
We should log a wait after 1 second - thread = eventlet.spawn(self.server.stop, log_after=1) - log_event.wait() - - # Redundant given that we already waited, but it's nice to assert - self.assertTrue(mock_log.warning.called) - thread.kill() - - @mock.patch.object(server_module, 'LOG') - def test_logging_with_timeout(self, mock_log): - # Test that we log a message after log_after seconds if we've also - # specified an absolute timeout - - log_event = threading.Event() - mock_log.warning.side_effect = lambda _, __: log_event.set() - - # Call stop without calling start. We should log a wait after 1 second - thread = eventlet.spawn(self.server.stop, log_after=1, timeout=2) - log_event.wait() - - # Redundant given that we already waited, but it's nice to assert - self.assertTrue(mock_log.warning.called) - thread.kill() - - def test_timeout_wait(self): - # Test that we will eventually timeout when passing the timeout option - # if a preceding condition is not satisfied. - - self.assertRaises(server_module.TaskTimeout, - self.server.stop, timeout=1) - - def test_timeout_running(self): - # Test that we will eventually timeout if we're waiting for another - # thread to complete this task - - # Start the server, which will also instantiate an executor - self.server.start() - self.server.stop() - shutdown_called = threading.Event() - - # Patch the executor's shutdown method to be very slow - def slow_shutdown(wait): - shutdown_called.set() - eventlet.sleep(10) - self.executors[0].shutdown = slow_shutdown - - # Call wait in a new thread - thread = eventlet.spawn(self.server.wait) - - # Wait until the thread is in the slow shutdown method - shutdown_called.wait() - - # Call wait again in the main thread with a timeout - self.assertRaises(server_module.TaskTimeout, - self.server.wait, timeout=1) - thread.kill() - - @mock.patch.object(server_module, 'LOG') - def test_log_after_zero(self, mock_log): - # Test that we do not log a message after DEFAULT_LOG_AFTER if the - # caller gave log_after=0 - - # Call stop without calling start. - self.assertRaises(server_module.TaskTimeout, - self.server.stop, log_after=0, timeout=2) - - # We timed out. Ensure we didn't log anything. - self.assertFalse(mock_log.warning.called) diff --git a/oslo_messaging/tests/test_config_opts_proxy.py b/oslo_messaging/tests/test_config_opts_proxy.py deleted file mode 100644 index e51794c..0000000 --- a/oslo_messaging/tests/test_config_opts_proxy.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
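The TestServerLocking cases above pin down the ordering guarantees of MessageHandlingServer: start/stop/wait may be issued from different threads and are reordered internally, repeated stop/wait calls are no-ops, a thread blocked on another thread's task logs a warning after log_after seconds, and a timeout argument turns an indefinite wait into a TaskTimeout. A short sketch of the happy path, assuming the 'fake' in-memory driver; the topic and endpoint are illustrative:

    from oslo_config import cfg

    import oslo_messaging

    conf = cfg.ConfigOpts()
    transport = oslo_messaging.get_transport(conf, url='fake:')

    class Endpoint(object):
        def ping(self, ctxt, arg):
            return arg

    target = oslo_messaging.Target(topic='demo', server='host-1')
    server = oslo_messaging.get_rpc_server(transport, target, [Endpoint()],
                                           executor='blocking')

    # The server is a small state machine: these calls can come from
    # different threads and are serialised internally, and calling
    # stop()/wait() a second time is a no-op (per the tests above).
    server.start()
    server.stop()
    server.wait()

Per the same tests, the waits can be bounded: stop(log_after=30) logs a warning if blocked that long, and stop(timeout=60) raises the server module's TaskTimeout once exceeded.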
- -from oslo_config import cfg -from oslo_config import types - -from oslo_messaging._drivers import common as drv_cmn -from oslo_messaging.tests import utils as test_utils -from oslo_messaging import transport - - -class TestConfigOptsProxy(test_utils.BaseTestCase): - - def test_rabbit(self): - group = 'oslo_messaging_rabbit' - self.config(rabbit_retry_interval=1, - rabbit_qos_prefetch_count=0, - rabbit_max_retries=3, - kombu_reconnect_delay=5.0, - group=group) - dummy_opts = [cfg.ListOpt('list_str', item_type=types.String(), - default=[]), - cfg.ListOpt('list_int', item_type=types.Integer(), - default=[]), - cfg.DictOpt('dict', default={}), - cfg.BoolOpt('bool', default=False), - cfg.StrOpt('str', default='default')] - self.conf.register_opts(dummy_opts, group=group) - url = transport.TransportURL.parse( - self.conf, "rabbit:///" - "?rabbit_qos_prefetch_count=2" - "&unknown_opt=4" - "&kombu_reconnect_delay=invalid_value" - "&list_str=1&list_str=2&list_str=3" - "&list_int=1&list_int=2&list_int=3" - "&dict=x:1&dict=y:2&dict=z:3" - "&bool=True" - ) - conf = drv_cmn.ConfigOptsProxy(self.conf, url) - self.assertRaises(cfg.NoSuchOptError, - conf.__getattr__, - 'unknown_group') - self.assertTrue(isinstance(getattr(conf, group), - conf.GroupAttrProxy)) - self.assertEqual(1, conf.oslo_messaging_rabbit.rabbit_retry_interval) - self.assertEqual(2, - conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count) - self.assertEqual(3, conf.oslo_messaging_rabbit.rabbit_max_retries) - self.assertRaises(cfg.NoSuchOptError, - conf.oslo_messaging_rabbit.__getattr__, - 'unknown_opt') - self.assertRaises(ValueError, - conf.oslo_messaging_rabbit.__getattr__, - 'kombu_reconnect_delay') - self.assertEqual(['1', '2', '3'], conf.oslo_messaging_rabbit.list_str) - self.assertEqual([1, 2, 3], conf.oslo_messaging_rabbit.list_int) - self.assertEqual({'x': '1', 'y': '2', 'z': '3'}, - conf.oslo_messaging_rabbit.dict) - self.assertEqual(True, conf.oslo_messaging_rabbit.bool) - self.assertEqual('default', conf.oslo_messaging_rabbit.str) diff --git a/oslo_messaging/tests/test_exception_serialization.py b/oslo_messaging/tests/test_exception_serialization.py deleted file mode 100644 index ca4f92b..0000000 --- a/oslo_messaging/tests/test_exception_serialization.py +++ /dev/null @@ -1,288 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
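The proxy test above documents a useful property of transport URLs: driver options supplied as query-string parameters override the registered configuration, with values converted to each option's declared type, and unknown or malformed options rejected on access. A hedged sketch of that mechanism; note that ConfigOptsProxy lives in a private module, so this mirrors the test above rather than a public API, and the option is registered by hand here to keep the example self-contained:

    from oslo_config import cfg

    from oslo_messaging._drivers import common as drv_cmn
    from oslo_messaging import transport

    conf = cfg.ConfigOpts()
    conf.register_opts([cfg.IntOpt('rabbit_qos_prefetch_count', default=0)],
                       group='oslo_messaging_rabbit')

    url = transport.TransportURL.parse(
        conf, 'rabbit:///?rabbit_qos_prefetch_count=2')

    # Drivers wrap their conf in the proxy; reads of options named in the
    # URL query string return the URL value, converted to the option type.
    proxy = drv_cmn.ConfigOptsProxy(conf, url)
    print(proxy.oslo_messaging_rabbit.rabbit_qos_prefetch_count)  # -> 2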
- -import sys - -from oslo_serialization import jsonutils -import six -import testscenarios - -import oslo_messaging -from oslo_messaging._drivers import common as exceptions -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - -EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' - - -class NovaStyleException(Exception): - - format = 'I am Nova' - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - if not message: - message = self.format % kwargs - super(NovaStyleException, self).__init__(message) - - -class KwargsStyleException(NovaStyleException): - - format = 'I am %(who)s' - - -def add_remote_postfix(ex): - ex_type = type(ex) - message = str(ex) - str_override = lambda self: message - new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), - {'__str__': str_override, - '__unicode__': str_override}) - new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__ - try: - ex.__class__ = new_ex_type - except TypeError: - ex.args = (message,) + ex.args[1:] - return ex - - -class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase): - - _add_remote = [ - ('add_remote', dict(add_remote=True)), - ('do_not_add_remote', dict(add_remote=False)), - ] - - _exception_types = [ - ('bog_standard', dict(cls=Exception, - args=['test'], - kwargs={}, - clsname='Exception', - modname=EXCEPTIONS_MODULE, - msg='test')), - ('nova_style', dict(cls=NovaStyleException, - args=[], - kwargs={}, - clsname='NovaStyleException', - modname=__name__, - msg='I am Nova')), - ('nova_style_with_msg', dict(cls=NovaStyleException, - args=['testing'], - kwargs={}, - clsname='NovaStyleException', - modname=__name__, - msg='testing')), - ('kwargs_style', dict(cls=KwargsStyleException, - args=[], - kwargs={'who': 'Oslo'}, - clsname='KwargsStyleException', - modname=__name__, - msg='I am Oslo')), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios(cls._add_remote, - cls._exception_types) - - def setUp(self): - super(SerializeRemoteExceptionTestCase, self).setUp() - - def test_serialize_remote_exception(self): - try: - try: - raise self.cls(*self.args, **self.kwargs) - except Exception as ex: - # Note: in Python 3 ex variable will be cleared at the end of - # the except clause, so explicitly make an extra copy of it - cls_error = ex - if self.add_remote: - ex = add_remote_postfix(ex) - raise ex - except Exception: - exc_info = sys.exc_info() - - serialized = exceptions.serialize_remote_exception(exc_info) - - failure = jsonutils.loads(serialized) - - self.assertEqual(self.clsname, failure['class'], failure) - self.assertEqual(self.modname, failure['module']) - self.assertEqual(self.msg, failure['message']) - self.assertEqual([self.msg], failure['args']) - self.assertEqual(self.kwargs, failure['kwargs']) - - # Note: _Remote prefix not stripped from tracebacks - tb = cls_error.__class__.__name__ + ': ' + self.msg - self.assertIn(tb, ''.join(failure['tb'])) - - -SerializeRemoteExceptionTestCase.generate_scenarios() - - -class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase): - - _standard_allowed = [__name__] - - scenarios = [ - ('bog_standard', - dict(allowed=_standard_allowed, - clsname='Exception', - modname=EXCEPTIONS_MODULE, - cls=Exception, - args=['test'], - kwargs={}, - str='test\ntraceback\ntraceback\n', - remote_name='Exception', - remote_args=('test\ntraceback\ntraceback\n', ), - remote_kwargs={})), - ('nova_style', - dict(allowed=_standard_allowed, - 
clsname='NovaStyleException', - modname=__name__, - cls=NovaStyleException, - args=[], - kwargs={}, - str='test\ntraceback\ntraceback\n', - remote_name='NovaStyleException_Remote', - remote_args=('I am Nova', ), - remote_kwargs={})), - ('nova_style_with_msg', - dict(allowed=_standard_allowed, - clsname='NovaStyleException', - modname=__name__, - cls=NovaStyleException, - args=['testing'], - kwargs={}, - str='test\ntraceback\ntraceback\n', - remote_name='NovaStyleException_Remote', - remote_args=('testing', ), - remote_kwargs={})), - ('kwargs_style', - dict(allowed=_standard_allowed, - clsname='KwargsStyleException', - modname=__name__, - cls=KwargsStyleException, - args=[], - kwargs={'who': 'Oslo'}, - str='test\ntraceback\ntraceback\n', - remote_name='KwargsStyleException_Remote', - remote_args=('I am Oslo', ), - remote_kwargs={})), - ('not_allowed', - dict(allowed=[], - clsname='NovaStyleException', - modname=__name__, - cls=oslo_messaging.RemoteError, - args=[], - kwargs={}, - str=("Remote error: NovaStyleException test\n" - "[%r]." % u'traceback\ntraceback\n'), - msg=("Remote error: NovaStyleException test\n" - "[%r]." % u'traceback\ntraceback\n'), - remote_name='RemoteError', - remote_args=(), - remote_kwargs={'exc_type': 'NovaStyleException', - 'value': 'test', - 'traceback': 'traceback\ntraceback\n'})), - ('unknown_module', - dict(allowed=['notexist'], - clsname='Exception', - modname='notexist', - cls=oslo_messaging.RemoteError, - args=[], - kwargs={}, - str=("Remote error: Exception test\n" - "[%r]." % u'traceback\ntraceback\n'), - msg=("Remote error: Exception test\n" - "[%r]." % u'traceback\ntraceback\n'), - remote_name='RemoteError', - remote_args=(), - remote_kwargs={'exc_type': 'Exception', - 'value': 'test', - 'traceback': 'traceback\ntraceback\n'})), - ('unknown_exception', - dict(allowed=[], - clsname='FarcicalError', - modname=EXCEPTIONS_MODULE, - cls=oslo_messaging.RemoteError, - args=[], - kwargs={}, - str=("Remote error: FarcicalError test\n" - "[%r]." % u'traceback\ntraceback\n'), - msg=("Remote error: FarcicalError test\n" - "[%r]." % u'traceback\ntraceback\n'), - remote_name='RemoteError', - remote_args=(), - remote_kwargs={'exc_type': 'FarcicalError', - 'value': 'test', - 'traceback': 'traceback\ntraceback\n'})), - ('unknown_kwarg', - dict(allowed=[], - clsname='Exception', - modname=EXCEPTIONS_MODULE, - cls=oslo_messaging.RemoteError, - args=[], - kwargs={'foobar': 'blaa'}, - str=("Remote error: Exception test\n" - "[%r]." % u'traceback\ntraceback\n'), - msg=("Remote error: Exception test\n" - "[%r]." % u'traceback\ntraceback\n'), - remote_name='RemoteError', - remote_args=(), - remote_kwargs={'exc_type': 'Exception', - 'value': 'test', - 'traceback': 'traceback\ntraceback\n'})), - ('system_exit', - dict(allowed=[], - clsname='SystemExit', - modname=EXCEPTIONS_MODULE, - cls=oslo_messaging.RemoteError, - args=[], - kwargs={}, - str=("Remote error: SystemExit test\n" - "[%r]." % u'traceback\ntraceback\n'), - msg=("Remote error: SystemExit test\n" - "[%r]." 
% u'traceback\ntraceback\n'), - remote_name='RemoteError', - remote_args=(), - remote_kwargs={'exc_type': 'SystemExit', - 'value': 'test', - 'traceback': 'traceback\ntraceback\n'})), - ] - - def test_deserialize_remote_exception(self): - failure = { - 'class': self.clsname, - 'module': self.modname, - 'message': 'test', - 'tb': ['traceback\ntraceback\n'], - 'args': self.args, - 'kwargs': self.kwargs, - } - - serialized = jsonutils.dumps(failure) - - ex = exceptions.deserialize_remote_exception(serialized, self.allowed) - - self.assertIsInstance(ex, self.cls) - self.assertEqual(self.remote_name, ex.__class__.__name__) - self.assertEqual(self.str, six.text_type(ex)) - if hasattr(self, 'msg'): - self.assertEqual(self.msg, six.text_type(ex)) - self.assertEqual((self.msg,) + self.remote_args, ex.args) - else: - self.assertEqual(self.remote_args, ex.args) diff --git a/oslo_messaging/tests/test_expected_exceptions.py b/oslo_messaging/tests/test_expected_exceptions.py deleted file mode 100644 index 40c4a22..0000000 --- a/oslo_messaging/tests/test_expected_exceptions.py +++ /dev/null @@ -1,66 +0,0 @@ - -# Copyright 2012 OpenStack Foundation -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_messaging -from oslo_messaging.tests import utils as test_utils - - -class TestExpectedExceptions(test_utils.BaseTestCase): - - def test_exception(self): - e = None - try: - try: - raise ValueError() - except Exception: - raise oslo_messaging.ExpectedException() - except oslo_messaging.ExpectedException as e: - self.assertIsInstance(e, oslo_messaging.ExpectedException) - self.assertTrue(hasattr(e, 'exc_info')) - self.assertIsInstance(e.exc_info[1], ValueError) - - def test_decorator_expected(self): - class FooException(Exception): - pass - - @oslo_messaging.expected_exceptions(FooException) - def naughty(): - raise FooException() - - self.assertRaises(oslo_messaging.ExpectedException, naughty) - - def test_decorator_expected_subclass(self): - class FooException(Exception): - pass - - class BarException(FooException): - pass - - @oslo_messaging.expected_exceptions(FooException) - def naughty(): - raise BarException() - - self.assertRaises(oslo_messaging.ExpectedException, naughty) - - def test_decorator_unexpected(self): - class FooException(Exception): - pass - - @oslo_messaging.expected_exceptions(FooException) - def really_naughty(): - raise ValueError() - - self.assertRaises(ValueError, really_naughty) diff --git a/oslo_messaging/tests/test_fixture.py b/oslo_messaging/tests/test_fixture.py deleted file mode 100644 index a8c2eb6..0000000 --- a/oslo_messaging/tests/test_fixture.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
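test_expected_exceptions.py, removed above, covers the expected_exceptions decorator: exception types listed in it (and their subclasses) are wrapped in ExpectedException, which lets the RPC server log them at debug level rather than error while still propagating them to the caller; anything not listed passes through unwrapped. A small sketch of the behaviour those tests assert, with an illustrative exception and endpoint:

    import oslo_messaging

    class NotFound(Exception):
        pass

    class Endpoint(object):
        @oslo_messaging.expected_exceptions(NotFound)
        def get(self, ctxt, key):
            raise NotFound(key)

    # Outside a running server the wrapper is visible directly: the
    # original exception travels in exc_info, unlisted ones (e.g. a
    # ValueError here) would propagate unchanged.
    try:
        Endpoint().get({}, 'missing-key')
    except oslo_messaging.ExpectedException as e:
        print(type(e.exc_info[1]).__name__)  # -> NotFound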
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from oslo_messaging import conffixture -from oslo_messaging.tests import utils as test_utils - - -class TestConfFixture(test_utils.BaseTestCase): - - def test_fixture_wraps_set_override(self): - conf = self.messaging_conf.conf - self.assertIsNotNone(conf.set_override.wrapped) - self.messaging_conf._teardown_decorator() - self.assertFalse(hasattr(conf.set_override, 'wrapped')) - - def test_fixture_wraps_clear_override(self): - conf = self.messaging_conf.conf - self.assertIsNotNone(conf.clear_override.wrapped) - self.messaging_conf._teardown_decorator() - self.assertFalse(hasattr(conf.clear_override, 'wrapped')) - - def test_fixture_setup_teardown_decorator(self): - conf = cfg.ConfigOpts() - self.assertFalse(hasattr(conf.set_override, 'wrapped')) - self.assertFalse(hasattr(conf.clear_override, 'wrapped')) - fixture = conffixture.ConfFixture(conf) - self.assertFalse(hasattr(conf.set_override, 'wrapped')) - self.assertFalse(hasattr(conf.clear_override, 'wrapped')) - self.useFixture(fixture) - self.assertTrue(hasattr(conf.set_override, 'wrapped')) - self.assertTrue(hasattr(conf.clear_override, 'wrapped')) - fixture._teardown_decorator() - self.assertFalse(hasattr(conf.set_override, 'wrapped')) - self.assertFalse(hasattr(conf.clear_override, 'wrapped')) - - def test_fixture_properties(self): - conf = self.messaging_conf.conf - self.messaging_conf.transport_driver = 'fake' - self.assertEqual('fake', - self.messaging_conf.transport_driver) - self.assertEqual('fake', - conf.rpc_backend) - - def test_old_notifications_config_override(self): - conf = self.messaging_conf.conf - conf.set_override( - "notification_driver", "messaging") - conf.set_override( - "notification_transport_url", "http://xyz") - conf.set_override( - "notification_topics", ['topic1']) - - self.assertEqual("messaging", - conf.oslo_messaging_notifications.driver) - self.assertEqual("http://xyz", - conf.oslo_messaging_notifications.transport_url) - self.assertEqual(['topic1'], - conf.oslo_messaging_notifications.topics) - - conf.clear_override("notification_driver") - conf.clear_override("notification_transport_url") - conf.clear_override("notification_topics") - - self.assertEqual([], - conf.oslo_messaging_notifications.driver) - self.assertIsNone(conf.oslo_messaging_notifications.transport_url) - self.assertEqual(['notifications'], - conf.oslo_messaging_notifications.topics) diff --git a/oslo_messaging/tests/test_opts.py b/oslo_messaging/tests/test_opts.py deleted file mode 100644 index 0e4b1f8..0000000 --- a/oslo_messaging/tests/test_opts.py +++ /dev/null @@ -1,75 +0,0 @@ - -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import stevedore -import testtools - -import mock - -from oslo_messaging import server -try: - from oslo_messaging import opts -except ImportError: - opts = None -from oslo_messaging.tests import utils as test_utils - - -class OptsTestCase(test_utils.BaseTestCase): - - @testtools.skipIf(opts is None, "Options not importable") - def setUp(self): - super(OptsTestCase, self).setUp() - - def _test_list_opts(self, result): - self.assertEqual(6, len(result)) - - groups = [g for (g, l) in result] - self.assertIn(None, groups) - self.assertIn('matchmaker_redis', groups) - self.assertIn('oslo_messaging_zmq', groups) - self.assertIn('oslo_messaging_amqp', groups) - self.assertIn('oslo_messaging_notifications', groups) - self.assertIn('oslo_messaging_rabbit', groups) - - opt_names = [o.name for (g, l) in result for o in l] - self.assertIn('rpc_backend', opt_names) - - def test_list_opts(self): - self._test_list_opts(opts.list_opts()) - - def test_entry_point(self): - result = None - for ext in stevedore.ExtensionManager('oslo.config.opts', - invoke_on_load=True): - if ext.name == "oslo.messaging": - result = ext.obj - break - - self.assertIsNotNone(result) - self._test_list_opts(result) - - def test_defaults(self): - transport = mock.Mock() - transport.conf = self.conf - - class MessageHandlingServerImpl(server.MessageHandlingServer): - def _create_listener(self): - pass - - def _process_incoming(self, incoming): - pass - - MessageHandlingServerImpl(transport, mock.Mock()) - opts.set_defaults(self.conf, executor_thread_pool_size=100) - self.assertEqual(100, self.conf.executor_thread_pool_size) diff --git a/oslo_messaging/tests/test_serializer.py b/oslo_messaging/tests/test_serializer.py deleted file mode 100644 index 858da45..0000000 --- a/oslo_messaging/tests/test_serializer.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
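The serializer tests below check RequestContextSerializer, which delegates entity (de)serialization to a wrapped base serializer, passes entities through untouched when the base is falsy, and flattens request contexts to plain dicts for the wire. A brief sketch of that contract, using NoOpSerializer as the base (the context attribute is illustrative):

    from oslo_context import context as common_context

    from oslo_messaging import serializer

    # With a no-op base, entities pass through unchanged, but the
    # context is still converted to/from a dict for transmission.
    s = serializer.RequestContextSerializer(serializer.NoOpSerializer())

    ctxt = common_context.RequestContext(user='bob')
    wire_ctxt = s.serialize_context(ctxt)              # ctxt.to_dict()
    entity = s.serialize_entity(ctxt, {'foo': 'bar'})  # {'foo': 'bar'}
    restored = s.deserialize_context(wire_ctxt)        # a RequestContext
    print(wire_ctxt['user'], type(restored).__name__)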
- -from oslo_context import context as common_context -from six.moves import mock - -from oslo_messaging import serializer -from oslo_messaging.tests import utils as test_utils - - -class TestRequestContextSerializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestRequestContextSerializer, self).setUp() - - self.serializer = serializer.RequestContextSerializer(mock.MagicMock()) - self.context = common_context.RequestContext() - self.entity = {'foo': 'bar'} - - def test_serialize_entity(self): - self.serializer.serialize_entity(self.context, self.entity) - self.serializer._base.serialize_entity.assert_called_with( - self.context, self.entity) - - def test_serialize_entity_empty_base(self): - # NOTE(viktors): Return False for check `if self.serializer._base:` - bool_args = {'__bool__': lambda *args: False, - '__nonzero__': lambda *args: False} - self.serializer._base.configure_mock(**bool_args) - - entity = self.serializer.serialize_entity(self.context, self.entity) - self.assertFalse(self.serializer._base.serialize_entity.called) - self.assertEqual(self.entity, entity) - - def test_deserialize_entity(self): - self.serializer.deserialize_entity(self.context, self.entity) - self.serializer._base.deserialize_entity.assert_called_with( - self.context, self.entity) - - def test_deserialize_entity_empty_base(self): - # NOTE(viktors): Return False for check `if self.serializer._base:` - bool_args = {'__bool__': lambda *args: False, - '__nonzero__': lambda *args: False} - self.serializer._base.configure_mock(**bool_args) - - entity = self.serializer.deserialize_entity(self.context, self.entity) - self.assertFalse(self.serializer._base.deserialize_entity.called) - self.assertEqual(self.entity, entity) - - def test_serialize_context(self): - new_context = self.serializer.serialize_context(self.context) - - self.assertEqual(self.context.to_dict(), new_context) - - @mock.patch.object(common_context.RequestContext, 'from_dict', - return_value='foobar') - def test_deserialize_context(self, mock_from_dict): - new_context = self.serializer.deserialize_context(self.context) - - mock_from_dict.assert_called_with(self.context) - self.assertEqual( - common_context.RequestContext.from_dict(self.context), - new_context - ) diff --git a/oslo_messaging/tests/test_target.py b/oslo_messaging/tests/test_target.py deleted file mode 100644 index 049f4f7..0000000 --- a/oslo_messaging/tests/test_target.py +++ /dev/null @@ -1,177 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
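The Target tests below verify three behaviours: keyword construction with unset attributes defaulting to None, call-style overriding (calling a Target returns a copy with the given attributes replaced), and attribute-wise equality. In short, with illustrative attribute values:

    import oslo_messaging

    t = oslo_messaging.Target(topic='compute', version='3.4')

    # Calling a Target yields a new Target with the overrides applied;
    # attributes not mentioned are carried over from the original.
    t2 = t(server='host-1')
    assert t2.topic == 'compute' and t2.server == 'host-1'

    # Equality compares attribute by attribute, so the copies differ.
    assert t != t2
    print(t2)  # e.g. <Target topic=compute, version=3.4, server=host-1>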
- -import testscenarios - -import oslo_messaging -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - - -class TargetConstructorTestCase(test_utils.BaseTestCase): - - scenarios = [ - ('all_none', dict(kwargs=dict())), - ('exchange', dict(kwargs=dict(exchange='testexchange'))), - ('topic', dict(kwargs=dict(topic='testtopic'))), - ('namespace', dict(kwargs=dict(namespace='testnamespace'))), - ('version', dict(kwargs=dict(version='3.4'))), - ('server', dict(kwargs=dict(server='testserver'))), - ('fanout', dict(kwargs=dict(fanout=True))), - ] - - def test_constructor(self): - target = oslo_messaging.Target(**self.kwargs) - for k in self.kwargs: - self.assertEqual(self.kwargs[k], getattr(target, k)) - for k in ['exchange', 'topic', 'namespace', - 'version', 'server', 'fanout']: - if k in self.kwargs: - continue - self.assertIsNone(getattr(target, k)) - - -class TargetCallableTestCase(test_utils.BaseTestCase): - - scenarios = [ - ('all_none', dict(attrs=dict(), kwargs=dict(), vals=dict())), - ('exchange_attr', dict(attrs=dict(exchange='testexchange'), - kwargs=dict(), - vals=dict(exchange='testexchange'))), - ('exchange_arg', dict(attrs=dict(), - kwargs=dict(exchange='testexchange'), - vals=dict(exchange='testexchange'))), - ('topic_attr', dict(attrs=dict(topic='testtopic'), - kwargs=dict(), - vals=dict(topic='testtopic'))), - ('topic_arg', dict(attrs=dict(), - kwargs=dict(topic='testtopic'), - vals=dict(topic='testtopic'))), - ('namespace_attr', dict(attrs=dict(namespace='testnamespace'), - kwargs=dict(), - vals=dict(namespace='testnamespace'))), - ('namespace_arg', dict(attrs=dict(), - kwargs=dict(namespace='testnamespace'), - vals=dict(namespace='testnamespace'))), - ('version_attr', dict(attrs=dict(version='3.4'), - kwargs=dict(), - vals=dict(version='3.4'))), - ('version_arg', dict(attrs=dict(), - kwargs=dict(version='3.4'), - vals=dict(version='3.4'))), - ('server_attr', dict(attrs=dict(server='testserver'), - kwargs=dict(), - vals=dict(server='testserver'))), - ('server_arg', dict(attrs=dict(), - kwargs=dict(server='testserver'), - vals=dict(server='testserver'))), - ('fanout_attr', dict(attrs=dict(fanout=True), - kwargs=dict(), - vals=dict(fanout=True))), - ('fanout_arg', dict(attrs=dict(), - kwargs=dict(fanout=True), - vals=dict(fanout=True))), - ] - - def test_callable(self): - target = oslo_messaging.Target(**self.attrs) - target = target(**self.kwargs) - for k in self.vals: - self.assertEqual(self.vals[k], getattr(target, k)) - for k in ['exchange', 'topic', 'namespace', - 'version', 'server', 'fanout']: - if k in self.vals: - continue - self.assertIsNone(getattr(target, k)) - - -class TargetReprTestCase(test_utils.BaseTestCase): - - scenarios = [ - ('all_none', dict(kwargs=dict(), repr='')), - ('exchange', dict(kwargs=dict(exchange='testexchange'), - repr='exchange=testexchange')), - ('topic', dict(kwargs=dict(topic='testtopic'), - repr='topic=testtopic')), - ('namespace', dict(kwargs=dict(namespace='testnamespace'), - repr='namespace=testnamespace')), - ('version', dict(kwargs=dict(version='3.4'), - repr='version=3.4')), - ('server', dict(kwargs=dict(server='testserver'), - repr='server=testserver')), - ('fanout', dict(kwargs=dict(fanout=True), - repr='fanout=True')), - ('exchange_and_fanout', dict(kwargs=dict(exchange='testexchange', - fanout=True), - repr='exchange=testexchange, ' - 'fanout=True')), - ] - - def test_repr(self): - target = oslo_messaging.Target(**self.kwargs) - self.assertEqual('<Target ' + self.repr + '>', str(target)) - -
-_notset = object() - - -class EqualityTestCase(test_utils.BaseTestCase): - - @classmethod - def generate_scenarios(cls): - attr = [ - ('exchange', dict(attr='exchange')), - ('topic', dict(attr='topic')), - ('namespace', dict(attr='namespace')), - ('version', dict(attr='version')), - ('server', dict(attr='server')), - ('fanout', dict(attr='fanout')), - ] - a = [ - ('a_notset', dict(a_value=_notset)), - ('a_none', dict(a_value=None)), - ('a_empty', dict(a_value='')), - ('a_foo', dict(a_value='foo')), - ('a_bar', dict(a_value='bar')), - ] - b = [ - ('b_notset', dict(b_value=_notset)), - ('b_none', dict(b_value=None)), - ('b_empty', dict(b_value='')), - ('b_foo', dict(b_value='foo')), - ('b_bar', dict(b_value='bar')), - ] - - cls.scenarios = testscenarios.multiply_scenarios(attr, a, b) - for s in cls.scenarios: - s[1]['equals'] = (s[1]['a_value'] == s[1]['b_value']) - - def test_equality(self): - a_kwargs = {self.attr: self.a_value} - b_kwargs = {self.attr: self.b_value} - - a = oslo_messaging.Target(**a_kwargs) - b = oslo_messaging.Target(**b_kwargs) - - if self.equals: - self.assertEqual(a, b) - self.assertFalse(a != b) - else: - self.assertNotEqual(a, b) - self.assertFalse(a == b) - - -EqualityTestCase.generate_scenarios() diff --git a/oslo_messaging/tests/test_transport.py b/oslo_messaging/tests/test_transport.py deleted file mode 100644 index 01ead7e..0000000 --- a/oslo_messaging/tests/test_transport.py +++ /dev/null @@ -1,390 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
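The equality scenarios above boil down to straight per-attribute value comparison; in particular, an empty string and None are distinct values. A hedged sketch (assuming oslo.messaging is importable):

import oslo_messaging

assert oslo_messaging.Target(topic='foo') == oslo_messaging.Target(topic='foo')
assert oslo_messaging.Target(topic='foo') != oslo_messaging.Target(topic='bar')
# '' != None, so these targets compare unequal, matching the scenarios above.
assert oslo_messaging.Target(topic='') != oslo_messaging.Target(topic=None)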
- -import fixtures -import mock -from mox3 import mox -from oslo_config import cfg -import six -from stevedore import driver -import testscenarios - -import oslo_messaging -from oslo_messaging.tests import utils as test_utils -from oslo_messaging import transport - -load_tests = testscenarios.load_tests_apply_scenarios - - -class _FakeDriver(object): - - def __init__(self, conf): - self.conf = conf - - def send(self, *args, **kwargs): - pass - - def send_notification(self, *args, **kwargs): - pass - - def listen(self, target, batch_size, batch_timeout): - pass - - -class _FakeManager(object): - - def __init__(self, driver): - self.driver = driver - - -class GetTransportTestCase(test_utils.BaseTestCase): - - scenarios = [ - ('rpc_backend', - dict(url=None, transport_url=None, rpc_backend='testbackend', - control_exchange=None, allowed=None, aliases=None, - expect=dict(backend='testbackend', - exchange=None, - url='testbackend:', - allowed=[]))), - ('transport_url', - dict(url=None, transport_url='testtransport:', rpc_backend=None, - control_exchange=None, allowed=None, aliases=None, - expect=dict(backend='testtransport', - exchange=None, - url='testtransport:', - allowed=[]))), - ('url_param', - dict(url='testtransport:', transport_url=None, rpc_backend=None, - control_exchange=None, allowed=None, aliases=None, - expect=dict(backend='testtransport', - exchange=None, - url='testtransport:', - allowed=[]))), - ('control_exchange', - dict(url=None, transport_url=None, rpc_backend='testbackend', - control_exchange='testexchange', allowed=None, aliases=None, - expect=dict(backend='testbackend', - exchange='testexchange', - url='testbackend:', - allowed=[]))), - ('allowed_remote_exmods', - dict(url=None, transport_url=None, rpc_backend='testbackend', - control_exchange=None, allowed=['foo', 'bar'], aliases=None, - expect=dict(backend='testbackend', - exchange=None, - url='testbackend:', - allowed=['foo', 'bar']))), - ('rpc_backend_aliased', - dict(url=None, transport_url=None, rpc_backend='testfoo', - control_exchange=None, allowed=None, - aliases=dict(testfoo='testbackend'), - expect=dict(backend='testbackend', - exchange=None, - url='testbackend:', - allowed=[]))), - ('transport_url_aliased', - dict(url=None, transport_url='testfoo:', rpc_backend=None, - control_exchange=None, allowed=None, - aliases=dict(testfoo='testtransport'), - expect=dict(backend='testtransport', - exchange=None, - url='testtransport:', - allowed=[]))), - ('url_param_aliased', - dict(url='testfoo:', transport_url=None, rpc_backend=None, - control_exchange=None, allowed=None, - aliases=dict(testfoo='testtransport'), - expect=dict(backend='testtransport', - exchange=None, - url='testtransport:', - allowed=[]))), - ] - - @mock.patch('oslo_messaging.transport.LOG') - def test_get_transport(self, fake_logger): - self.config(rpc_backend=self.rpc_backend, - control_exchange=self.control_exchange, - transport_url=self.transport_url) - - self.mox.StubOutWithMock(driver, 'DriverManager') - - invoke_args = [self.conf, - oslo_messaging.TransportURL.parse(self.conf, - self.expect['url'])] - invoke_kwds = dict(default_exchange=self.expect['exchange'], - allowed_remote_exmods=self.expect['allowed']) - - drvr = _FakeDriver(self.conf) - driver.DriverManager('oslo.messaging.drivers', - self.expect['backend'], - invoke_on_load=True, - invoke_args=invoke_args, - invoke_kwds=invoke_kwds).\ - AndReturn(_FakeManager(drvr)) - - self.mox.ReplayAll() - - kwargs = dict(url=self.url) - if self.allowed is not None: - kwargs['allowed_remote_exmods'] = 
self.allowed - if self.aliases is not None: - kwargs['aliases'] = self.aliases - transport_ = oslo_messaging.get_transport(self.conf, **kwargs) - - if self.aliases is not None: - self.assertEqual( - [mock.call('legacy "rpc_backend" is deprecated, ' - '"testfoo" must be replaced by ' - '"%s"' % self.aliases.get('testfoo'))], - fake_logger.warning.mock_calls - ) - - self.assertIsNotNone(transport_) - self.assertIs(transport_.conf, self.conf) - self.assertIs(transport_._driver, drvr) - - -class GetTransportSadPathTestCase(test_utils.BaseTestCase): - - scenarios = [ - ('invalid_transport_url', - dict(url=None, transport_url='invalid', rpc_backend=None, - ex=dict(cls=oslo_messaging.InvalidTransportURL, - msg_contains='No scheme specified', - url='invalid'))), - ('invalid_url_param', - dict(url='invalid', transport_url=None, rpc_backend=None, - ex=dict(cls=oslo_messaging.InvalidTransportURL, - msg_contains='No scheme specified', - url='invalid'))), - ('driver_load_failure', - dict(url=None, transport_url=None, rpc_backend='testbackend', - ex=dict(cls=oslo_messaging.DriverLoadFailure, - msg_contains='Failed to load', - driver='testbackend'))), - ] - - def test_get_transport_sad(self): - self.config(rpc_backend=self.rpc_backend, - transport_url=self.transport_url) - - if self.rpc_backend: - self.mox.StubOutWithMock(driver, 'DriverManager') - - invoke_args = [self.conf, - oslo_messaging.TransportURL.parse(self.conf, - self.url)] - invoke_kwds = dict(default_exchange='openstack', - allowed_remote_exmods=[]) - - driver.DriverManager('oslo.messaging.drivers', - self.rpc_backend, - invoke_on_load=True, - invoke_args=invoke_args, - invoke_kwds=invoke_kwds).\ - AndRaise(RuntimeError()) - - self.mox.ReplayAll() - - try: - oslo_messaging.get_transport(self.conf, url=self.url) - self.assertFalse(True) - except Exception as ex: - ex_cls = self.ex.pop('cls') - ex_msg_contains = self.ex.pop('msg_contains') - - self.assertIsInstance(ex, oslo_messaging.MessagingException) - self.assertIsInstance(ex, ex_cls) - self.assertIn(ex_msg_contains, six.text_type(ex)) - - for k, v in self.ex.items(): - self.assertTrue(hasattr(ex, k)) - self.assertEqual(v, str(getattr(ex, k))) - - -# FIXME(markmc): this could be used elsewhere -class _SetDefaultsFixture(fixtures.Fixture): - - def __init__(self, set_defaults, opts, *names): - super(_SetDefaultsFixture, self).__init__() - self.set_defaults = set_defaults - self.opts = opts - self.names = names - - def setUp(self): - super(_SetDefaultsFixture, self).setUp() - - # FIXME(markmc): this comes from Id5c1f3ba - def first(seq, default=None, key=None): - if key is None: - key = bool - return next(six.moves.filter(key, seq), default) - - def default(opts, name): - return first(opts, key=lambda o: o.name == name).default - - orig_defaults = {} - for n in self.names: - orig_defaults[n] = default(self.opts, n) - - def restore_defaults(): - self.set_defaults(**orig_defaults) - - self.addCleanup(restore_defaults) - - -class TestSetDefaults(test_utils.BaseTestCase): - - def setUp(self): - super(TestSetDefaults, self).setUp(conf=cfg.ConfigOpts()) - self.useFixture(_SetDefaultsFixture( - oslo_messaging.set_transport_defaults, - transport._transport_opts, - 'control_exchange')) - - def test_set_default_control_exchange(self): - oslo_messaging.set_transport_defaults(control_exchange='foo') - - self.mox.StubOutWithMock(driver, 'DriverManager') - invoke_kwds = mox.ContainsKeyValue('default_exchange', 'foo') - driver.DriverManager(mox.IgnoreArg(), - mox.IgnoreArg(), - 
invoke_on_load=mox.IgnoreArg(), - invoke_args=mox.IgnoreArg(), - invoke_kwds=invoke_kwds).\ - AndReturn(_FakeManager(_FakeDriver(self.conf))) - self.mox.ReplayAll() - - oslo_messaging.get_transport(self.conf) - - -class TestTransportMethodArgs(test_utils.BaseTestCase): - - _target = oslo_messaging.Target(topic='topic', server='server') - - def test_send_defaults(self): - t = transport.Transport(_FakeDriver(cfg.CONF)) - - self.mox.StubOutWithMock(t._driver, 'send') - t._driver.send(self._target, 'ctxt', 'message', - wait_for_reply=None, - timeout=None, retry=None) - self.mox.ReplayAll() - - t._send(self._target, 'ctxt', 'message') - - def test_send_all_args(self): - t = transport.Transport(_FakeDriver(cfg.CONF)) - - self.mox.StubOutWithMock(t._driver, 'send') - t._driver.send(self._target, 'ctxt', 'message', - wait_for_reply='wait_for_reply', - timeout='timeout', retry='retry') - self.mox.ReplayAll() - - t._send(self._target, 'ctxt', 'message', - wait_for_reply='wait_for_reply', - timeout='timeout', retry='retry') - - def test_send_notification(self): - t = transport.Transport(_FakeDriver(cfg.CONF)) - - self.mox.StubOutWithMock(t._driver, 'send_notification') - t._driver.send_notification(self._target, 'ctxt', 'message', 1.0, - retry=None) - self.mox.ReplayAll() - - t._send_notification(self._target, 'ctxt', 'message', version=1.0) - - def test_send_notification_all_args(self): - t = transport.Transport(_FakeDriver(cfg.CONF)) - - self.mox.StubOutWithMock(t._driver, 'send_notification') - t._driver.send_notification(self._target, 'ctxt', 'message', 1.0, - retry=5) - self.mox.ReplayAll() - - t._send_notification(self._target, 'ctxt', 'message', version=1.0, - retry=5) - - def test_listen(self): - t = transport.Transport(_FakeDriver(cfg.CONF)) - - self.mox.StubOutWithMock(t._driver, 'listen') - t._driver.listen(self._target, 1, None) - self.mox.ReplayAll() - - t._listen(self._target, 1, None) - - -class TestTransportUrlCustomisation(test_utils.BaseTestCase): - def setUp(self): - super(TestTransportUrlCustomisation, self).setUp() - - def transport_url_parse(url): - return transport.TransportURL.parse(self.conf, url) - - self.url1 = transport_url_parse("fake://vhost1?x=1&y=2&z=3") - self.url2 = transport_url_parse("fake://vhost2?foo=bar") - self.url3 = transport_url_parse("fake://vhost1?l=1&l=2&l=3") - self.url4 = transport_url_parse("fake://vhost2?d=x:1&d=y:2&d=z:3") - - def test_hash(self): - urls = {} - urls[self.url1] = self.url1 - urls[self.url2] = self.url2 - urls[self.url3] = self.url3 - urls[self.url4] = self.url4 - self.assertEqual(2, len(urls)) - - def test_eq(self): - self.assertEqual(self.url1, self.url3) - self.assertEqual(self.url2, self.url4) - self.assertNotEqual(self.url1, self.url4) - - def test_query(self): - self.assertEqual({'x': '1', 'y': '2', 'z': '3'}, self.url1.query) - self.assertEqual({'foo': 'bar'}, self.url2.query) - self.assertEqual({'l': '1,2,3'}, self.url3.query) - self.assertEqual({'d': 'x:1,y:2,z:3'}, self.url4.query) - - -class TestTransportHostCustomisation(test_utils.BaseTestCase): - def setUp(self): - super(TestTransportHostCustomisation, self).setUp() - self.host1 = transport.TransportHost("host1", 5662, "user", "pass") - self.host2 = transport.TransportHost("host1", 5662, "user", "pass") - self.host3 = transport.TransportHost("host1", 5663, "user", "pass") - self.host4 = transport.TransportHost("host1", 5662, "user2", "pass") - self.host5 = transport.TransportHost("host1", 5662, "user", "pass2") - self.host6 = transport.TransportHost("host2", 5662, "user", 
"pass") - - def test_hash(self): - hosts = {} - hosts[self.host1] = self.host1 - hosts[self.host2] = self.host2 - hosts[self.host3] = self.host3 - hosts[self.host4] = self.host4 - hosts[self.host5] = self.host5 - hosts[self.host6] = self.host6 - self.assertEqual(5, len(hosts)) - - def test_eq(self): - self.assertEqual(self.host1, self.host2) - self.assertNotEqual(self.host1, self.host3) - self.assertNotEqual(self.host1, self.host4) - self.assertNotEqual(self.host1, self.host5) - self.assertNotEqual(self.host1, self.host6) diff --git a/oslo_messaging/tests/test_urls.py b/oslo_messaging/tests/test_urls.py deleted file mode 100644 index 176fb75..0000000 --- a/oslo_messaging/tests/test_urls.py +++ /dev/null @@ -1,245 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testscenarios - -import oslo_messaging -from oslo_messaging.tests import utils as test_utils - -load_tests = testscenarios.load_tests_apply_scenarios - - -class TestParseURL(test_utils.BaseTestCase): - - scenarios = [ - ('transport', - dict(url='foo:', aliases=None, - expect=dict(transport='foo'))), - ('transport_aliased', - dict(url='bar:', aliases=dict(bar='foo'), - expect=dict(transport='foo'))), - ('virtual_host_slash', - dict(url='foo:////', aliases=None, - expect=dict(transport='foo', virtual_host='/'))), - ('virtual_host', - dict(url='foo:///bar', aliases=None, - expect=dict(transport='foo', virtual_host='bar'))), - ('host', - dict(url='foo://host/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='host'), - ]))), - ('ipv6_host', - dict(url='foo://[ffff::1]/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='ffff::1'), - ]))), - ('port', - dict(url='foo://host:1234/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='host', port=1234), - ]))), - ('ipv6_port', - dict(url='foo://[ffff::1]:1234/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='ffff::1', port=1234), - ]))), - ('username', - dict(url='foo://u@host:1234/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='host', port=1234, username='u'), - ]))), - ('password', - dict(url='foo://u:p@host:1234/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='host', port=1234, - username='u', password='p'), - ]))), - ('creds_no_host', - dict(url='foo://u:p@/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(username='u', password='p'), - ]))), - ('multi_host', - dict(url='foo://u:p@host1:1234,host2:4321/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='host1', port=1234, - username='u', password='p'), - dict(host='host2', port=4321), - ]))), - ('multi_creds', - dict(url='foo://u1:p1@host1:1234,u2:p2@host2:4321/bar', aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - 
dict(host='host1', port=1234, - username='u1', password='p1'), - dict(host='host2', port=4321, - username='u2', password='p2'), - ]))), - ('multi_creds_ipv6', - dict(url='foo://u1:p1@[ffff::1]:1234,u2:p2@[ffff::2]:4321/bar', - aliases=None, - expect=dict(transport='foo', - virtual_host='bar', - hosts=[ - dict(host='ffff::1', port=1234, - username='u1', password='p1'), - dict(host='ffff::2', port=4321, - username='u2', password='p2'), - ]))), - ('quoting', - dict(url='foo://u%24:p%26@host:1234/%24', aliases=None, - expect=dict(transport='foo', - virtual_host='$', - hosts=[ - dict(host='host', port=1234, - username='u$', password='p&'), - ]))), - ] - - def test_parse_url(self): - self.config(rpc_backend=None) - - url = oslo_messaging.TransportURL.parse(self.conf, self.url, - self.aliases) - - hosts = [] - for host in self.expect.get('hosts', []): - hosts.append(oslo_messaging.TransportHost(host.get('host'), - host.get('port'), - host.get('username'), - host.get('password'))) - expected = oslo_messaging.TransportURL(self.conf, - self.expect.get('transport'), - self.expect.get('virtual_host'), - hosts) - - self.assertEqual(expected, url) - - -class TestFormatURL(test_utils.BaseTestCase): - - scenarios = [ - ('rpc_backend', - dict(rpc_backend='testbackend', - transport=None, - virtual_host=None, - hosts=[], - aliases=None, - expected='testbackend:///')), - ('rpc_backend_aliased', - dict(rpc_backend='testfoo', - transport=None, - virtual_host=None, - hosts=[], - aliases=dict(testfoo='testbackend'), - expected='testbackend:///')), - ('transport', - dict(rpc_backend=None, - transport='testtransport', - virtual_host=None, - hosts=[], - aliases=None, - expected='testtransport:///')), - ('transport_aliased', - dict(rpc_backend=None, - transport='testfoo', - virtual_host=None, - hosts=[], - aliases=dict(testfoo='testtransport'), - expected='testtransport:///')), - ('virtual_host', - dict(rpc_backend=None, - transport='testtransport', - virtual_host='/vhost', - hosts=[], - aliases=None, - expected='testtransport:////vhost')), - ('host', - dict(rpc_backend=None, - transport='testtransport', - virtual_host='/', - hosts=[ - dict(hostname='host', - port=10, - username='bob', - password='secret'), - ], - aliases=None, - expected='testtransport://bob:secret@host:10//')), - ('multi_host', - dict(rpc_backend=None, - transport='testtransport', - virtual_host='', - hosts=[ - dict(hostname='h1', - port=1000, - username='b1', - password='s1'), - dict(hostname='h2', - port=2000, - username='b2', - password='s2'), - ], - aliases=None, - expected='testtransport://b1:s1@h1:1000,b2:s2@h2:2000/')), - ('quoting', - dict(rpc_backend=None, - transport='testtransport', - virtual_host='/$', - hosts=[ - dict(hostname='host', - port=10, - username='b$', - password='s&'), - ], - aliases=None, - expected='testtransport://b%24:s%26@host:10//%24')), - ] - - def test_parse_url(self): - self.config(rpc_backend=self.rpc_backend) - - hosts = [] - for host in self.hosts: - hosts.append(oslo_messaging.TransportHost(host.get('hostname'), - host.get('port'), - host.get('username'), - host.get('password'))) - - url = oslo_messaging.TransportURL(self.conf, - self.transport, - self.virtual_host, - hosts, - self.aliases) - - self.assertEqual(self.expected, str(url)) diff --git a/oslo_messaging/tests/test_utils.py b/oslo_messaging/tests/test_utils.py deleted file mode 100644 index 908c25f..0000000 --- a/oslo_messaging/tests/test_utils.py +++ /dev/null @@ -1,99 +0,0 @@ - -# Copyright 2013 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_messaging._drivers import common -from oslo_messaging import _utils as utils -from oslo_messaging.tests import utils as test_utils -from six.moves import mock - - -class VersionIsCompatibleTestCase(test_utils.BaseTestCase): - def test_version_is_compatible_same(self): - self.assertTrue(utils.version_is_compatible('1.23', '1.23')) - - def test_version_is_compatible_newer_minor(self): - self.assertTrue(utils.version_is_compatible('1.24', '1.23')) - - def test_version_is_compatible_older_minor(self): - self.assertFalse(utils.version_is_compatible('1.22', '1.23')) - - def test_version_is_compatible_major_difference1(self): - self.assertFalse(utils.version_is_compatible('2.23', '1.23')) - - def test_version_is_compatible_major_difference2(self): - self.assertFalse(utils.version_is_compatible('1.23', '2.23')) - - def test_version_is_compatible_newer_rev(self): - self.assertFalse(utils.version_is_compatible('1.23', '1.23.1')) - - def test_version_is_compatible_newer_rev_both(self): - self.assertFalse(utils.version_is_compatible('1.23.1', '1.23.2')) - - def test_version_is_compatible_older_rev_both(self): - self.assertTrue(utils.version_is_compatible('1.23.2', '1.23.1')) - - def test_version_is_compatible_older_rev(self): - self.assertTrue(utils.version_is_compatible('1.24', '1.23.1')) - - def test_version_is_compatible_no_rev_is_zero(self): - self.assertTrue(utils.version_is_compatible('1.23.0', '1.23')) - - -class TimerTestCase(test_utils.BaseTestCase): - def test_no_duration_no_callback(self): - t = common.DecayingTimer() - t.start() - remaining = t.check_return() - self.assertIsNone(remaining) - - def test_no_duration_but_maximum(self): - t = common.DecayingTimer() - t.start() - remaining = t.check_return(maximum=2) - self.assertEqual(2, remaining) - - @mock.patch('oslo_utils.timeutils.now') - def test_duration_expired_no_callback(self, now): - now.return_value = 0 - t = common.DecayingTimer(2) - t.start() - - now.return_value = 3 - remaining = t.check_return() - self.assertEqual(0, remaining) - - @mock.patch('oslo_utils.timeutils.now') - def test_duration_callback(self, now): - now.return_value = 0 - t = common.DecayingTimer(2) - t.start() - - now.return_value = 3 - callback = mock.Mock() - remaining = t.check_return(callback) - self.assertEqual(0, remaining) - callback.assert_called_once_with() - - @mock.patch('oslo_utils.timeutils.now') - def test_duration_callback_with_args(self, now): - now.return_value = 0 - t = common.DecayingTimer(2) - t.start() - - now.return_value = 3 - callback = mock.Mock() - remaining = t.check_return(callback, 1, a='b') - self.assertEqual(0, remaining) - callback.assert_called_once_with(1, a='b') diff --git a/oslo_messaging/tests/utils.py b/oslo_messaging/tests/utils.py deleted file mode 100644 index c5fca87..0000000 --- a/oslo_messaging/tests/utils.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator 
of the National Aeronautics and Space Administration. -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Common utilities used in testing""" - -import threading - -from oslo_config import cfg -from oslotest import base -from oslotest import moxstubout -import six - -TRUE_VALUES = ('true', '1', 'yes') - - -class BaseTestCase(base.BaseTestCase): - - def setUp(self, conf=cfg.CONF): - super(BaseTestCase, self).setUp() - - from oslo_messaging import conffixture - self.messaging_conf = self.useFixture(conffixture.ConfFixture(conf)) - self.messaging_conf.transport_driver = 'fake' - self.conf = self.messaging_conf.conf - - self.conf.project = 'project' - self.conf.prog = 'prog' - - moxfixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = moxfixture.mox - self.stubs = moxfixture.stubs - - def config(self, **kw): - """Override some configuration values. - - The keyword arguments are the names of configuration options to - override and their values. - - If a group argument is supplied, the overrides are applied to - the specified configuration option group. - - All overrides are automatically cleared at the end of the current - test by the tearDown() method. - """ - group = kw.pop('group', None) - for k, v in six.iteritems(kw): - self.conf.set_override(k, v, group, enforce_type=True) - - -class ServerThreadHelper(threading.Thread): - def __init__(self, server): - super(ServerThreadHelper, self).__init__() - self.daemon = True - self._server = server - self._stop_event = threading.Event() - - def run(self): - self._server.start() - self._stop_event.wait() - # Check start() does nothing with a running listener - self._server.start() - self._server.stop() - self._server.wait() - - def stop(self): - self._stop_event.set() diff --git a/oslo_messaging/transport.py b/oslo_messaging/transport.py deleted file mode 100644 index 703c8aa..0000000 --- a/oslo_messaging/transport.py +++ /dev/null @@ -1,462 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# Copyright (c) 2012 Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
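The BaseTestCase above routes every test through oslo.messaging's ConfFixture with the fake driver; a hedged sketch of the same setup used standalone, outside useFixture():

from oslo_config import cfg
from oslo_messaging import conffixture

conf = cfg.ConfigOpts()
fixture = conffixture.ConfFixture(conf)
fixture.setUp()                    # BaseTestCase does this via useFixture()
fixture.transport_driver = 'fake'  # the same override as in setUp() above
# conf now resolves messaging options against the in-memory fake driver.
fixture.cleanUp()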
- -__all__ = [ - 'DriverLoadFailure', - 'InvalidTransportURL', - 'Transport', - 'TransportHost', - 'TransportURL', - 'get_transport', - 'set_transport_defaults', -] - -import logging - -from oslo_config import cfg -import six -from six.moves.urllib import parse -from stevedore import driver - -from oslo_messaging import exceptions - -LOG = logging.getLogger(__name__) - -_transport_opts = [ - cfg.StrOpt('transport_url', - secret=True, - help='A URL representing the messaging driver to use and its ' - 'full configuration.'), - cfg.StrOpt('rpc_backend', - deprecated_for_removal=True, - deprecated_reason="Replaced by [DEFAULT]/transport_url", - default='rabbit', - help='The messaging driver to use, defaults to rabbit. Other ' - 'drivers include amqp and zmq.'), - - cfg.StrOpt('control_exchange', - default='openstack', - help='The default exchange under which topics are scoped. May ' - 'be overridden by an exchange name specified in the ' - 'transport_url option.'), -] - - -def set_transport_defaults(control_exchange): - """Set defaults for messaging transport configuration options. - - :param control_exchange: the default exchange under which topics are scoped - :type control_exchange: str - """ - cfg.set_defaults(_transport_opts, - control_exchange=control_exchange) - - -class Transport(object): - - """A messaging transport. - - This is a mostly opaque handle for an underlying messaging transport - driver. - - It has a single 'conf' property which is the cfg.ConfigOpts instance used - to construct the transport object. - """ - - def __init__(self, driver): - self.conf = driver.conf - self._driver = driver - - def _require_driver_features(self, requeue=False): - self._driver.require_features(requeue=requeue) - - def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, - retry=None): - if not target.topic: - raise exceptions.InvalidTarget('A topic is required to send', - target) - return self._driver.send(target, ctxt, message, - wait_for_reply=wait_for_reply, - timeout=timeout, retry=retry) - - def _send_notification(self, target, ctxt, message, version, retry=None): - if not target.topic: - raise exceptions.InvalidTarget('A topic is required to send', - target) - self._driver.send_notification(target, ctxt, message, version, - retry=retry) - - def _listen(self, target, batch_size, batch_timeout): - if not (target.topic and target.server): - raise exceptions.InvalidTarget('A server\'s target must have ' - 'topic and server names specified', - target) - return self._driver.listen(target, batch_size, - batch_timeout) - - def _listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - for target, priority in targets_and_priorities: - if not target.topic: - raise exceptions.InvalidTarget('A target must have ' - 'topic specified', - target) - return self._driver.listen_for_notifications( - targets_and_priorities, pool, batch_size, batch_timeout - ) - - def cleanup(self): - """Release all resources associated with this transport.""" - self._driver.cleanup() - - -class InvalidTransportURL(exceptions.MessagingException): - """Raised if transport URL is invalid.""" - - def __init__(self, url, msg): - super(InvalidTransportURL, self).__init__(msg) - self.url = url - - -class DriverLoadFailure(exceptions.MessagingException): - """Raised if a transport driver can't be loaded.""" - - def __init__(self, driver, ex): - msg = 'Failed to load transport driver "%s": %s' % (driver, ex) - super(DriverLoadFailure, self).__init__(msg) - self.driver = driver - 
self.ex = ex - - -def get_transport(conf, url=None, allowed_remote_exmods=None, aliases=None): - """A factory method for Transport objects. - - This method will construct a Transport object from transport configuration - gleaned from the user's configuration and, optionally, a transport URL. - - If a transport URL is supplied as a parameter, any transport configuration - contained in it takes precedence. If no transport URL is supplied, but - there is a transport URL supplied in the user's configuration then that - URL will take the place of the URL parameter. In both cases, any - configuration not supplied in the transport URL may be taken from - individual configuration parameters in the user's configuration. - - An example transport URL might be:: - - rabbit://me:passwd@host:5672/virtual_host - - and can either be passed as a string or a TransportURL object. - - :param conf: the user configuration - :type conf: cfg.ConfigOpts - :param url: a transport URL - :type url: str or TransportURL - :param allowed_remote_exmods: a list of modules which a client using this - transport will deserialize remote exceptions - from - :type allowed_remote_exmods: list - :param aliases: A map of transport alias to transport name - :type aliases: dict - """ - allowed_remote_exmods = allowed_remote_exmods or [] - conf.register_opts(_transport_opts) - - if not isinstance(url, TransportURL): - url = TransportURL.parse(conf, url, aliases) - - kwargs = dict(default_exchange=conf.control_exchange, - allowed_remote_exmods=allowed_remote_exmods) - - try: - mgr = driver.DriverManager('oslo.messaging.drivers', - url.transport.split('+')[0], - invoke_on_load=True, - invoke_args=[conf, url], - invoke_kwds=kwargs) - except RuntimeError as ex: - raise DriverLoadFailure(url.transport, ex) - - return Transport(mgr.driver) - - -class TransportHost(object): - - """A host element of a parsed transport URL.""" - - def __init__(self, hostname=None, port=None, username=None, password=None): - self.hostname = hostname - self.port = port - self.username = username - self.password = password - - def __hash__(self): - return hash((self.hostname, self.port, self.username, self.password)) - - def __eq__(self, other): - return vars(self) == vars(other) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - attrs = [] - for a in ['hostname', 'port', 'username', 'password']: - v = getattr(self, a) - if v: - attrs.append((a, repr(v))) - values = ', '.join(['%s=%s' % i for i in attrs]) - return '' - - -class TransportURL(object): - - """A parsed transport URL. - - Transport URLs take the form:: - - transport://user:pass@host:port[,userN:passN@hostN:portN]/virtual_host?query - - i.e. the scheme selects the transport driver, you may include multiple - hosts in netloc, the path part is a "virtual host" partition path and - the query part contains some driver-specific options which may override - corresponding values from a static configuration. 
- - :param conf: a ConfigOpts instance - :type conf: oslo.config.cfg.ConfigOpts - :param transport: a transport name, for example 'rabbit' - :type transport: str - :param virtual_host: a virtual host path, for example '/' - :type virtual_host: str - :param hosts: a list of TransportHost objects - :type hosts: list - :param aliases: DEPRECATED: a map of transport alias to transport name - :type aliases: dict - :param query: a dictionary of URL query parameters - :type query: dict - """ - - def __init__(self, conf, transport=None, virtual_host=None, hosts=None, - aliases=None, query=None): - self.conf = conf - self.conf.register_opts(_transport_opts) - self._transport = transport - self.virtual_host = virtual_host - if hosts is None: - self.hosts = [] - else: - self.hosts = hosts - if aliases is None: - self.aliases = {} - else: - self.aliases = aliases - if query is None: - self.query = {} - else: - self.query = query - - self._deprecation_logged = False - - @property - def transport(self): - if self._transport is None: - transport = self.conf.rpc_backend - else: - transport = self._transport - final_transport = self.aliases.get(transport, transport) - if not self._deprecation_logged and final_transport != transport: - # NOTE(sileht): The first step is to deprecate this for one cycle, - # to ensure deployers have updated their configuration during Ocata. - # Then in Pike we will deprecate the aliases kwarg of TransportURL() - # and get_transport() for consuming applications. - LOG.warning('legacy "rpc_backend" is deprecated, ' - '"%(legacy_transport)s" must be replaced by ' - '"%(final_transport)s"' % { - 'legacy_transport': transport, - 'final_transport': final_transport}) - self._deprecation_logged = True - - return final_transport - - @transport.setter - def transport(self, value): - self._transport = value - - def __hash__(self): - return hash((tuple(self.hosts), self.transport, self.virtual_host)) - - def __eq__(self, other): - return (self.transport == other.transport and - self.virtual_host == other.virtual_host and - self.hosts == other.hosts) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - attrs = [] - for a in ['transport', 'virtual_host', 'hosts']: - v = getattr(self, a) - if v: - attrs.append((a, repr(v))) - values = ', '.join(['%s=%s' % i for i in attrs]) - return '<TransportURL ' + values + '>' - - def __str__(self): - netlocs = [] - - for host in self.hosts: - username = host.username - password = host.password - hostname = host.hostname - port = host.port - - # Starting place for the network location - netloc = '' - - # Build the username and password portion of the transport URL - if username is not None or password is not None: - if username is not None: - netloc += parse.quote(username, '') - if password is not None: - netloc += ':%s' % parse.quote(password, '') - netloc += '@' - - # Build the network location portion of the transport URL - if hostname: - if ':' in hostname: - netloc += '[%s]' % hostname - else: - netloc += hostname - if port is not None: - netloc += ':%d' % port - - netlocs.append(netloc) - - # Assemble the transport URL - url = '%s://%s/' % (self.transport, ','.join(netlocs)) - - if self.virtual_host: - url += parse.quote(self.virtual_host) - - if self.query: - url += '?' + parse.urlencode(self.query, doseq=True) - - return url - - @classmethod - def parse(cls, conf, url=None, aliases=None): - """Parse a URL. 
- - Assuming a URL takes the form of:: - - transport://user:pass@host:port[,userN:passN@hostN:portN]/virtual_host?query - - then parse the URL and return a TransportURL object. - - Netloc is parsed following the sequence below: - - * It is first split by ',' in order to support multiple hosts - * Username and password may be specified for each host; when they - are not given for a host they are omitted:: - - user:pass@host1:port1,host2:port2 - - [ - {"username": "user", "password": "pass", "host": "host1:port1"}, - {"host": "host2:port2"} - ] - - If the url is not provided, conf.transport_url is parsed instead. - - :param conf: a ConfigOpts instance - :type conf: oslo.config.cfg.ConfigOpts - :param url: The URL to parse - :type url: str - :param aliases: A map of transport alias to transport name - :type aliases: dict - :returns: A TransportURL - """ - - url = url or conf.transport_url - if not url: - return cls(conf, aliases=aliases) - - if not isinstance(url, six.string_types): - raise InvalidTransportURL(url, 'Wrong URL type') - - url = parse.urlparse(url) - - if not url.scheme: - raise InvalidTransportURL(url.geturl(), 'No scheme specified') - - transport = url.scheme - - query = {} - if url.query: - for key, values in six.iteritems(parse.parse_qs(url.query)): - query[key] = ','.join(values) - - virtual_host = None - if url.path.startswith('/'): - virtual_host = parse.unquote(url.path[1:]) - - hosts = [] - - for host in url.netloc.split(','): - if not host: - continue - - hostname = host - username = password = port = None - - if '@' in host: - username, hostname = host.split('@', 1) - if ':' in username: - username, password = username.split(':', 1) - password = parse.unquote(password) - username = parse.unquote(username) - - if not hostname: - hostname = None - elif hostname.startswith('['): - # Find the closing ']' and extract the hostname - host_end = hostname.find(']') - if host_end < 0: - # NOTE(Vek): Identical to what Python 2.7's - # urlparse.urlparse() raises in this case - raise ValueError('Invalid IPv6 URL') - - port_text = hostname[host_end:] - hostname = hostname[1:host_end] - - # Now we need the port; this is compliant with how urlparse - # parses the port data - port = None - if ':' in port_text: - port = int(port_text.split(':', 1)[1]) - elif ':' in hostname: - hostname, port = hostname.split(':', 1) - port = int(port) - - hosts.append(TransportHost(hostname=hostname, - port=port, - username=username, - password=password)) - - return cls(conf, transport, virtual_host, hosts, aliases, query) diff --git a/oslo_messaging/version.py b/oslo_messaging/version.py deleted file mode 100644 index b4cd76b..0000000 --- a/oslo_messaging/version.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
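A hedged example of what the parse() implementation above yields for a multi-host URL with per-host credentials and repeated query keys:

from oslo_config import cfg
import oslo_messaging

conf = cfg.ConfigOpts()
url = oslo_messaging.TransportURL.parse(
    conf, 'rabbit://me:secret@host1:5672,host2:5672/myvhost?x=1&x=2')

assert url.transport == 'rabbit'
assert url.virtual_host == 'myvhost'
# Credentials are per host; host2 has none.
assert url.hosts[0].username == 'me'
assert url.hosts[1].username is None
# Repeated query keys are joined with commas, per the code above.
assert url.query == {'x': '1,2'}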
- -import pbr.version - -version_info = pbr.version.VersionInfo('oslo_messaging') diff --git a/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml b/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml deleted file mode 100644 index 46a2da6..0000000 --- a/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -other: - - Switch to reno for managing release notes. \ No newline at end of file diff --git a/releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml b/releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml deleted file mode 100644 index fafa33d..0000000 --- a/releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - | Idle connections in the pool will be expired and closed. - | The default TTL is 1200s. The following configuration params were added - - * *conn_pool_ttl* (default 1200) - * *conn_pool_min_size* (default 2) diff --git a/releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yaml b/releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yaml deleted file mode 100644 index dc8ac5d..0000000 --- a/releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - The rabbitmq driver option ``DEFAULT/max_retries`` has been deprecated - for removal (at a later point in the future) as it did not make logical - sense for notifications and for RPC. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 392c957..0000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# oslo.messaging Release Notes documentation build configuration file, created -# by sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. 
They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'oslosphinx', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'oslo.messaging Release Notes' -copyright = u'2016, oslo.messaging Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from oslo_messaging.version import version_info as oslo_messaging_version -# The full version, including alpha/beta/rc tags. -release = oslo_messaging_version.version_string_with_vcs() -# The short X.Y version. -version = oslo_messaging_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. 
They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'oslo.messagingReleaseNotesDoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'oslo.messagingReleaseNotes.tex', - u'oslo.messaging Release Notes Documentation', - u'oslo.messaging Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'oslo.messagingReleaseNotes', - u'oslo.messaging Release Notes Documentation', - [u'oslo.messaging Developers'], 1) -] - -# If true, show URL addresses after external links. 
-# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'oslo.messagingReleaseNotes', - u'oslo.messaging Release Notes Documentation', - u'oslo.messaging Developers', 'oslo.messagingReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 97dbb6b..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -============================= - oslo.messaging Release Notes -============================= - - .. toctree:: - :maxdepth: 1 - - unreleased diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po deleted file mode 100644 index e3a6f81..0000000 --- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,30 +0,0 @@ -# Andi Chandler , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.messaging Release Notes 5.5.1\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2016-07-01 03:41+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-28 05:52+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en-GB\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "5.2.0" -msgstr "5.2.0" - -msgid "Other Notes" -msgstr "Other Notes" - -msgid "Switch to reno for managing release notes." -msgstr "Switch to reno for managing release notes." - -msgid "Unreleased Release Notes" -msgstr "Unreleased Release Notes" - -msgid "oslo.messaging Release Notes" -msgstr "oslo.messaging Release Notes" diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 5860a46..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -========================== - Unreleased Release Notes -========================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f58bf6b..0000000 --- a/requirements.txt +++ /dev/null @@ -1,46 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -pbr>=1.6 # Apache-2.0 - -futurist!=0.15.0,>=0.11.0 # Apache-2.0 -oslo.config>=3.14.0 # Apache-2.0 -oslo.context!=2.6.0,>=2.4.0 # Apache-2.0 -oslo.log>=1.14.0 # Apache-2.0 -oslo.utils>=3.16.0 # Apache-2.0 -oslo.serialization>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.i18n>=2.1.0 # Apache-2.0 -stevedore>=1.16.0 # Apache-2.0 -debtcollector>=1.2.0 # Apache-2.0 - -# for jsonutils -six>=1.9.0 # MIT -cachetools>=1.1.0 # MIT License - - -# FIXME(markmc): remove this when the drivers no longer -# import eventlet - -eventlet!=0.18.3,>=0.18.2 # MIT -greenlet>=0.3.2 # MIT - -WebOb>=1.2.3 # MIT - -# for the routing notifier -PyYAML>=3.1.0 # MIT - -# rabbit driver is the default -# we set the amqp version to ensure heartbeat works -amqp<2.0,>=1.4.0 # LGPL -kombu>=3.0.25 # BSD -pika>=0.10.0 # BSD -pika-pool>=0.1.3 # BSD - -# used by pika and zmq drivers -futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD -retrying!=1.3.0,>=1.2.3 # Apache-2.0 - -# middleware -oslo.middleware>=3.0.0 # Apache-2.0 diff --git a/setup-test-env-qpid.sh b/setup-test-env-qpid.sh deleted file mode 100755 index c66d85c..0000000 --- a/setup-test-env-qpid.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash -# -# Usage: setup-test-env-qpid.sh PROTOCOL -# where PROTOCOL is the version of the AMQP protocol to use with -# qpidd. Valid values for PROTOCOL are "1", "1.0", "0-10", "0.10" -set -e - -# require qpidd, qpid-tools sasl2-bin/cyrus-sasl-plain+cyrus-sasl-lib - -. tools/functions.sh - -DATADIR=$(mktemp -d /tmp/OSLOMSG-QPID.XXXXX) -trap "clean_exit $DATADIR" EXIT - -QPIDD=$(which qpidd 2>/dev/null) - -# which protocol should be used with qpidd? -# 1 for AMQP 1.0, 0.10 for AMQP 0.10 -# -PROTOCOL=$1 -case $PROTOCOL in - "1" | "1.0") - PROTOCOL="1" - shift - ;; - "0.10" | "0-10") - PROTOCOL="0-10" - shift - ;; - *) - # assume the old protocol - echo "No protocol specified, assuming 0.10" - PROTOCOL="0-10" - ;; -esac - -# ensure that the version of qpidd does support AMQP 1.0 -if [ $PROTOCOL == "1" ] && ! `$QPIDD --help | grep -q "queue-patterns"`; then - echo "This version of $QPIDD does not support AMQP 1.0" - exit 1 -fi - -[ -f "/usr/lib/qpid/daemon/acl.so" ] && LIBACL="load-module=/usr/lib/qpid/daemon/acl.so" - -cat > ${DATADIR}/qpidd.conf <=0.10). If this version of qpidd does -# not support the fix, then do not require authentication - -if [ $PROTOCOL == "1" ] && ! `$QPIDD --help | grep -q "sasl-service-name"`; then - echo "This version of $QPIDD does not support SASL authentication with AMQP 1.0" - cat >> ${DATADIR}/qpidd.conf <> ${DATADIR}/qpidd.conf <> ${DATADIR}/qpidd.conf <0.32 require this for AMQP 1 and SASL: - if `$QPIDD --help | grep -q "sasl-service-name"`; then - cat >> ${DATADIR}/qpidd.conf < ${DATADIR}/qpidd.acl < ${DATADIR}/sasl2/qpidd.conf <> ${DATADIR}/sasl2/qpidd.conf <> ${DATADIR}/sasl2/qpidd.conf < ${DATADIR}/zmq.conf < ${DATADIR}/zmq-proxy.log 2>&1 & - -$* diff --git a/setup-test-env-zmq-pub-sub.sh b/setup-test-env-zmq-pub-sub.sh deleted file mode 100755 index 5551be5..0000000 --- a/setup-test-env-zmq-pub-sub.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -. 
tools/functions.sh - -DATADIR=$(mktemp -d /tmp/OSLOMSG-ZEROMQ.XXXXX) -trap "clean_exit $DATADIR" EXIT - -export TRANSPORT_URL=zmq:// -export ZMQ_MATCHMAKER=redis -export ZMQ_REDIS_PORT=65123 -export ZMQ_IPC_DIR=${DATADIR} -export ZMQ_USE_PUB_SUB=true -export ZMQ_USE_ROUTER_PROXY=true - -cat > ${DATADIR}/zmq.conf < ${DATADIR}/zmq-proxy.log 2>&1 & - -$* diff --git a/setup-test-env-zmq.sh b/setup-test-env-zmq.sh deleted file mode 100755 index 8780872..0000000 --- a/setup-test-env-zmq.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -. tools/functions.sh - -DATADIR=$(mktemp -d /tmp/OSLOMSG-ZEROMQ.XXXXX) -trap "clean_exit $DATADIR" EXIT - -export TRANSPORT_URL=zmq:// -export ZMQ_MATCHMAKER=redis -export ZMQ_REDIS_PORT=65123 -export ZMQ_IPC_DIR=${DATADIR} -export ZMQ_USE_PUB_SUB=false -export ZMQ_USE_ROUTER_PROXY=false - -cat > ${DATADIR}/zmq.conf < ${DATADIR}/zmq-proxy.log 2>&1 & - -$* diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 9a3665e..0000000 --- a/setup.cfg +++ /dev/null @@ -1,106 +0,0 @@ -[metadata] -name = oslo.messaging -author = OpenStack -author-email = openstack-dev@lists.openstack.org -summary = Oslo Messaging API -description-file = - README.rst -home-page = https://wiki.openstack.org/wiki/Oslo#oslo.messaging -classifier = - Environment :: OpenStack - Intended Audience :: Developers - Intended Audience :: Information Technology - License :: OSI Approved :: Apache Software License - Operating System :: OS Independent - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.4 - Programming Language :: Python :: 3.5 - -[files] -packages = - oslo_messaging - -[entry_points] -console_scripts = - oslo-messaging-zmq-proxy = oslo_messaging._cmd.zmq_proxy:main - oslo-messaging-zmq-broker = oslo_messaging._cmd.zmq_proxy:main - oslo-messaging-send-notification = oslo_messaging.notify.notifier:_send_notification - -oslo.messaging.drivers = - rabbit = oslo_messaging._drivers.impl_rabbit:RabbitDriver - zmq = oslo_messaging._drivers.impl_zmq:ZmqDriver - amqp = oslo_messaging._drivers.impl_amqp1:ProtonDriver - - # This driver is supporting for only notification usage - kafka = oslo_messaging._drivers.impl_kafka:KafkaDriver - - # To avoid confusion - kombu = oslo_messaging._drivers.impl_rabbit:RabbitDriver - - # This is just for internal testing - fake = oslo_messaging._drivers.impl_fake:FakeDriver - pika = oslo_messaging._drivers.impl_pika:PikaDriver - -oslo.messaging.executors = - blocking = futurist:SynchronousExecutor - eventlet = futurist:GreenThreadPoolExecutor - threading = futurist:ThreadPoolExecutor - -oslo.messaging.notify.drivers = - messagingv2 = oslo_messaging.notify.messaging:MessagingV2Driver - messaging = oslo_messaging.notify.messaging:MessagingDriver - log = oslo_messaging.notify._impl_log:LogDriver - test = oslo_messaging.notify._impl_test:TestDriver - noop = oslo_messaging.notify._impl_noop:NoOpDriver - routing = oslo_messaging.notify._impl_routing:RoutingDriver - -oslo.messaging.pika.connection_factory = - # Creates new connection for each create_connection call. 
Old-style behaviour. - # Uses many more connections than the single and read_write factories, but is still available - # as an option - new = oslo_messaging._drivers.pika_driver.pika_connection_factory:PikaConnectionFactory - - # Creates only one connection per transport and returns it for each create_connection call. - # This is the default, but it cannot be used with the synchronous executor - single = oslo_messaging._drivers.pika_driver.pika_connection_factory:SinglePikaConnectionFactory - - # Creates two connections - one for listening and another for sending - and returns the - # appropriate one for each create_connection call depending on the connection purpose. - # Uses one more connection, but can be used with the synchronous executor - read_write = oslo_messaging._drivers.pika_driver.pika_connection_factory:ReadWritePikaConnectionFactory - -oslo.messaging.zmq.matchmaker = - # Matchmakers for ZeroMQ - dummy = oslo_messaging._drivers.zmq_driver.matchmaker.base:DummyMatchMaker - redis = oslo_messaging._drivers.zmq_driver.matchmaker.matchmaker_redis:RedisMatchMaker - -oslo.config.opts = - oslo.messaging = oslo_messaging.opts:list_opts - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -all_files = 1 - -[upload_sphinx] -upload-dir = doc/build/html - -[compile_catalog] -directory = oslo_messaging/locale -domain = oslo_messaging - -[update_catalog] -domain = oslo_messaging -output_dir = oslo_messaging/locale -input_file = oslo_messaging/locale/oslo_messaging.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = oslo_messaging/locale/oslo_messaging.pot - -[pbr] -warnerrors = true diff --git a/setup.py b/setup.py deleted file mode 100644 index 782bb21..0000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In Python < 2.7.4, lazy loading of the `pbr` package breaks -# setuptools if other modules have registered functions in `atexit`. -# Solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=1.8'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index bc197fa..0000000 --- a/test-requirements.txt +++ /dev/null @@ -1,41 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later.
- -# Hacking already pins down pep8, pyflakes and flake8 -hacking<0.11,>=0.10.0 - -fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -mox3>=0.7.0 # Apache-2.0 -python-subunit>=0.0.18 # Apache-2.0/BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -oslotest>=1.10.0 # Apache-2.0 -pifpaf>=0.10.0 # Apache-2.0 -# for test_matchmaker_redis -redis>=2.10.0 # MIT - -# for test_impl_zmq -pyzmq>=14.3.1 # LGPL+BSD - -# for test_impl_kafka -kafka-python<1.0.0,>=0.9.5 # Apache-2.0 - -# when we can require tox>= 1.4, this can go into tox.ini: -# [testenv:cover] -# deps = {[testenv]deps} coverage -coverage>=3.6 # Apache-2.0 - -# this is required for the docs build jobs -sphinx!=1.3b1,<1.3,>=1.2.1 # BSD -oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 -reno>=1.8.0 # Apache2 - -# AMQP 1.0 support depends on the Qpid Proton AMQP 1.0 -# development libraries. -pyngus>=2.0.0 # Apache-2.0 - -# Bandit security code scanner -bandit>=1.0.1 # Apache-2.0 diff --git a/tools/functions.sh b/tools/functions.sh deleted file mode 100644 index 6581487..0000000 --- a/tools/functions.sh +++ /dev/null @@ -1,19 +0,0 @@ - -wait_for_line () { - while read line - do - echo "$line" | grep -q "$1" && break - echo "$line" | grep "$2" && exit 1 - done < "$3" - # Read the fifo forever, otherwise the process would block - cat "$3" >/dev/null & -} - -function clean_exit(){ - local error_code="$?" - kill -9 $(jobs -p) - rm -rf "$1" - return $error_code -} - - diff --git a/tools/messages_length.yaml b/tools/messages_length.yaml deleted file mode 100644 index 092ed51..0000000 --- a/tools/messages_length.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# The numbers below represent the lengths of the messages (as strings) -# that were sent through the MQ backend (RabbitMQ) during the -# boot_and_delete_server Rally scenario run (50 times, concurrency equal to 3). -# The information was gathered by adding logging to the _send method of the -# AMQPDriverBase class, after all lines related to the msg object modifications. - -# Message lengths were gathered to build a realistic message generator for the -# simulator.py oslo.messaging tool, so that it can produce traffic closer to the -# real control plane load and estimate both the message length and size (in bytes) -# going through the MQ layer.
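The header above describes how these lengths feed a weighted generator: init_random_generator in the deleted tools/simulator.py below buckets the lengths into 500-character ranges and samples buckets proportionally to how often they were observed. The following is a minimal standalone sketch of that same technique, assuming PyYAML is installed; make_sampler and its path parameter are illustrative names, not part of this tree:

    import bisect
    import collections
    import random

    import yaml


    def make_sampler(path, bucket=500):
        # Load the recorded lengths from a messages_length.yaml-style file.
        with open(path) as f:
            content = yaml.safe_load(f)
        lengths = [int(n) for n in
                   content['test_data']['string_lengths'].split(', ')]

        # Histogram the lengths into fixed-size buckets.
        counts = collections.defaultdict(int)
        for length in lengths:
            counts[(length // bucket) * bucket + 1] += 1

        # Build a cumulative count table so bisect can pick a bucket
        # with probability proportional to its observed frequency.
        starts = sorted(counts)
        cumulative = []
        total = 0
        for start in starts:
            total += counts[start]
            cumulative.append(total)

        def sample():
            r = random.random() * total
            start = starts[bisect.bisect_right(cumulative, r)]
            # Uniform choice of a concrete length inside the chosen bucket.
            return random.randrange(start, start + bucket)

        return sample

Calling make_sampler('messages_length.yaml')() repeatedly yields message lengths distributed roughly like the recorded control-plane traffic below.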
- -test_data: - string_lengths: 806, 992, 992, 1116, 1116, 1191, 1595, 1199, 1043, 1210, 1220, 1191, 1123, 1624, 2583, 1153, 4412, 1642, 1210, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 6386, 6368, 6386, 6368, 6386, 11292, 2136, 5407, 6368, 11292, 2136, 5407, 2116, 2116, 11292, 2136, 5398, 5407, 4357, 5431, 2116, 2116, 5398, 4407, 5431, 2116, 2116, 5398, 4457, 5431, 4387, 2627, 4387, 2094, 2038, 2627, 2094, 2038, 5438, 4387, 5438, 2310, 2310, 2627, 2094, 2496, 2038, 5451, 2310, 5438, 2496, 2496, 2240, 2099, 2240, 1500, 2099, 2626, 5451, 2240, 2626, 1555, 1555, 1702, 1500, 5451, 1702, 2450, 2450, 1570, 1155, 4539, 1570, 4539, 1641, 2099, 1641, 2626, 1555, 1702, 2450, 1570, 3518, 5710, 1641, 2226, 2643, 3382, 6671, 3518, 2531, 2226, 2643, 2124, 3382, 5500, 3518, 2531, 2226, 2643, 965, 2124, 3382, 5500, 6858, 2531, 1177, 965, 2124, 5687, 1177, 965, 1575, 1500, 1500, 2549, 7745, 1575, 5687, 7688, 2183, 1177, 2549, 965, 6574, 7688, 2183, 7270, 2128, 7270, 2128, 1575, 6535, 2549, 6574, 6480, 2643, 2584, 6535, 1220, 2644, 7688, 2183, 1500, 1676, 2611, 1500, 6480, 2611, 2643, 1624, 2241, 1153, 4696, 7270, 2128, 2584, 2644, 1590, 2611, 2611, 1555, 2241, 1555, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4480, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4504, 5431, 4434, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 2549, 6574, 7688, 2183, 1500, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1575, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4532, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4532, 5431, 4434, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 1500, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4532, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 2099, 2626, 5451, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 5687, 1177, 965, 1575, 2549, 6574, 7688, 2183, 7270, 1500, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1500, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 2582, 2398, 
2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 1500, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6516, 2300, 6516, 5839, 6156, 6512, 1597, 1500, 1026, 1676, 1500, 6516, 4505, 1220, 2300, 6516, 1624, 6535, 1153, 4668, 5839, 2228, 6156, 1590, 6480, 2643, 6512, 2228, 2584, 1611, 2644, 1102, 1701, 2611, 4354, 2449, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1575, 2582, 2398, 6386, 2226, 6368, 2093, 3420, 6576, 2142, 4452, 11292, 2136, 6536, 5407, 6386, 6368, 2298, 2116, 2116, 2608, 5398, 1855, 1880, 2175, 4526, 5431, 11292, 2136, 5407, 4456, 2627, 2094, 2038, 2116, 2310, 2496, 5438, 2116, 2240, 5398, 5451, 4604, 5431, 2099, 2626, 1555, 4506, 2627, 1702, 2094, 2038, 5438, 2310, 2450, 2496, 4539, 2240, 1641, 2099, 1500, 1570, 6386, 2626, 5451, 1555, 6368, 1500, 1702, 2450, 11292, 2136, 1570, 5407, 3518, 2116, 2116, 5398, 4539, 2226, 1641, 4604, 2643, 5431, 3382, 3518, 5500, 4506, 2531, 2627, 2094, 2038, 5438, 2226, 2310, 2124, 2643, 3382, 5451, 2496, 5500, 2240, 2531, 2099, 2626, 1555, 5687, 2124, 1177, 1702, 965, 2450, 1570, 4539, 1641, 1575, 3518, 2226, 2643, 3382, 5500, 1575, 5687, 2531, 1177, 965, 6574, 2549, 2124, 1500, 1500, 7688, 2183, 7270, 2128, 1575, 5687, 1177, 2549, 6574, 965, 6535, 7688, 2183, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1500, 1702, 1500, 2450, 1570, 3308, 2043, 3518, 7270, 2128, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 1575, 2549, 6574, 4604, 6535, 6536, 7688, 2183, 2298, 6480, 2643, 2608, 1855, 1880, 2175, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 7270, 2128, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4604, 5431, 2142, 4604, 6535, 6536, 4506, 2627, 2094, 2038, 2298, 6480, 2643, 2310, 5438, 2608, 2496, 1855, 1880, 2175, 2584, 2240, 2644, 2099, 2626, 5451, 2611, 1555, 2611, 2241, 1702, 2450, 1555, 1570, 1702, 2450, 1570, 3308, 2043, 3518, 4539, 1641, 3518, 2582, 2398, 6386, 2226, 6368, 2093, 3420, 6576, 2226, 2643, 3382, 5500, 2142, 4604, 11292, 2136, 6536, 5407, 2531, 2116, 2116, 2124, 5398, 2298, 2608, 1855, 1880, 2175, 4604, 5431, 5687, 1177, 4506, 965, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 1500, 4539, 1641, 1500, 1575, 2549, 6574, 3518, 7688, 2183, 2226, 2643, 3382, 5500, 2531, 2124, 7270, 2128, 6386, 6368, 11292, 2136, 5407, 5687, 1177, 2116, 2116, 5398, 965, 4604, 6535, 5431, 6480, 2643, 4506, 2584, 2627, 2094, 2644, 2038, 5438, 2611, 2310, 2611, 5451, 2496, 2241, 2240, 1575, 1555, 1702, 2450, 2099, 1570, 2626, 3308, 1555, 2043, 3518, 1702, 4539, 1575, 2450, 1641, 1570, 2549, 1500, 6574, 1500, 1220, 2582, 2398, 2226, 2093, 7688, 2183, 3420, 1624, 6576, 1676, 3518, 1153, 4717, 2142, 1590, 4501, 2226, 6536, 1611, 2643, 7270, 2128, 1102, 1701, 3382, 5500, 2449, 2298, 2608, 1855, 2531, 1880, 2175, 2124, 6535, 6480, 2643, 2584, 5687, 2644, 1177, 2611, 965, 2611, 2241, 1555, 1702, 2450, 6386, 6368, 1570, 3308, 2043, 3518, 11292, 2136, 5407, 2116, 2582, 2116, 2398, 5398, 2226, 2093, 4551, 3420, 6576, 5431, 1575, 1500, 6574, 1500, 4481, 2549, 1575, 2627, 2142, 2094, 2038, 5438, 2310, 2496, 4579, 6536, 2240, 2099, 7688, 2183, 2626, 5451, 1555, 2298, 1702, 2450, 1570, 2608, 1855, 1880, 2175, 7270, 2128, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 6535, 6480, 
2643, 2584, 2644, 2611, 2611, 5687, 2241, 1177, 965, 1555, 6386, 6368, 1702, 2450, 1570, 11292, 2136, 3308, 5407, 2043, 3518, 2116, 2116, 5398, 2582, 4579, 2398, 5431, 2226, 2093, 3420, 4481, 1500, 6576, 2627, 2094, 2038, 5438, 1500, 2142, 2310, 1575, 1575, 2496, 2240, 6574, 2099, 4579, 2626, 1555, 2549, 5451, 1702, 6536, 2450, 1570, 7688, 2183, 2298, 2608, 1855, 1880, 2175, 3518, 5710, 2226, 1641, 2643, 3382, 6671, 7270, 2128, 2531, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 2124, 4629, 5431, 6535, 4531, 2627, 2094, 2038, 2310, 6480, 2643, 2496, 5438, 6858, 2584, 1177, 2240, 965, 2644, 1500, 2611, 5451, 2611, 2241, 2099, 1500, 2626, 1555, 1555, 1702, 2450, 1702, 1575, 1570, 2450, 4539, 1570, 1641, 3308, 2043, 3518, 1575, 3518, 2549, 7745, 2582, 2398, 2226, 2643, 2226, 7688, 2093, 2183, 3382, 3420, 5500, 6576, 2531, 2124, 2142, 4629, 6536, 2298, 2608, 7270, 2128, 1855, 1880, 2175, 5687, 1177, 965, 6535, 6480, 2643, 2584, 2644, 6386, 6368, 2611, 2611, 2241, 11292, 2136, 5407, 1555, 1500, 1702, 2116, 2116, 1500, 5398, 2450, 1570, 3308, 4629, 2043, 5431, 3518, 1575, 4531, 2549, 2627, 2094, 2038, 5438, 6574, 2582, 2310, 2496, 2398, 5451, 2240, 7688, 2183, 2226, 1575, 2093, 3420, 2099, 2626, 1555, 6576, 1702, 2450, 2142, 1570, 4629, 6536, 4539, 1641, 2298, 2608, 1855, 1880, 2175, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 1500, 2531, 1500, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 5687, 6386, 1177, 1555, 6368, 965, 1702, 2450, 11292, 1570, 2136, 3308, 5407, 2043, 3518, 2116, 2116, 5398, 1575, 2582, 4679, 2398, 2226, 5431, 2093, 3420, 6576, 4581, 2627, 2094, 2038, 2310, 1575, 2496, 2549, 2142, 5438, 6574, 2240, 4679, 6536, 7688, 2183, 5451, 2099, 2626, 2298, 1555, 2608, 1855, 1880, 2175, 1702, 2450, 1570, 7270, 4539, 1500, 2128, 1641, 1500, 1597, 1066, 3518, 2226, 2643, 3382, 5500, 1220, 2531, 1624, 2124, 1153, 1676, 4818, 6386, 6535, 6368, 1624, 6480, 2643, 2584, 1611, 2644, 5687, 2611, 11292, 2136, 2611, 2241, 1177, 965, 1102, 1701, 5407, 2449, 1555, 1575, 1702, 2116, 2450, 2116, 1570, 5398, 3308, 2043, 3518, 4602, 5431, 2582, 2398, 4532, 2226, 2627, 2094, 2038, 2093, 5438, 2310, 3420, 2496, 6576, 1575, 2240, 5451, 2549, 2142, 6574, 4630, 6536, 2099, 2626, 1500, 7688, 2183, 1500, 4539, 1555, 2298, 1641, 2608, 1702, 1855, 1880, 2175, 2450, 1570, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 2531, 6386, 6368, 6535, 2124, 6480, 2643, 11292, 2136, 2584, 5407, 2644, 2611, 2611, 2241, 2116, 2116, 5687, 5398, 1177, 1555, 965, 1575, 1702, 2450, 4630, 1570, 3308, 5431, 2043, 3518, 4532, 2627, 2094, 2038, 5438, 2310, 2496, 2582, 2398, 2240, 5451, 2226, 2093, 1500, 2099, 3420, 6576, 2626, 1500, 1555, 1575, 6574, 2549, 2142, 1702, 4630, 4539, 2450, 1641, 6536, 1570, 7688, 2183, 2298, 2608, 1855, 1880, 2175, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 5687, 2241, 1177, 965, 1555, 1702, 6386, 2450, 6368, 1570, 3308, 2043, 1575, 1500, 3518, 11292, 2136, 5407, 1500, 2582, 2116, 2398, 2116, 2226, 5398, 2093, 3420, 6576, 4680, 5431, 2142, 4680, 6536, 4582, 1575, 2627, 2094, 2038, 5438, 6574, 2549, 2310, 5451, 2496, 2298, 2240, 2608, 1855, 1880, 2175, 7688, 2183, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 7270, 2128, 2531, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4680, 5431, 4582, 1500, 2627, 2094, 2038, 2310, 2124, 2496, 5438, 1500, 2240, 5451, 6535, 2099, 2626, 1555, 5687, 1177, 1702, 965, 6480, 2643, 2450, 2584, 1570, 2644, 2611, 1575, 4539, 2611, 1641, 2241, 1555, 1702, 3518, 2450, 1570, 3308, 1575, 
2043, 3518, 2226, 2549, 2643, 6574, 3382, 5500, 2531, 7688, 2183, 2582, 2398, 2124, 2226, 2093, 3420, 6576, 2142, 4680, 6536, 5687, 1177, 2298, 965, 2608, 1855, 1880, 2175, 7270, 2128, 1500, 1500, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4680, 5431, 4582, 1575, 2627, 2094, 2038, 5438, 2549, 6574, 2310, 2496, 5451, 6535, 1575, 2240, 6480, 2643, 2099, 2626, 7688, 2183, 2584, 1555, 2644, 1702, 2611, 2611, 2450, 1570, 2241, 4539, 1641, 1555, 7270, 2128, 1712, 1702, 1154, 2450, 1570, 3308, 2043, 1500, 3518, 3518, 1500, 2582, 2398, 1220, 2226, 2226, 2643, 2093, 1624, 3420, 6576, 3382, 1153, 5500, 6535, 2531, 2124, 4768, 1624, 2142, 1676, 4552, 6480, 6536, 2643, 2584, 2644, 2611, 2298, 2611, 2608, 1855, 1880, 2241, 2175, 5687, 1177, 965, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4552, 1575, 1575, 6536, 6386, 2549, 6368, 6574, 1500, 2298, 1500, 7688, 2183, 2608, 11292, 1855, 1880, 2175, 2136, 5407, 2116, 2116, 5398, 4552, 5431, 7270, 4482, 2128, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 6386, 6368, 6535, 4539, 1641, 11292, 2136, 5407, 6480, 2643, 1575, 2584, 3518, 2644, 2611, 2611, 2116, 2116, 2241, 5398, 2226, 2643, 1555, 1702, 3382, 5500, 4580, 2450, 1570, 5431, 3308, 2043, 2531, 3518, 4482, 2124, 2627, 2094, 2038, 2310, 2496, 5438, 2582, 5451, 2240, 2398, 2226, 5687, 2093, 2099, 3420, 2626, 1177, 1555, 6576, 965, 1702, 2450, 1570, 2142, 4580, 4539, 6536, 1641, 1500, 2298, 1500, 2608, 1855, 1880, 2175, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 2549, 6574, 5687, 7688, 2183, 1177, 965, 7270, 2128, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4630, 1575, 5431, 1500, 1575, 4532, 1500, 2627, 2094, 2038, 5438, 2310, 2496, 2549, 6574, 6535, 2240, 7688, 2183, 2099, 2626, 5451, 6480, 2643, 1555, 2584, 2644, 1702, 2611, 2450, 1570, 2611, 7270, 2241, 2128, 1555, 1702, 4539, 1641, 2450, 1570, 3308, 2043, 3518, 3518, 6535, 6480, 2643, 2582, 2226, 2398, 2226, 2584, 2644, 2643, 2611, 2093, 2611, 3382, 3420, 2241, 5500, 6576, 1500, 1500, 2531, 1555, 2142, 4630, 6536, 2124, 1702, 2450, 1570, 2298, 5687, 2608, 1855, 1880, 2175, 3308, 2043, 1177, 965, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4630, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1575, 6386, 6368, 2549, 6574, 11292, 2136, 7688, 2183, 5407, 2116, 2116, 5398, 4630, 5431, 4532, 2627, 2094, 2038, 2310, 5438, 7270, 2496, 2128, 1500, 1500, 2240, 2099, 5451, 2626, 1555, 6386, 6368, 1702, 2450, 1570, 11292, 1575, 2136, 5407, 4539, 2116, 1641, 2116, 5398, 6535, 3518, 6480, 2643, 4630, 5431, 2226, 2643, 2584, 2644, 2611, 3382, 2611, 2241, 5500, 1555, 4532, 2627, 2094, 2038, 2531, 1702, 2310, 2450, 1570, 2496, 2124, 3308, 5438, 2240, 2043, 3518, 2099, 5451, 2626, 1555, 1702, 2582, 2398, 5687, 2450, 2226, 1570, 1177, 965, 2093, 3420, 6576, 2142, 4630, 4539, 6536, 1641, 1500, 3518, 1500, 2298, 2608, 1855, 1880, 2175, 2226, 2643, 1220, 3382, 5500, 1575, 1676, 2531, 2549, 6574, 1624, 2124, 7688, 2183, 1153, 4741, 1590, 1611, 5687, 1102, 1701, 1177, 965, 2449, 1597, 1066, 7270, 2128, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4525, 5431, 4455, 2627, 2094, 2038, 5438, 2310, 2496, 1500, 2240, 5451, 1500, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 4539, 1641, 2549, 6574, 6535, 3518, 7688, 2183, 6480, 2643, 2584, 2644, 2226, 2611, 2643, 2611, 3382, 2241, 5500, 1555, 2531, 7270, 2124, 2128, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4553, 6536, 1500, 1500, 2298, 2608, 1855, 1880, 2175, 6535, 5687, 1177, 965, 6480, 2643, 
2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 1575, 6574, 6386, 2549, 2142, 6368, 4553, 11292, 2136, 6536, 5407, 7688, 2183, 2116, 2298, 2116, 5398, 2608, 1855, 1880, 2175, 1500, 1500, 7270, 2128, 4553, 5431, 4455, 6386, 6368, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 6535, 5451, 11292, 2136, 6480, 2643, 5407, 2584, 2099, 2116, 2626, 2644, 1555, 2116, 2611, 5398, 1702, 2611, 1575, 2450, 2241, 4539, 4553, 1570, 1555, 1641, 5431, 1702, 2450, 4455, 1570, 2627, 2094, 2038, 3308, 5438, 2310, 2043, 2496, 3518, 2240, 3518, 5451, 2099, 2626, 2226, 2643, 2582, 2398, 3382, 1555, 5500, 2226, 1702, 2093, 2531, 2450, 3420, 1570, 6576, 2124, 4539, 1641, 2142, 4553, 6536, 2298, 3518, 1500, 2608, 1855, 1880, 2175, 1500, 2226, 2643, 3382, 5500, 5687, 2531, 1177, 965, 2124, 6386, 6368, 11292, 2136, 5407, 1575, 5687, 2549, 6574, 1177, 2116, 965, 2116, 7688, 2183, 5398, 4553, 5431, 1575, 4455, 2627, 2094, 2038, 5438, 2310, 2496, 1500, 7270, 1500, 2128, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 2549, 4539, 6574, 1641, 6535, 3518, 7688, 2183, 6480, 2643, 2584, 2226, 2644, 2643, 2611, 3382, 2611, 5500, 2241, 1555, 2531, 1702, 2450, 2124, 1570, 7270, 2128, 3308, 2043, 3518, 2582, 1500, 2398, 2226, 1500, 2093, 5687, 3420, 1177, 6576, 2142, 4553, 965, 6536, 6535, 2298, 2608, 6480, 1855, 2643, 1880, 2175, 2584, 2644, 2611, 1220, 2611, 2241, 1555, 1702, 2450, 1570, 1676, 3308, 2043, 3518, 1575, 2582, 2398, 1624, 2226, 2549, 6574, 2093, 3420, 1153, 6386, 6576, 7688, 6368, 2183, 1575, 4767, 1624, 11292, 2136, 5407, 2142, 4551, 1611, 7270, 2128, 1102, 1701, 1500, 2449, 1500, 6536, 2116, 2116, 5398, 2298, 2608, 1855, 1880, 2175, 4551, 5431, 4481, 2627, 2094, 2038, 5438, 2310, 2496, 5451, 6535, 2240, 2099, 6480, 2643, 2626, 1555, 2584, 2644, 1702, 4539, 2611, 6386, 1641, 2450, 2611, 6368, 1570, 2241, 1555, 1575, 1702, 11292, 2450, 1570, 2136, 5407, 3308, 2043, 3518, 2116, 3518, 2116, 5398, 4579, 2582, 2226, 5431, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 4481, 6576, 2627, 2094, 2038, 5438, 2531, 2310, 2496, 5451, 2142, 2124, 4579, 2240, 6536, 2099, 2626, 1555, 2298, 2608, 1702, 1855, 1880, 2175, 2450, 1570, 4539, 1641, 5687, 1500, 1177, 965, 1500, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 2549, 6574, 7688, 2183, 5687, 1177, 965, 6386, 6368, 11292, 2136, 1575, 5407, 2116, 2116, 5398, 1500, 1500, 4579, 7270, 2128, 5431, 4481, 1575, 2627, 2094, 2038, 5438, 2549, 2310, 6574, 2496, 6535, 5451, 2240, 7688, 2183, 2099, 6457, 2643, 2626, 1555, 2584, 4539, 2644, 2611, 1641, 1702, 7270, 2128, 2611, 2450, 2241, 1570, 1555, 1500, 1500, 1702, 2450, 1570, 3308, 2043, 3518, 3518, 6535, 2582, 2398, 2226, 2643, 6480, 2643, 3382, 2226, 5500, 2584, 2644, 2093, 3420, 2611, 6553, 2531, 2611, 2124, 2241, 2142, 4579, 1555, 6513, 1702, 2298, 2450, 1570, 2608, 1855, 1880, 2175, 3308, 2043, 3518, 5687, 1177, 965, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4579, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1575, 2549, 6574, 6386, 6368, 7688, 2183, 11292, 2136, 5407, 1500, 2116, 1500, 2116, 5398, 4579, 5431, 4481, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 7270, 2128, 1555, 1575, 1702, 2450, 4539, 1570, 6386, 1641, 6368, 11292, 2136, 6535, 5407, 6480, 2643, 2116, 2116, 3518, 2584, 5398, 2644, 2611, 2226, 2643, 4629, 2611, 5431, 3382, 2241, 5500, 4531, 1555, 2531, 2627, 2094, 2038, 1702, 2310, 5438, 2450, 2496, 2124, 1570, 3308, 2240, 2043, 3518, 5451, 2099, 1500, 2626, 1500, 1555, 5687, 1702, 1177, 2450, 2582, 965, 1570, 2398, 2226, 2093, 3420, 6576, 
4539, 1641, 2142, 4629, 6536, 3518, 2298, 2608, 1855, 1880, 2175, 2226, 2643, 3382, 5500, 1575, 1220, 2531, 1676, 2549, 6574, 2124, 1624, 7688, 2183, 1153, 4769, 1624, 1611, 1102, 1701, 5687, 2449, 1177, 1597, 965, 1066, 7270, 2128, 1500, 6386, 1500, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 1575, 4553, 5431, 4483, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 5451, 6535, 6574, 2549, 6480, 2643, 3518, 2584, 2644, 7688, 2183, 2226, 2611, 2643, 2611, 5710, 2241, 3382, 1641, 1555, 6671, 1702, 2450, 1570, 3308, 2531, 2043, 3518, 2124, 1500, 2582, 1500, 2398, 2226, 2093, 3420, 7270, 2128, 6576, 2142, 6858, 4581, 1177, 6536, 2298, 965, 2608, 6535, 1855, 1880, 2175, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 2142, 4581, 2549, 7745, 6536, 1575, 2298, 2608, 7688, 2183, 1855, 1880, 2175, 6386, 6368, 1500, 1500, 11292, 2136, 5407, 7270, 2128, 2116, 2116, 5398, 4631, 6386, 6368, 5431, 11292, 2136, 5407, 4533, 2627, 2094, 2038, 2310, 2496, 2116, 5438, 2116, 5398, 2240, 2099, 6535, 2626, 6480, 2643, 5451, 2584, 2644, 4631, 1555, 5431, 2611, 4533, 2627, 2094, 2038, 1702, 2310, 2496, 2611, 2241, 2450, 1570, 2240, 5438, 2099, 2626, 1555, 5451, 1555, 1702, 4539, 1641, 1702, 2450, 2450, 1570, 1570, 3518, 3308, 2043, 3518, 2226, 1575, 2643, 4539, 3382, 5500, 2582, 2398, 3518, 2226, 1641, 2226, 2093, 3420, 2643, 6576, 2531, 3382, 2124, 5500, 2142, 4631, 6536, 2531, 2298, 2608, 1855, 1880, 2175, 2124, 5687, 1177, 965, 1500, 1500, 1575, 5687, 1177, 2549, 965, 6574, 7688, 2183, 7270, 2128, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 1575, 4631, 1500, 2549, 1500, 5431, 6574, 6535, 4533, 2627, 2094, 2038, 7688, 2183, 2310, 6480, 2643, 2496, 5438, 2240, 2584, 2099, 2626, 2644, 2611, 5451, 1555, 2611, 1702, 2241, 2450, 1570, 1555, 1702, 2450, 1570, 7270, 3308, 2128, 4539, 2043, 3518, 1641, 3518, 2582, 2226, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 6576, 2531, 2142, 4631, 2124, 6536, 6535, 2298, 2608, 6480, 1855, 2643, 1880, 2175, 2584, 2644, 2611, 2611, 2241, 5687, 1177, 1555, 965, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4631, 6536, 2298, 1575, 2608, 1855, 1880, 2175, 6574, 1575, 1676, 7688, 2183, 1220, 2549, 1624, 1153, 4691, 6386, 6368, 1590, 1611, 7270, 2128, 1102, 1701, 11292, 2136, 2449, 5407, 1500, 1500, 2116, 2116, 5398, 4549, 5431, 6535, 6386, 6480, 6368, 2643, 4479, 2627, 2094, 2038, 2584, 2644, 5438, 1575, 2310, 5451, 2496, 2611, 2240, 2099, 2611, 2241, 2626, 11292, 2136, 1555, 5407, 1702, 2450, 1555, 1702, 2116, 1570, 2116, 2450, 5398, 4539, 1570, 1641, 4577, 3308, 5431, 2043, 3518, 3518, 4479, 2226, 2627, 2094, 2038, 5438, 2643, 2310, 3382, 5500, 2496, 2582, 5451, 2240, 2398, 2099, 2531, 2626, 1555, 2226, 2093, 1702, 2124, 3420, 2450, 1570, 6576, 2142, 4577, 6536, 4539, 1641, 2298, 5687, 2608, 1855, 1880, 2175, 1177, 965, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1500, 1500, 1575, 5687, 2549, 1177, 6574, 965, 7688, 2183, 6386, 6368, 1575, 7270, 2128, 11292, 2136, 5407, 2116, 2116, 5398, 4577, 5431, 4479, 1575, 2627, 2094, 2038, 5438, 2549, 2310, 6574, 2496, 6535, 1500, 5451, 1500, 2240, 6480, 2643, 7688, 2183, 2584, 2099, 2644, 2626, 1555, 2611, 2611, 1702, 4539, 2450, 2241, 1570, 1641, 1555, 1702, 2450, 1570, 3308, 7270, 2043, 2128, 3518, 3518, 2582, 2398, 2226, 2226, 2643, 2093, 3382, 3420, 5500, 6576, 2142, 2531, 4577, 6536, 6535, 6480, 2643, 2124, 2584, 2644, 2298, 2608, 2611, 1855, 1880, 2175, 2611, 2241, 1555, 1702, 
2450, 1570, 5687, 3308, 1177, 2043, 965, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1500, 2142, 1500, 4577, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 2549, 6574, 7688, 2183, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 7270, 2128, 4627, 5431, 4529, 2627, 2094, 2038, 5438, 2310, 2496, 6386, 6368, 6535, 11292, 2136, 5407, 2240, 2099, 5451, 2626, 6480, 2643, 1555, 2584, 2116, 2644, 1702, 2611, 2116, 2450, 5398, 2611, 1570, 2241, 4539, 4627, 1641, 1555, 1500, 5431, 1500, 1702, 2450, 4529, 1570, 2627, 2094, 3518, 2038, 5438, 3308, 2310, 2043, 3518, 2226, 2496, 2643, 3382, 5451, 1575, 2240, 5500, 2582, 2398, 2226, 2099, 2626, 2093, 3420, 1555, 2531, 6576, 2124, 1702, 4539, 2450, 2142, 1570, 1641, 4627, 6536, 2298, 2608, 1855, 1880, 2175, 5687, 1177, 965, 3518, 2226, 2643, 3382, 5500, 2531, 1575, 2124, 2549, 6574, 6386, 7688, 2183, 6368, 1568, 5687, 1177, 11292, 965, 2136, 5407, 1500, 1500, 2116, 2116, 5398, 7270, 2128, 1712, 1575, 4627, 1154, 5431, 4529, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 5451, 1676, 2099, 2626, 1555, 1220, 1702, 2450, 1575, 1570, 2549, 6574, 6535, 1624, 4539, 7688, 2183, 1641, 1500, 1500, 6480, 2643, 3518, 1153, 2584, 2644, 2226, 4817, 2611, 2643, 2611, 1590, 3382, 2241, 5500, 1624, 1555, 2559, 2561, 2559, 2531, 1702, 2124, 7270, 2579, 2579, 2450, 1611, 1570, 2128, 3308, 1102, 1701, 2449, 2043, 3518, 1597, 1106, 2582, 5687, 2398, 2226, 1177, 2093, 3420, 6576, 965, 6535, 2142, 4601, 6536, 6480, 2643, 2584, 2644, 2298, 1500, 2608, 1500, 2611, 1855, 1880, 2175, 2611, 2241, 1555, 1702, 2450, 1570, 1575, 3308, 2043, 3518, 1575, 2549, 6574, 2582, 2398, 2226, 7688, 2093, 2183, 3420, 6576, 2142, 4601, 6536, 2298, 6386, 2608, 6368, 1855, 1880, 2175, 7270, 2128, 11292, 2136, 5407, 2116, 2116, 5398, 4601, 5431, 4531, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 1500, 5451, 1500, 6535, 2099, 2626, 1555, 6480, 2643, 2584, 1702, 2644, 2450, 2611, 1570, 2611, 2241, 1555, 4539, 1641, 1702, 2450, 1570, 3308, 2043, 3518, 3518, 2582, 2226, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 6576, 2531, 2142, 4629, 2124, 6536, 2298, 2608, 1855, 1880, 2175, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4629, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 7291, 2128, 6534, 6479, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4629, 7291, 2128, 6536, 2298, 6534, 2608, 1855, 1880, 2175, 6479, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 1500, 2093, 3420, 1500, 6576, 2142, 4629, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1500, 1500, 1220, 1624, 1153, 4412, 1676, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1676, 1220, 1624, 1153, 4412, 1597, 908, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1676, 1220, 1624, 1153, 1500, 4412, 1500, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1597, 908, 1500, 1500, 1676, 1220, 1624, 1153, 4412, 1590, 1500, 1500, 1500, 1500, 1500, 1500 \ No newline at end of file diff --git a/tools/simulator.py b/tools/simulator.py deleted file mode 100755 index 2f3161b..0000000 --- a/tools/simulator.py +++ /dev/null @@ -1,730 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -eventlet.monkey_patch() - -import argparse -import bisect -import collections -import functools -import itertools -import json -import logging -import os -import random -import signal -import six -import string -import sys -import threading -import time -import yaml - -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_messaging import notify # noqa -from oslo_messaging import rpc # noqa -from oslo_utils import timeutils - -LOG = logging.getLogger() -RANDOM_GENERATOR = None -CURRENT_PID = None -CLIENTS = [] -MESSAGES = [] -IS_RUNNING = True -SERVERS = [] -TRANSPORT = None - -USAGE = """ Usage: ./simulator.py [-h] [--url URL] [-d DEBUG]\ - {notify-server,notify-client,rpc-server,rpc-client} ... - -Usage example: - python tools/simulator.py\ - --url rabbit://stackrabbit:secretrabbit@localhost/ rpc-server - python tools/simulator.py\ - --url rabbit://stackrabbit:secretrabbit@localhost/ rpc-client\ - --exit-wait 15000 -p 64 -m 64""" - -MESSAGES_LIMIT = 1000 -DISTRIBUTION_BUCKET_SIZE = 500 - - -def init_random_generator(): - data = [] - file_dir = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.join(file_dir, 'messages_length.yaml')) as m_file: - content = yaml.safe_load(m_file) - data += [int(n) for n in content[ - 'test_data']['string_lengths'].split(', ')] - - ranges = collections.defaultdict(int) - for msg_length in data: - range_start = ((msg_length // DISTRIBUTION_BUCKET_SIZE) * - DISTRIBUTION_BUCKET_SIZE + 1) - ranges[range_start] += 1 - - ranges_start = sorted(ranges.keys()) - total_count = len(data) - - accumulated_distribution = [] - running_total = 0 - for range_start in ranges_start: - norm = float(ranges[range_start]) / total_count - running_total += norm - accumulated_distribution.append(running_total) - - def weighted_random_choice(): - r = random.random() * running_total - start = ranges_start[bisect.bisect_right(accumulated_distribution, r)] - return random.randrange(start, start + DISTRIBUTION_BUCKET_SIZE) - - return weighted_random_choice - - -class LoggingNoParsingFilter(logging.Filter): - def filter(self, record): - msg = record.getMessage() - for i in ['received {', 'MSG_ID is ']: - if i in msg: - return False - return True - - -Message = collections.namedtuple( - 'Message', ['seq', 'cargo', 'client_ts', 'server_ts', 'return_ts']) - - -def make_message(seq, cargo, client_ts=0, server_ts=0, return_ts=0): - return Message(seq, cargo, client_ts, server_ts, return_ts) - - -def update_message(message, **kwargs): - return Message(*message)._replace(**kwargs) - - -class MessageStatsCollector(object): - def __init__(self, label): - self.label = label - self.buffer = [] # buffer to store messages during report interval - self.series = [] # stats for every report interval - - now = time.time() - diff = int(now) - now + 1 # align start to whole seconds - threading.Timer(diff, self.monitor).start() # schedule in a second - - def monitor(self): - global IS_RUNNING - if IS_RUNNING: - threading.Timer(1.0, self.monitor).start() - now = time.time() - - count = len(self.buffer) - - size = 0 - min_latency = sys.maxsize - 
max_latency = 0 - sum_latencies = 0 - - for i in six.moves.range(count): - p = self.buffer[i] - size += len(p.cargo) - - latency = None - if p.return_ts: - latency = p.return_ts - p.client_ts # round-trip - elif p.server_ts: - latency = p.server_ts - p.client_ts # client -> server - - if latency: - sum_latencies += latency - min_latency = min(min_latency, latency) - max_latency = max(max_latency, latency) - - del self.buffer[:count] # trim processed items - - seq = len(self.series) - stats = dict(seq=seq, timestamp=now, count=count, size=size) - msg = ('%-14s: seq: %-4d count: %-6d bytes: %-10d' % - (self.label, seq, count, size)) - - if sum_latencies: - latency = sum_latencies / count - stats.update(dict(latency=latency, - min_latency=min_latency, - max_latency=max_latency)) - msg += (' latency: %-9.3f min: %-9.3f max: %-9.3f' % - (latency, min_latency, max_latency)) - - self.series.append(stats) - LOG.info(msg) - - def push(self, parsed_message): - self.buffer.append(parsed_message) - - def get_series(self): - return self.series - - @staticmethod - def calc_stats(label, *collectors): - count = 0 - size = 0 - min_latency = sys.maxsize - max_latency = 0 - sum_latencies = 0 - start = sys.maxsize - end = 0 - - for point in itertools.chain(*(c.get_series() for c in collectors)): - count += point['count'] - size += point['size'] - start = min(start, point['timestamp']) - end = max(end, point['timestamp']) - - if 'latency' in point: - sum_latencies += point['latency'] * point['count'] - min_latency = min(min_latency, point['min_latency']) - max_latency = max(max_latency, point['max_latency']) - - # start is the timestamp of the earliest block, which includes samples - # for the prior second - start -= 1 - duration = end - start if count else 0 - stats = dict(count=count, size=size, duration=duration, count_p_s=0, - size_p_s=0) - if duration: - stats.update(dict(start=start, end=end, - count_p_s=count / duration, - size_p_s=size / duration)) - - msg = ('%s: duration: %.2f count: %d (%.1f msg/sec) ' - 'bytes: %d (%.0f bps)' % - (label, duration, count, stats['count_p_s'], - size, stats['size_p_s'])) - - if sum_latencies: - latency = sum_latencies / count - stats.update(dict(latency=latency, - min_latency=min_latency, - max_latency=max_latency)) - msg += (' latency: %.3f min: %.3f max: %.3f' % - (latency, min_latency, max_latency)) - - LOG.info(msg) - return stats - - -class NotifyEndpoint(object): - def __init__(self, wait_before_answer, requeue): - self.wait_before_answer = wait_before_answer - self.requeue = requeue - self.received_messages = MessageStatsCollector('server') - self.cache = set() - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - LOG.debug("%s %s %s %s", ctxt, publisher_id, event_type, payload) - - server_ts = time.time() - - message = update_message(payload, server_ts=server_ts) - self.received_messages.push(message) - - if self.requeue and message.seq not in self.cache: - self.cache.add(message.seq) - - if self.wait_before_answer > 0: - time.sleep(self.wait_before_answer) - - return messaging.NotificationResult.REQUEUE - - return messaging.NotificationResult.HANDLED - - -def notify_server(transport, topic, wait_before_answer, duration, requeue): - endpoints = [NotifyEndpoint(wait_before_answer, requeue)] - target = messaging.Target(topic=topic) - server = notify.get_notification_listener(transport, [target], - endpoints, executor='eventlet') - run_server(server, duration=duration) - - return endpoints[0] - - -class BatchNotifyEndpoint(object): - def
__init__(self, wait_before_answer, requeue): - self.wait_before_answer = wait_before_answer - self.requeue = requeue - self.received_messages = MessageStatsCollector('server') - self.cache = set() - - def info(self, batch): - LOG.debug('msg rcv') - LOG.debug("%s", batch) - - server_ts = time.time() - - for item in batch: - message = update_message(item['payload'], server_ts=server_ts) - self.received_messages.push(message) - - return messaging.NotificationResult.HANDLED - - -def batch_notify_server(transport, topic, wait_before_answer, duration, - requeue): - endpoints = [BatchNotifyEndpoint(wait_before_answer, requeue)] - target = messaging.Target(topic=topic) - server = notify.get_batch_notification_listener( - transport, [target], - endpoints, executor='eventlet', - batch_size=1000, batch_timeout=5) - run_server(server, duration=duration) - - return endpoints[0] - - -class RpcEndpoint(object): - def __init__(self, wait_before_answer): - self.wait_before_answer = wait_before_answer - self.received_messages = MessageStatsCollector('server') - - def info(self, ctxt, message): - server_ts = time.time() - - LOG.debug("######## RCV: %s", message) - - reply = update_message(message, server_ts=server_ts) - self.received_messages.push(reply) - - if self.wait_before_answer > 0: - time.sleep(self.wait_before_answer) - - return reply - - -class Client(object): - def __init__(self, client_id, client, method, has_result, - wait_after_msg): - self.client_id = client_id - self.client = client - self.method = method - self.wait_after_msg = wait_after_msg - - self.seq = 0 - self.messages_count = len(MESSAGES) - # Start sending the messages from a random position to avoid - # reusing the same messages and to generate a more realistic load on the - # library and the message transport - self.position = random.randint(0, self.messages_count - 1) - self.sent_messages = MessageStatsCollector('client-%s' % client_id) - self.errors = MessageStatsCollector('error-%s' % client_id) - - if has_result: - self.round_trip_messages = MessageStatsCollector( - 'round-trip-%s' % client_id) - - def send_msg(self): - msg = make_message(self.seq, MESSAGES[self.position], time.time()) - self.sent_messages.push(msg) - - res = None - try: - res = self.method(self.client, msg) - except Exception: - self.errors.push(msg) - else: - LOG.debug("SENT: %s", msg) - - if res: - return_ts = time.time() - res = update_message(res, return_ts=return_ts) - self.round_trip_messages.push(res) - - self.seq += 1 - self.position = (self.position + 1) % self.messages_count - if self.wait_after_msg > 0: - time.sleep(self.wait_after_msg) - - -class RPCClient(Client): - def __init__(self, client_id, transport, target, timeout, is_cast, - wait_after_msg): - client = rpc.RPCClient(transport, target).prepare(timeout=timeout) - method = _rpc_cast if is_cast else _rpc_call - - super(RPCClient, self).__init__(client_id, client, method, - not is_cast, wait_after_msg) - - -class NotifyClient(Client): - def __init__(self, client_id, transport, topic, wait_after_msg): - client = notify.Notifier(transport, driver='messaging', topic=topic) - client = client.prepare(publisher_id='publisher-%d' % client_id) - method = _notify - super(NotifyClient, self).__init__(client_id, client, method, - False, wait_after_msg) - - -def generate_messages(messages_count): - # Limit the number of messages.
Clients will iterate over the array again - # if the number of messages to be sent is bigger than MESSAGES_LIMIT - if messages_count > MESSAGES_LIMIT: - messages_count = MESSAGES_LIMIT - LOG.info("Generating %d random messages", messages_count) - - for i in six.moves.range(messages_count): - length = RANDOM_GENERATOR() - msg = ''.join(random.choice( - string.ascii_lowercase) for x in six.moves.range(length)) - MESSAGES.append(msg) - - LOG.info("Messages have been prepared") - - -def wrap_sigexit(f): - def inner(*args, **kwargs): - try: - return f(*args, **kwargs) - except SignalExit as e: - LOG.info('Signal %s is caught. Interrupting the execution', - e.signo) - for server in SERVERS: - server.stop() - server.wait() - finally: - if TRANSPORT: - TRANSPORT.cleanup() - return inner - - -@wrap_sigexit -def run_server(server, duration=None): - global IS_RUNNING - SERVERS.append(server) - server.start() - if duration: - with timeutils.StopWatch(duration) as stop_watch: - while not stop_watch.expired() and IS_RUNNING: - time.sleep(1) - server.stop() - IS_RUNNING = False - server.wait() - LOG.info('The server is terminating') - time.sleep(1) # wait for stats collector to process the last second - - -def rpc_server(transport, target, wait_before_answer, executor, duration): - endpoints = [RpcEndpoint(wait_before_answer)] - server = rpc.get_rpc_server(transport, target, endpoints, - executor=executor) - LOG.debug("starting RPC server for target %s", target) - - run_server(server, duration=duration) - - return server.dispatcher.endpoints[0] - - -@wrap_sigexit -def spawn_rpc_clients(threads, transport, targets, wait_after_msg, timeout, - is_cast, messages_count, duration): - p = eventlet.GreenPool(size=threads) - targets = itertools.cycle(targets) - for i in six.moves.range(threads): - target = next(targets) - LOG.debug("starting RPC client for target %s", target) - client_builder = functools.partial(RPCClient, i, transport, target, - timeout, is_cast, wait_after_msg) - p.spawn_n(send_messages, i, client_builder, messages_count, duration) - p.waitall() - - -@wrap_sigexit -def spawn_notify_clients(threads, topic, transport, message_count, - wait_after_msg, timeout, duration): - p = eventlet.GreenPool(size=threads) - for i in six.moves.range(threads): - client_builder = functools.partial(NotifyClient, i, transport, topic, - wait_after_msg) - p.spawn_n(send_messages, i, client_builder, message_count, duration) - p.waitall() - - -def send_messages(client_id, client_builder, messages_count, duration): - global IS_RUNNING - client = client_builder() - CLIENTS.append(client) - - # align message sending closer to whole seconds - now = time.time() - diff = int(now) - now + 1 - time.sleep(diff) - - if duration: - with timeutils.StopWatch(duration) as stop_watch: - while not stop_watch.expired() and IS_RUNNING: - client.send_msg() - eventlet.sleep() - IS_RUNNING = False - else: - LOG.debug("Sending %d messages using client %d", - messages_count, client_id) - for _ in six.moves.range(messages_count): - client.send_msg() - eventlet.sleep() - if not IS_RUNNING: - break - LOG.debug("Client %d has sent %d messages", client_id, messages_count) - - time.sleep(1) # wait for replies to be collected - - -def _rpc_call(client, msg): - try: - res = client.call({}, 'info', message=msg) - except Exception as e: - LOG.exception('Error %s on CALL for message %s', str(e), msg) - raise - else: - LOG.debug("SENT: %s, RCV: %s", msg, res) - return res - - -def _rpc_cast(client, msg): - try: - client.cast({}, 'info', message=msg) - except
Exception as e: - LOG.exception('Error %s on CAST for message %s', str(e), msg) - raise - else: - LOG.debug("SENT: %s", msg) - - -def _notify(notification_client, msg): - notification_client.info({}, 'compute.start', msg) - - -def show_server_stats(endpoint, json_filename): - LOG.info('=' * 35 + ' summary ' + '=' * 35) - output = dict(series={}, summary={}) - output['series']['server'] = endpoint.received_messages.get_series() - stats = MessageStatsCollector.calc_stats( - 'server', endpoint.received_messages) - output['summary'] = stats - - if json_filename: - write_json_file(json_filename, output) - - -def show_client_stats(clients, json_filename, has_reply=False): - LOG.info('=' * 35 + ' summary ' + '=' * 35) - output = dict(series={}, summary={}) - - for cl in clients: - cl_id = cl.client_id - output['series']['client_%s' % cl_id] = cl.sent_messages.get_series() - output['series']['error_%s' % cl_id] = cl.errors.get_series() - - if has_reply: - output['series']['round_trip_%s' % cl_id] = ( - cl.round_trip_messages.get_series()) - - sent_stats = MessageStatsCollector.calc_stats( - 'client', *(cl.sent_messages for cl in clients)) - output['summary']['client'] = sent_stats - - error_stats = MessageStatsCollector.calc_stats( - 'error', *(cl.errors for cl in clients)) - output['summary']['error'] = error_stats - - if has_reply: - round_trip_stats = MessageStatsCollector.calc_stats( - 'round-trip', *(cl.round_trip_messages for cl in clients)) - output['summary']['round_trip'] = round_trip_stats - - if json_filename: - write_json_file(json_filename, output) - - -def write_json_file(filename, output): - with open(filename, 'w') as f: - f.write(json.dumps(output)) - LOG.info('Stats are written to %s', filename) - - -class SignalExit(SystemExit): - def __init__(self, signo, exccode=1): - super(SignalExit, self).__init__(exccode) - self.signo = signo - - -def signal_handler(signum, frame): - global IS_RUNNING - IS_RUNNING = False - - raise SignalExit(signum) - - -def _setup_logging(is_debug): - log_level = logging.DEBUG if is_debug else logging.INFO - logging.basicConfig( - stream=sys.stdout, level=log_level, - format="%(asctime)-15s %(levelname)s %(name)s %(message)s") - logging.getLogger().handlers[0].addFilter(LoggingNoParsingFilter()) - for i in ['kombu', 'amqp', 'stevedore', 'qpid.messaging', - 'oslo.messaging._drivers.amqp', ]: - logging.getLogger(i).setLevel(logging.WARN) - - -def main(): - parser = argparse.ArgumentParser( - description='Tools to play with oslo.messaging\'s RPC', - usage=USAGE, - ) - parser.add_argument('--url', dest='url', - default='rabbit://guest:password@localhost/', - help="oslo.messaging transport url") - parser.add_argument('-d', '--debug', dest='debug', type=bool, - default=False, - help="Turn on DEBUG logging level instead of WARN") - parser.add_argument('-tp', '--topic', dest='topic', - default="profiler_topic", - help="Topics to publish/receive messages to/from.") - parser.add_argument('-s', '--server', dest='server', - default="profiler_server", - help="Servers to publish/receive messages to/from.") - parser.add_argument('-tg', '--targets', dest='targets', nargs="+", - default=["profiler_topic.profiler_server"], - help="Targets to publish/receive messages to/from.") - parser.add_argument('-l', dest='duration', type=int, - help='send messages for a certain time (in seconds)') - parser.add_argument('-j', '--json', dest='json_filename', - help='File name to store results in JSON format') - parser.add_argument('--config-file', dest='config_file', type=str, - help="Oslo messaging
config file") - - subparsers = parser.add_subparsers(dest='mode', - help='notify/rpc server/client mode') - - server = subparsers.add_parser('notify-server') - server.add_argument('-w', dest='wait_before_answer', type=int, default=-1) - server.add_argument('--requeue', dest='requeue', action='store_true') - - server = subparsers.add_parser('batch-notify-server') - server.add_argument('-w', dest='wait_before_answer', type=int, default=-1) - server.add_argument('--requeue', dest='requeue', action='store_true') - - client = subparsers.add_parser('notify-client') - client.add_argument('-p', dest='threads', type=int, default=1, - help='number of client threads') - client.add_argument('-m', dest='messages', type=int, default=1, - help='number of call per threads') - client.add_argument('-w', dest='wait_after_msg', type=float, default=-1, - help='sleep time between two messages') - client.add_argument('--timeout', dest='timeout', type=int, default=3, - help='client timeout') - - server = subparsers.add_parser('rpc-server') - server.add_argument('-w', dest='wait_before_answer', type=int, default=-1) - server.add_argument('-e', '--executor', dest='executor', - type=str, default='eventlet', - help='name of a message executor') - - client = subparsers.add_parser('rpc-client') - client.add_argument('-p', dest='threads', type=int, default=1, - help='number of client threads') - client.add_argument('-m', dest='messages', type=int, default=1, - help='number of call per threads') - client.add_argument('-w', dest='wait_after_msg', type=float, default=-1, - help='sleep time between two messages') - client.add_argument('--timeout', dest='timeout', type=int, default=3, - help='client timeout') - client.add_argument('--exit-wait', dest='exit_wait', type=int, default=0, - help='Keep connections open N seconds after calls ' - 'have been done') - client.add_argument('--is-cast', dest='is_cast', type=bool, default=False, - help='Use `call` or `cast` RPC methods') - client.add_argument('--is-fanout', dest='is_fanout', type=bool, - default=False, help='fanout=True for CAST messages') - - args = parser.parse_args() - - _setup_logging(is_debug=args.debug) - - if args.config_file: - cfg.CONF(["--config-file", args.config_file]) - - global TRANSPORT - if args.mode in ['rpc-server', 'rpc-client']: - TRANSPORT = messaging.get_transport(cfg.CONF, url=args.url) - else: - TRANSPORT = messaging.get_notification_transport(cfg.CONF, - url=args.url) - - if args.mode in ['rpc-client', 'notify-client']: - # always generate maximum number of messages for duration-limited tests - generate_messages(MESSAGES_LIMIT if args.duration else args.messages) - - # oslo.config defaults - cfg.CONF.heartbeat_interval = 5 - cfg.CONF.prog = os.path.basename(__file__) - cfg.CONF.project = 'oslo.messaging' - - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGINT, signal_handler) - - if args.mode == 'rpc-server': - target = messaging.Target(topic=args.topic, server=args.server) - if args.url.startswith('zmq'): - cfg.CONF.oslo_messaging_zmq.rpc_zmq_matchmaker = "redis" - - endpoint = rpc_server(TRANSPORT, target, args.wait_before_answer, - args.executor, args.duration) - show_server_stats(endpoint, args.json_filename) - - elif args.mode == 'notify-server': - endpoint = notify_server(TRANSPORT, args.topic, - args.wait_before_answer, args.duration, - args.requeue) - show_server_stats(endpoint, args.json_filename) - - elif args.mode == 'batch-notify-server': - endpoint = batch_notify_server(TRANSPORT, args.topic, - 
args.wait_before_answer, - args.duration, args.requeue) - show_server_stats(endpoint, args.json_filename) - - elif args.mode == 'notify-client': - spawn_notify_clients(args.threads, args.topic, TRANSPORT, - args.messages, args.wait_after_msg, - args.timeout, args.duration) - show_client_stats(CLIENTS, args.json_filename) - - elif args.mode == 'rpc-client': - targets = [target.partition('.')[::2] for target in args.targets] - targets = [messaging.Target( - topic=topic, server=server_name, fanout=args.is_fanout) for - topic, server_name in targets] - spawn_rpc_clients(args.threads, TRANSPORT, targets, - args.wait_after_msg, args.timeout, args.is_cast, - args.messages, args.duration) - - show_client_stats(CLIENTS, args.json_filename, not args.is_cast) - - if args.exit_wait: - LOG.info("Finished. Waiting for %d seconds", args.exit_wait) - time.sleep(args.exit_wait) - - -if __name__ == '__main__': - RANDOM_GENERATOR = init_random_generator() - CURRENT_PID = os.getpid() - main() diff --git a/tox.ini b/tox.ini deleted file mode 100644 index c023130..0000000 --- a/tox.ini +++ /dev/null @@ -1,99 +0,0 @@ -[tox] -envlist = py35,py34,py27,pep8,bandit - -[testenv] -setenv = - VIRTUAL_ENV={envdir} - OS_TEST_TIMEOUT=30 -passenv = OS_* -deps = -r{toxinidir}/test-requirements.txt -commands = python setup.py testr --slowest --testr-args='{posargs}' - -[testenv:pep8] -commands = flake8 -deps = hacking<0.11,>=0.10.0 - -[testenv:cover] -setenv = VIRTUAL_ENV={envdir} -commands = - python setup.py test --coverage --coverage-package-name=oslo_messaging --testr-args='{posargs}' - -[testenv:venv] -commands = {posargs} - -[testenv:docs] -commands = python setup.py build_sphinx - -[testenv:py27-func-rabbit] -setenv = TRANSPORT_DRIVER=rabbit -commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py34-func-rabbit] -setenv = TRANSPORT_DRIVER=rabbit -basepython = python3.4 -commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py35-func-rabbit] -setenv = TRANSPORT_DRIVER=rabbit -basepython = python3.5 -commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py27-func-pika] -setenv = TRANSPORT_DRIVER=pika -commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py27-func-amqp1] -setenv = TRANSPORT_URL=amqp://stackqpid:secretqpid@127.0.0.1:65123// -# NOTE(kgiusti): This gate job runs on CentOS 7 for now. -commands = {toxinidir}/setup-test-env-qpid.sh 1.0 python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py34-func-amqp1] -basepython = python3.4 -setenv = TRANSPORT_URL=amqp://stackqpid:secretqpid@127.0.0.1:65123// -# NOTE(kgiusti): This gate job runs on CentOS 7 for now. -commands = {toxinidir}/setup-test-env-qpid.sh 1.0 python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py35-func-amqp1] -basepython = python3.5 -setenv = TRANSPORT_URL=amqp://stackqpid:secretqpid@127.0.0.1:65123// -# NOTE(kgiusti): This gate job runs on CentOS 7 for now.
-commands = {toxinidir}/setup-test-env-qpid.sh 1.0 python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py27-func-zeromq] -commands = {toxinidir}/setup-test-env-zmq.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py34-func-zeromq] -basepython = python3.4 -commands = {toxinidir}/setup-test-env-zmq.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' - -[testenv:py27-func-zeromq-proxy] -commands = {toxinidir}/setup-test-env-zmq-proxy.sh python setup.py testr --slowest --testr-args='oslo_messaging.tests.functional' - -[testenv:py27-func-zeromq-pub-sub] -commands = {toxinidir}/setup-test-env-zmq-pub-sub.sh python setup.py testr --slowest --testr-args='oslo_messaging.tests.functional' - -[testenv:bandit] -deps = -r{toxinidir}/test-requirements.txt -commands = bandit -r oslo_messaging -x tests -n5 - -[flake8] -show-source = True -ignore = H405 -exclude = .tox,dist,doc,*.egg,build,__init__.py - -[hacking] -import_exceptions = - oslo_messaging._i18n - six.moves -local-check-factory = oslo_messaging.hacking.checks.factory - -[testenv:pip-missing-reqs] -# do not install test-requirements as that will pollute the virtualenv for -# determining missing packages -# this also means that pip-missing-reqs must be installed separately, outside -# of the requirements.txt files -deps = pip_missing_reqs -commands = pip-missing-reqs -d --ignore-module=oslo_messaging* --ignore-file=oslo_messaging/tests/* --ignore-file=tests/ oslo_messaging - -[testenv:releasenotes] -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
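The simulator deleted above writes its collected statistics as a single JSON object when given -j/--json (see write_json_file, show_server_stats and show_client_stats). A minimal sketch of reading such a dump back, assuming an rpc-client run; the stats.json file name is illustrative:

    import json

    # Load the stats written by `tools/simulator.py ... -j stats.json`
    # ('stats.json' is an illustrative file name, not a tree default).
    with open('stats.json') as f:
        output = json.load(f)

    # For an rpc-client run, 'summary' maps labels ('client', 'error',
    # and 'round_trip' for call-style runs) to the aggregates produced
    # by MessageStatsCollector.calc_stats above.
    client = output['summary']['client']
    print('sent %(count)d messages at %(count_p_s).1f msg/sec' % client)

    # 'series' holds the per-second samples each collector recorded.
    for label, series in sorted(output['series'].items()):
        total = sum(point['count'] for point in series)
        print('%s: %d messages over %d intervals' % (label, total, len(series)))

Note that for a server-mode run, output['summary'] is the stats dict itself rather than a per-label mapping, since show_server_stats assigns calc_stats' result directly.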