From 6add13cf631306420ee13e2e8cb0978e65885509 Mon Sep 17 00:00:00 2001
From: Serg Melikyan <smelikyan@mirantis.com>
Date: Thu, 25 Jul 2013 18:42:24 +0400
Subject: [PATCH] Migrate to Murano Common

Change-Id: I02ef05391f14b489fe8510e80cd4a9b9a516e773
---
 conductor/__init__.py                         |  20 +
 conductor/app.py                              |  30 +-
 conductor/cmd/__init__.py                     |   0
 bin/conductor => conductor/cmd/run.py         |   7 +-
 conductor/commands/windows_agent.py           |   2 +-
 conductor/config.py                           |   6 +-
 conductor/openstack/common/context.py         |  83 ++
 .../openstack/common/eventlet_backdoor.py     |  74 +-
 conductor/openstack/common/exception.py       |  35 +-
 conductor/openstack/common/excutils.py        |  98 ++
 conductor/openstack/common/fileutils.py       | 110 +++
 conductor/openstack/common/gettextutils.py    | 230 ++++-
 conductor/openstack/common/importutils.py     |   7 +-
 conductor/openstack/common/jsonutils.py       |  65 +-
 conductor/openstack/common/lockutils.py       | 276 ++++++
 conductor/openstack/common/log.py             | 129 ++-
 conductor/openstack/common/loopingcall.py     |  68 +-
 conductor/openstack/common/network_utils.py   |  81 ++
 conductor/openstack/common/notifier/api.py    |  42 +-
 .../openstack/common/notifier/log_notifier.py |   4 +-
 .../common/notifier/no_op_notifier.py         |   2 +-
 .../openstack/common/notifier/rpc_notifier.py |   4 +-
 .../common/notifier/rpc_notifier2.py          |   4 +-
 conductor/openstack/common/rpc/__init__.py    | 307 +++++++
 conductor/openstack/common/rpc/amqp.py        | 610 ++++++++++++
 conductor/openstack/common/rpc/common.py      | 509 +++++++++++
 conductor/openstack/common/rpc/dispatcher.py  | 178 ++++
 conductor/openstack/common/rpc/impl_fake.py   | 195 ++++
 conductor/openstack/common/rpc/impl_kombu.py  | 865 ++++++++++++++++++
 conductor/openstack/common/rpc/impl_qpid.py   | 739 +++++++++++++++
 conductor/openstack/common/rpc/impl_zmq.py    | 817 +++++++++++++++++
 conductor/openstack/common/rpc/matchmaker.py  | 330 +++++++
 .../openstack/common/rpc/matchmaker_redis.py  | 145 +++
 .../openstack/common/rpc/matchmaker_ring.py   | 110 +++
 conductor/openstack/common/rpc/proxy.py       | 226 +++++
 conductor/openstack/common/rpc/serializer.py  |  52 ++
 conductor/openstack/common/rpc/service.py     |  78 ++
 .../openstack/common/rpc/zmq_receiver.py      |  41 +
 conductor/openstack/common/service.py         |  86 +-
 conductor/openstack/common/setup.py           | 367 --------
 conductor/openstack/common/sslutils.py        |  24 +-
 conductor/openstack/common/threadgroup.py     |  21 +-
 conductor/openstack/common/timeutils.py       |  28 +-
 conductor/openstack/common/version.py         |  94 --
 conductor/openstack/common/wsgi.py            | 797 ----------------
 conductor/openstack/common/xmlutils.py        |   2 +-
 conductor/rabbitmq.py                         | 142 ---
 conductor/reporting.py                        |   4 +-
 conductor/version.py                          |  18 -
 etc/conductor.conf                            |   2 +-
 openstack-common.conf                         |  21 +-
 tools/pip-requires => requirements.txt        |   3 +-
 setup.cfg                                     |  31 +
 setup.py                                      |  53 +-
 tools/test-requires => test-requirements.txt  |   0
 tests/conductor/test_methods.py               |   1 -
 tools/config/generate_sample.sh               |  69 ++
 tools/install_venv.py                         | 175 +---
 tools/install_venv_common.py                  |  86 +-
 tox.ini                                       |   4 +-
 60 files changed, 6775 insertions(+), 1832 deletions(-)
 create mode 100644 conductor/cmd/__init__.py
 rename bin/conductor => conductor/cmd/run.py (97%)
 create mode 100644 conductor/openstack/common/context.py
 create mode 100644 conductor/openstack/common/excutils.py
 create mode 100644 conductor/openstack/common/fileutils.py
 create mode 100644 conductor/openstack/common/lockutils.py
 create mode 100644 conductor/openstack/common/network_utils.py
 create mode 100644 conductor/openstack/common/rpc/__init__.py
 create mode 100644 conductor/openstack/common/rpc/amqp.py
 create mode 100644 conductor/openstack/common/rpc/common.py
 create mode 100644 conductor/openstack/common/rpc/dispatcher.py
 create mode 100644 conductor/openstack/common/rpc/impl_fake.py
 create mode 100644 conductor/openstack/common/rpc/impl_kombu.py
 create mode 100644 conductor/openstack/common/rpc/impl_qpid.py
 create mode 100644 conductor/openstack/common/rpc/impl_zmq.py
 create mode 100644 conductor/openstack/common/rpc/matchmaker.py
 create mode 100644 conductor/openstack/common/rpc/matchmaker_redis.py
 create mode 100644 conductor/openstack/common/rpc/matchmaker_ring.py
 create mode 100644 conductor/openstack/common/rpc/proxy.py
 create mode 100644 conductor/openstack/common/rpc/serializer.py
 create mode 100644 conductor/openstack/common/rpc/service.py
 create mode 100755 conductor/openstack/common/rpc/zmq_receiver.py
 delete mode 100644 conductor/openstack/common/setup.py
 delete mode 100644 conductor/openstack/common/version.py
 delete mode 100644 conductor/openstack/common/wsgi.py
 delete mode 100644 conductor/rabbitmq.py
 delete mode 100644 conductor/version.py
 rename tools/pip-requires => requirements.txt (52%)
 rename tools/test-requires => test-requirements.txt (100%)
 create mode 100755 tools/config/generate_sample.sh

diff --git a/conductor/__init__.py b/conductor/__init__.py
index e69de29..edc03dd 100644
--- a/conductor/__init__.py
+++ b/conductor/__init__.py
@@ -0,0 +1,20 @@
+#    Copyright (c) 2013 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import gettext
+gettext.install('conductor', './conductor/locale', unicode=1)
+
+from pbr import version
+__version_info = version.VersionInfo('conductor')
+__version__ = __version_info.cached_version_string()
diff --git a/conductor/app.py b/conductor/app.py
index abaf74d..c286cb0 100644
--- a/conductor/app.py
+++ b/conductor/app.py
@@ -25,25 +25,35 @@ from commands.dispatcher import CommandDispatcher
 from openstack.common import log as logging
 from config import Config
 import reporting
-import rabbitmq
+from muranocommon.mq import MqClient, Message
+from conductor import config as cfg
 
 import windows_agent
 import cloud_formation
 
 config = Config(sys.argv[1] if len(sys.argv) > 1 else None)
 
+rabbitmq = cfg.CONF.rabbitmq
 log = logging.getLogger(__name__)
 
+CONNECTION_PARAMS = {
+    'login': rabbitmq.login,
+    'password': rabbitmq.password,
+    'host': rabbitmq.host,
+    'port': rabbitmq.port,
+    'virtual_host': rabbitmq.virtual_host
+}
+
 
 def task_received(task, message_id):
-    with rabbitmq.RmqClient() as rmqclient:
+    with MqClient(**CONNECTION_PARAMS) as mq:
         try:
             log.info('Starting processing task {0}: {1}'.format(
                 message_id, anyjson.dumps(task)))
-            reporter = reporting.Reporter(rmqclient, message_id, task['id'])
+            reporter = reporting.Reporter(mq, message_id, task['id'])
 
             command_dispatcher = CommandDispatcher(
-                'e' + task['id'], rmqclient, task['token'], task['tenant_id'])
+                'e' + task['id'], mq, task['token'], task['tenant_id'])
             workflows = []
             for path in glob.glob("data/workflows/*.xml"):
                 log.debug('Loading XML {0}'.format(path))
@@ -69,11 +79,11 @@ def task_received(task, message_id):
             command_dispatcher.close()
         finally:
             del task['token']
-            result_msg = rabbitmq.Message()
+            result_msg = Message()
             result_msg.body = task
             result_msg.id = message_id
 
-            rmqclient.send(message=result_msg, key='task-results')
+            mq.send(message=result_msg, key='task-results')
     log.info('Finished processing task {0}. Result = {1}'.format(
         message_id, anyjson.dumps(task)))
 
@@ -92,10 +102,10 @@ class ConductorWorkflowService(service.Service):
     def _start_rabbitmq(self):
         while True:
             try:
-                with rabbitmq.RmqClient() as rmq:
-                    rmq.declare('tasks', 'tasks')
-                    rmq.declare('task-results')
-                    with rmq.open('tasks') as subscription:
+                with MqClient(**CONNECTION_PARAMS) as mq:
+                    mq.declare('tasks', 'tasks')
+                    mq.declare('task-results')
+                    with mq.open('tasks') as subscription:
                         while True:
                             msg = subscription.get_message()
                             self.tg.add_thread(
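The hunks above replace conductor.rabbitmq.RmqClient with muranocommon's MqClient while keeping the same declare/open/send flow. A minimal sketch of the new publish path, assuming a default local RabbitMQ; the credentials, queue name and payload below are illustrative, not part of the patch:

    from muranocommon.mq import MqClient, Message

    params = {'login': 'guest', 'password': 'guest',
              'host': 'localhost', 'port': 5672, 'virtual_host': '/'}

    with MqClient(**params) as mq:
        mq.declare('task-results')                       # ensure the queue exists
        reply = Message()
        reply.body = {'id': 'env-1', 'status': 'done'}   # illustrative payload
        reply.id = 'message-42'                          # correlation id
        mq.send(message=reply, key='task-results')
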
diff --git a/conductor/cmd/__init__.py b/conductor/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bin/conductor b/conductor/cmd/run.py
similarity index 97%
rename from bin/conductor
rename to conductor/cmd/run.py
index 4be245e..98040f9 100644
--- a/bin/conductor
+++ b/conductor/cmd/run.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright (c) 2013 Mirantis Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,7 +22,7 @@ from conductor.openstack.common import service
 from conductor.app import ConductorWorkflowService
 
 
-if __name__ == '__main__':
+def main():
     try:
         config.parse_args()
         os.chdir(config.CONF.data_dir)
@@ -34,3 +33,7 @@ if __name__ == '__main__':
     except RuntimeError, e:
         sys.stderr.write("ERROR: %s\n" % e)
         sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/conductor/commands/windows_agent.py b/conductor/commands/windows_agent.py
index a8e6a09..59f8e4a 100644
--- a/conductor/commands/windows_agent.py
+++ b/conductor/commands/windows_agent.py
@@ -2,7 +2,7 @@ import json
 import uuid
 
 from conductor.openstack.common import log as logging
-from conductor.rabbitmq import Message
+from muranocommon.mq import Message
 import conductor.helpers
 from command import CommandBase
 
diff --git a/conductor/config.py b/conductor/config.py
index 362833c..d55e1d9 100644
--- a/conductor/config.py
+++ b/conductor/config.py
@@ -26,7 +26,7 @@ import sys
 from oslo.config import cfg
 from paste import deploy
 
-from conductor.version import version_info as version
+from conductor import __version__ as version
 from ConfigParser import SafeConfigParser
 
 paste_deploy_opts = [
@@ -68,7 +68,7 @@ CONF.import_opt('syslog_log_facility', 'conductor.openstack.common.log')
 def parse_args(args=None, usage=None, default_config_files=None):
     CONF(args=args,
          project='conductor',
-         version=version.cached_version_string(),
+         version=version,
          usage=usage,
          default_config_files=default_config_files)
 
@@ -195,7 +195,7 @@ def load_paste_app(app_name=None):
 
 
 class Config(object):
-    CONFIG_PATH = './etc/app.config'
+    CONFIG_PATH = './etc/conductor.conf'
 
     def __init__(self, filename=None):
         self.config = SafeConfigParser()
diff --git a/conductor/openstack/common/context.py b/conductor/openstack/common/context.py
new file mode 100644
index 0000000..f9c5aed
--- /dev/null
+++ b/conductor/openstack/common/context.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Simple class that stores security context information in the web request.
+
+Projects should subclass this class if they wish to enhance the request
+context or provide additional information in their specific WSGI pipeline.
+"""
+
+import itertools
+
+from conductor.openstack.common import uuidutils
+
+
+def generate_request_id():
+    return 'req-%s' % uuidutils.generate_uuid()
+
+
+class RequestContext(object):
+
+    """Helper class to represent useful information about a request context.
+
+    Stores information about the security context under which the user
+    accesses the system, as well as additional request information.
+    """
+
+    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
+                 read_only=False, show_deleted=False, request_id=None):
+        self.auth_token = auth_token
+        self.user = user
+        self.tenant = tenant
+        self.is_admin = is_admin
+        self.read_only = read_only
+        self.show_deleted = show_deleted
+        if not request_id:
+            request_id = generate_request_id()
+        self.request_id = request_id
+
+    def to_dict(self):
+        return {'user': self.user,
+                'tenant': self.tenant,
+                'is_admin': self.is_admin,
+                'read_only': self.read_only,
+                'show_deleted': self.show_deleted,
+                'auth_token': self.auth_token,
+                'request_id': self.request_id}
+
+
+def get_admin_context(show_deleted=False):
+    context = RequestContext(None,
+                             tenant=None,
+                             is_admin=True,
+                             show_deleted=show_deleted)
+    return context
+
+
+def get_context_from_function_and_args(function, args, kwargs):
+    """Find an arg of type RequestContext and return it.
+
+       This is useful in a couple of decorators where we don't
+       know much about the function we're wrapping.
+    """
+
+    for arg in itertools.chain(kwargs.values(), args):
+        if isinstance(arg, RequestContext):
+            return arg
+
+    return None
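The new context module is the stock oslo-incubator copy. A quick usage sketch, assuming the sibling uuidutils module it imports is already present in the tree (this patch does not add it):

    from conductor.openstack.common import context

    ctx = context.RequestContext(auth_token='<token>', user='demo',
                                 tenant='demo-tenant')
    print(ctx.request_id)                                      # auto-generated 'req-<uuid>'
    print(context.get_admin_context().to_dict()['is_admin'])   # True
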
diff --git a/conductor/openstack/common/eventlet_backdoor.py b/conductor/openstack/common/eventlet_backdoor.py
index c0ad460..dd93792 100644
--- a/conductor/openstack/common/eventlet_backdoor.py
+++ b/conductor/openstack/common/eventlet_backdoor.py
@@ -16,8 +16,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from __future__ import print_function
+
+import errno
 import gc
+import os
 import pprint
+import socket
 import sys
 import traceback
 
@@ -26,18 +31,38 @@ import eventlet.backdoor
 import greenlet
 from oslo.config import cfg
 
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import log as logging
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers.  The "
+    "chosen port is displayed in the service's log file.")
 eventlet_backdoor_opts = [
-    cfg.IntOpt('backdoor_port',
+    cfg.StrOpt('backdoor_port',
                default=None,
-               help='port for eventlet backdoor to listen')
+               help="Enable eventlet backdoor.  %s" % help_for_backdoor_port)
 ]
 
 CONF = cfg.CONF
 CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
 
 
 def _dont_use_this():
-    print "Don't use this, just disconnect instead"
+    print("Don't use this, just disconnect instead")
 
 
 def _find_objects(t):
@@ -46,16 +71,42 @@ def _find_objects(t):
 
 def _print_greenthreads():
     for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print i, gt
+        print(i, gt)
         traceback.print_stack(gt.gr_frame)
-        print
+        print()
 
 
 def _print_nativethreads():
     for threadId, stack in sys._current_frames().items():
-        print threadId
+        print(threadId)
         traceback.print_stack(stack)
-        print
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range, ex,
+                                               help_for_backdoor_port)
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or try_port >= end_port):
+                raise
+            try_port += 1
 
 
 def initialize_if_enabled():
@@ -70,6 +121,8 @@ def initialize_if_enabled():
     if CONF.backdoor_port is None:
         return None
 
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
     # NOTE(johannes): The standard sys.displayhook will print the value of
     # the last expression and set it to __builtin__._, which overwrites
     # the __builtin__._ that gettext sets. Let's switch to using pprint
@@ -80,8 +133,13 @@ def initialize_if_enabled():
             pprint.pprint(val)
     sys.displayhook = displayhook
 
-    sock = eventlet.listen(('localhost', CONF.backdoor_port))
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen().  In any case, pull the port number out here.
     port = sock.getsockname()[1]
+    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+             {'port': port, 'pid': os.getpid()})
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port
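For reference, the three backdoor_port forms described in the help text map onto the new _parse_port_range helper as follows; a small sketch using that private function, assuming the conductor package and oslo.config are importable:

    from conductor.openstack.common import eventlet_backdoor

    print(eventlet_backdoor._parse_port_range('0'))           # (0, 0): random free port
    print(eventlet_backdoor._parse_port_range('8000'))        # (8000, 8000): that port only
    print(eventlet_backdoor._parse_port_range('8000:8010'))   # (8000, 8010): first free port in range
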
diff --git a/conductor/openstack/common/exception.py b/conductor/openstack/common/exception.py
index 5890c58..1707229 100644
--- a/conductor/openstack/common/exception.py
+++ b/conductor/openstack/common/exception.py
@@ -21,7 +21,7 @@ Exceptions common to OpenStack projects
 
 import logging
 
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 
 _FATAL_EXCEPTION_FORMAT_ERRORS = False
 
@@ -33,7 +33,7 @@ class Error(Exception):
 
 class ApiError(Error):
     def __init__(self, message='Unknown', code='Unknown'):
-        self.message = message
+        self.api_message = message
         self.code = code
         super(ApiError, self).__init__('%s: %s' % (code, message))
 
@@ -44,19 +44,19 @@ class NotFound(Error):
 
 class UnknownScheme(Error):
 
-    msg = "Unknown scheme '%s' found in URI"
+    msg_fmt = "Unknown scheme '%s' found in URI"
 
     def __init__(self, scheme):
-        msg = self.__class__.msg % scheme
+        msg = self.msg_fmt % scheme
         super(UnknownScheme, self).__init__(msg)
 
 
 class BadStoreUri(Error):
 
-    msg = "The Store URI %s was malformed. Reason: %s"
+    msg_fmt = "The Store URI %s was malformed. Reason: %s"
 
     def __init__(self, uri, reason):
-        msg = self.__class__.msg % (uri, reason)
+        msg = self.msg_fmt % (uri, reason)
         super(BadStoreUri, self).__init__(msg)
 
 
@@ -98,11 +98,9 @@ def wrap_exception(f):
     def _wrap(*args, **kw):
         try:
             return f(*args, **kw)
-        except Exception, e:
+        except Exception as e:
             if not isinstance(e, Error):
-                #exc_type, exc_value, exc_traceback = sys.exc_info()
                 logging.exception(_('Uncaught exception'))
-                #logging.error(traceback.extract_stack(exc_traceback))
                 raise Error(str(e))
             raise
     _wrap.func_name = f.func_name
@@ -110,33 +108,32 @@ def wrap_exception(f):
 
 
 class OpenstackException(Exception):
-    """
-    Base Exception
+    """Base Exception class.
 
     To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
+    a 'msg_fmt' property. That message will get printf'd
     with the keyword arguments provided to the constructor.
     """
-    message = "An unknown exception occurred"
+    msg_fmt = "An unknown exception occurred"
 
     def __init__(self, **kwargs):
         try:
-            self._error_string = self.message % kwargs
+            self._error_string = self.msg_fmt % kwargs
 
-        except Exception as e:
+        except Exception:
             if _FATAL_EXCEPTION_FORMAT_ERRORS:
-                raise e
+                raise
             else:
                 # at least get the core message out if something happened
-                self._error_string = self.message
+                self._error_string = self.msg_fmt
 
     def __str__(self):
         return self._error_string
 
 
 class MalformedRequestBody(OpenstackException):
-    message = "Malformed message body: %(reason)s"
+    msg_fmt = "Malformed message body: %(reason)s"
 
 
 class InvalidContentType(OpenstackException):
-    message = "Invalid content type %(content_type)s"
+    msg_fmt = "Invalid content type %(content_type)s"
diff --git a/conductor/openstack/common/excutils.py b/conductor/openstack/common/excutils.py
new file mode 100644
index 0000000..3c8ce9a
--- /dev/null
+++ b/conductor/openstack/common/excutils.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2012, Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Exception related utilities.
+"""
+
+import logging
+import sys
+import time
+import traceback
+
+from conductor.openstack.common.gettextutils import _  # noqa
+
+
+class save_and_reraise_exception(object):
+    """Save current exception, run some code and then re-raise.
+
+    In some cases the exception context can be cleared, resulting in None
+    being attempted to be re-raised after an exception handler is run. This
+    can happen when eventlet switches greenthreads or when running an
+    exception handler, code raises and catches an exception. In both
+    cases the exception context will be cleared.
+
+    To work around this, we save the exception state, run handler code, and
+    then re-raise the original exception. If another exception occurs, the
+    saved exception is logged and the new exception is re-raised.
+
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception.  For example:
+
+    except Exception:
+        with save_and_reraise_exception() as ctxt:
+            decide_if_need_reraise()
+            if not should_be_reraised:
+                ctxt.reraise = False
+    """
+    def __init__(self):
+        self.reraise = True
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            logging.error(_('Original exception being dropped: %s'),
+                          traceback.format_exception(self.type_,
+                                                     self.value,
+                                                     self.tb))
+            return False
+        if self.reraise:
+            raise self.type_, self.value, self.tb
+
+
+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                if exc.message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        exc.message != last_exc_message):
+                    logging.exception(
+                        _('Unexpected exception occurred %d time(s)... '
+                          'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = exc.message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
+    return inner_func
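save_and_reraise_exception is the usual oslo guard for running cleanup code without losing the original traceback. A small runnable sketch of the pattern from the docstring; the marker file and the IGNORE_MARKER_ERRORS switch are illustrative:

    import os

    from conductor.openstack.common import excutils


    def write_marker(path):
        try:
            with open(path, 'w') as f:
                f.write('ok')
                raise RuntimeError('simulated failure')        # provoke the error path
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                os.unlink(path)                                # cleanup runs first ...
                if os.environ.get('IGNORE_MARKER_ERRORS'):     # ... then optionally swallow
                    ctxt.reraise = False
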
diff --git a/conductor/openstack/common/fileutils.py b/conductor/openstack/common/fileutils.py
new file mode 100644
index 0000000..6cea6d2
--- /dev/null
+++ b/conductor/openstack/common/fileutils.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import contextlib
+import errno
+import os
+
+from conductor.openstack.common import excutils
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
+
+
+def ensure_tree(path):
+    """Create a directory (and any ancestor directories required)
+
+    :param path: Directory to create
+    """
+    try:
+        os.makedirs(path)
+    except OSError as exc:
+        if exc.errno == errno.EEXIST:
+            if not os.path.isdir(path):
+                raise
+        else:
+            raise
+
+
+def read_cached_file(filename, force_reload=False):
+    """Read from a file if it has been modified.
+
+    :param force_reload: Whether to reload the file.
+    :returns: A tuple with a boolean specifying if the data is fresh
+              or not.
+    """
+    global _FILE_CACHE
+
+    if force_reload and filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+    reloaded = False
+    mtime = os.path.getmtime(filename)
+    cache_info = _FILE_CACHE.setdefault(filename, {})
+
+    if not cache_info or mtime > cache_info.get('mtime', 0):
+        LOG.debug(_("Reloading cached file %s") % filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        reloaded = True
+    return (reloaded, cache_info['data'])
+
+
+def delete_if_exists(path):
+    """Delete a file, but ignore file not found error.
+
+    :param path: File to delete
+    """
+
+    try:
+        os.unlink(path)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return
+        else:
+            raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path):
+    """Protect code that wants to operate on PATH atomically.
+    Any exception will cause PATH to be removed.
+
+    :param path: File to work with
+    """
+    try:
+        yield
+    except Exception:
+        with excutils.save_and_reraise_exception():
+            delete_if_exists(path)
+
+
+def file_open(*args, **kwargs):
+    """Open file
+
+    see built-in file() documentation for more details
+
+    Note: The reason this is kept in a separate module is to easily
+    be able to provide a stub module that doesn't alter system
+    state at all (for unit tests)
+    """
+    return file(*args, **kwargs)
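A short sketch of the new fileutils helpers working together; the paths are illustrative:

    from conductor.openstack.common import fileutils

    fileutils.ensure_tree('/tmp/conductor-cache')              # mkdir -p semantics
    target = '/tmp/conductor-cache/manifest.json'
    with fileutils.remove_path_on_error(target):
        with open(target, 'w') as f:
            f.write('{"workflows": []}')                       # target removed if this raises
    fresh, data = fileutils.read_cached_file(target)
    print(fresh)      # True on first read, False until the mtime changes
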
diff --git a/conductor/openstack/common/gettextutils.py b/conductor/openstack/common/gettextutils.py
index 7537a7b..a7e408f 100644
--- a/conductor/openstack/common/gettextutils.py
+++ b/conductor/openstack/common/gettextutils.py
@@ -2,6 +2,7 @@
 
 # Copyright 2012 Red Hat, Inc.
 # All Rights Reserved.
+# Copyright 2013 IBM Corp.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -23,11 +24,236 @@ Usual usage in an openstack.common module:
     from conductor.openstack.common.gettextutils import _
 """
 
+import copy
 import gettext
+import logging.handlers
+import os
+import re
+import UserString
 
+import six
 
-t = gettext.translation('openstack-common', 'locale', fallback=True)
+_localedir = os.environ.get('conductor'.upper() + '_LOCALEDIR')
+_t = gettext.translation('conductor', localedir=_localedir, fallback=True)
 
 
 def _(msg):
-    return t.ugettext(msg)
+    return _t.ugettext(msg)
+
+
+def install(domain):
+    """Install a _() function using the given translation domain.
+
+    Given a translation domain, install a _() function using gettext's
+    install() function.
+
+    The main difference from gettext.install() is that we allow
+    overriding the default localedir (e.g. /usr/share/locale) using
+    a translation-domain-specific environment variable (e.g.
+    NOVA_LOCALEDIR).
+    """
+    gettext.install(domain,
+                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
+                    unicode=True)
+
+
+"""
+Lazy gettext functionality.
+
+The following is an attempt to introduce a deferred way
+to do translations on messages in OpenStack. We attempt to
+override the standard _() function and % (format string) operation
+to build Message objects that can later be translated when we have
+more information. Also included is an example LogHandler that
+translates Messages to an associated locale, effectively allowing
+many logs, each with their own locale.
+"""
+
+
+def get_lazy_gettext(domain):
+    """Assemble and return a lazy gettext function for a given domain.
+
+    Factory method for a project/module to get a lazy gettext function
+    for its own translation domain (i.e. nova, glance, cinder, etc.)
+    """
+
+    def _lazy_gettext(msg):
+        """Create and return a Message object.
+
+        Message encapsulates a string so that we can translate it later when
+        needed.
+        """
+        return Message(msg, domain)
+
+    return _lazy_gettext
+
+
+class Message(UserString.UserString, object):
+    """Class used to encapsulate translatable messages."""
+    def __init__(self, msg, domain):
+        # _msg is the gettext msgid and should never change
+        self._msg = msg
+        self._left_extra_msg = ''
+        self._right_extra_msg = ''
+        self.params = None
+        self.locale = None
+        self.domain = domain
+
+    @property
+    def data(self):
+        # NOTE(mrodden): this should always resolve to a unicode string
+        # that best represents the state of the message currently
+
+        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
+        if self.locale:
+            lang = gettext.translation(self.domain,
+                                       localedir=localedir,
+                                       languages=[self.locale],
+                                       fallback=True)
+        else:
+            # use system locale for translations
+            lang = gettext.translation(self.domain,
+                                       localedir=localedir,
+                                       fallback=True)
+
+        full_msg = (self._left_extra_msg +
+                    lang.ugettext(self._msg) +
+                    self._right_extra_msg)
+
+        if self.params is not None:
+            full_msg = full_msg % self.params
+
+        return six.text_type(full_msg)
+
+    def _save_dictionary_parameter(self, dict_param):
+        full_msg = self.data
+        # look for %(blah) fields in string;
+        # ignore %% and deal with the
+        # case where % is first character on the line
+        keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
+
+        # if we don't find any %(blah) blocks but have a %s
+        if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
+            # apparently the full dictionary is the parameter
+            params = copy.deepcopy(dict_param)
+        else:
+            params = {}
+            for key in keys:
+                try:
+                    params[key] = copy.deepcopy(dict_param[key])
+                except TypeError:
+                    # cast uncopyable thing to unicode string
+                    params[key] = unicode(dict_param[key])
+
+        return params
+
+    def _save_parameters(self, other):
+        # we check for None later to see if
+        # we actually have parameters to inject,
+        # so encapsulate if our parameter is actually None
+        if other is None:
+            self.params = (other, )
+        elif isinstance(other, dict):
+            self.params = self._save_dictionary_parameter(other)
+        else:
+            # fallback to casting to unicode,
+            # this will handle the problematic python code-like
+            # objects that cannot be deep-copied
+            try:
+                self.params = copy.deepcopy(other)
+            except TypeError:
+                self.params = unicode(other)
+
+        return self
+
+    # overrides to be more string-like
+    def __unicode__(self):
+        return self.data
+
+    def __str__(self):
+        return self.data.encode('utf-8')
+
+    def __getstate__(self):
+        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
+                   'domain', 'params', 'locale']
+        new_dict = self.__dict__.fromkeys(to_copy)
+        for attr in to_copy:
+            new_dict[attr] = copy.deepcopy(self.__dict__[attr])
+
+        return new_dict
+
+    def __setstate__(self, state):
+        for (k, v) in state.items():
+            setattr(self, k, v)
+
+    # operator overloads
+    def __add__(self, other):
+        copied = copy.deepcopy(self)
+        copied._right_extra_msg += other.__str__()
+        return copied
+
+    def __radd__(self, other):
+        copied = copy.deepcopy(self)
+        copied._left_extra_msg += other.__str__()
+        return copied
+
+    def __mod__(self, other):
+        # do a format string to catch and raise
+        # any possible KeyErrors from missing parameters
+        self.data % other
+        copied = copy.deepcopy(self)
+        return copied._save_parameters(other)
+
+    def __mul__(self, other):
+        return self.data * other
+
+    def __rmul__(self, other):
+        return other * self.data
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    def __getslice__(self, start, end):
+        return self.data.__getslice__(start, end)
+
+    def __getattribute__(self, name):
+        # NOTE(mrodden): handle lossy operations that we can't deal with yet
+        # These override the UserString implementation, since UserString
+        # uses our __class__ attribute to try and build a new message
+        # after running the inner data string through the operation.
+        # At that point, we have lost the gettext message id and can just
+        # safely resolve to a string instead.
+        ops = ['capitalize', 'center', 'decode', 'encode',
+               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+        if name in ops:
+            return getattr(self.data, name)
+        else:
+            return UserString.UserString.__getattribute__(self, name)
+
+
+class LocaleHandler(logging.Handler):
+    """Handler that can have a locale associated to translate Messages.
+
+    A quick example of how to utilize the Message class above.
+    LocaleHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating the internal Message.
+    """
+
+    def __init__(self, locale, target):
+        """Initialize a LocaleHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        logging.Handler.__init__(self)
+        self.locale = locale
+        self.target = target
+
+    def emit(self, record):
+        if isinstance(record.msg, Message):
+            # set the locale and resolve to a string
+            record.msg.locale = self.locale
+
+        self.target.emit(record)
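The lazy-gettext additions defer translation until a Message is rendered, typically at log-emit time. A sketch of the intended flow; without a matching catalog the English msgid is used as-is:

    import logging

    from conductor.openstack.common import gettextutils

    _ = gettextutils.get_lazy_gettext('conductor')
    msg = _('Finished processing task %(id)s') % {'id': 42}    # parameters saved, not rendered yet

    log = logging.getLogger('conductor')
    log.addHandler(gettextutils.LocaleHandler('ru_RU', logging.StreamHandler()))
    log.error(msg)                                             # translated (if possible) at emit time
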
diff --git a/conductor/openstack/common/importutils.py b/conductor/openstack/common/importutils.py
index 3bd277f..7a303f9 100644
--- a/conductor/openstack/common/importutils.py
+++ b/conductor/openstack/common/importutils.py
@@ -24,7 +24,7 @@ import traceback
 
 
 def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
     mod_str, _sep, class_str = import_str.rpartition('.')
     try:
         __import__(mod_str)
@@ -41,8 +41,9 @@ def import_object(import_str, *args, **kwargs):
 
 
 def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
     to find the class in a default namespace, then failing back to
     a full path if not found in the default namespace.
     """
diff --git a/conductor/openstack/common/jsonutils.py b/conductor/openstack/common/jsonutils.py
index 4d3ddd0..63d0aef 100644
--- a/conductor/openstack/common/jsonutils.py
+++ b/conductor/openstack/common/jsonutils.py
@@ -38,11 +38,24 @@ import functools
 import inspect
 import itertools
 import json
+import types
 import xmlrpclib
 
+import netaddr
+import six
+
 from conductor.openstack.common import timeutils
 
 
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+
+_simple_types = (types.NoneType, int, basestring, bool, float, long)
+
+
 def to_primitive(value, convert_instances=False, convert_datetime=True,
                  level=0, max_depth=3):
     """Convert a complex object into primitives.
@@ -58,19 +71,32 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
     Therefore, convert_instances=True is lossy ... be aware.
 
     """
-    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
-             inspect.isfunction, inspect.isgeneratorfunction,
-             inspect.isgenerator, inspect.istraceback, inspect.isframe,
-             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
-             inspect.isabstract]
-    for test in nasty:
-        if test(value):
-            return unicode(value)
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value
 
-    # value of itertools.count doesn't get caught by inspects
-    # above and results in infinite loop when list(value) is called.
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
+
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
     if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)
 
     # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
     #              tests that raise an exception in a mocked method that
@@ -91,17 +117,18 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                                       convert_datetime=convert_datetime,
                                       level=level,
                                       max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
+
         # It's not clear why xmlrpclib created their own DateTime type, but
         # for our purposes, make it a datetime type which is explicitly
         # handled
         if isinstance(value, xmlrpclib.DateTime):
             value = datetime.datetime(*tuple(value.timetuple())[:6])
 
-        if isinstance(value, (list, tuple)):
-            return [recursive(v) for v in value]
-        elif isinstance(value, dict):
-            return dict((k, recursive(v)) for k, v in value.iteritems())
-        elif convert_datetime and isinstance(value, datetime.datetime):
+        if convert_datetime and isinstance(value, datetime.datetime):
             return timeutils.strtime(value)
         elif hasattr(value, 'iteritems'):
             return recursive(dict(value.iteritems()), level=level + 1)
@@ -111,12 +138,16 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
             # Likely an instance of something. Watch for cycles.
             # Ignore class member vars.
             return recursive(value.__dict__, level=level + 1)
+        elif isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
         else:
+            if any(test(value) for test in _nasty_type_tests):
+                return six.text_type(value)
             return value
     except TypeError:
         # Class objects are tricky since they may define something like
         # __iter__ defined but it isn't callable as list().
-        return unicode(value)
+        return six.text_type(value)
 
 
 def dumps(value, default=to_primitive, **kwargs):
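jsonutils.dumps wires to_primitive in as the json default, so datetimes and other non-JSON types are converted on the way out. A small sketch, assuming the new six and netaddr imports above are installed:

    import datetime

    from conductor.openstack.common import jsonutils

    payload = {'created': datetime.datetime(2013, 7, 25, 18, 42), 'count': 3}
    print(jsonutils.dumps(payload))     # the datetime is serialized via to_primitive/strtime
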
diff --git a/conductor/openstack/common/lockutils.py b/conductor/openstack/common/lockutils.py
new file mode 100644
index 0000000..37acb2d
--- /dev/null
+++ b/conductor/openstack/common/lockutils.py
@@ -0,0 +1,276 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import contextlib
+import errno
+import functools
+import os
+import time
+import weakref
+
+from eventlet import semaphore
+from oslo.config import cfg
+
+from conductor.openstack.common import fileutils
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import local
+from conductor.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+    cfg.BoolOpt('disable_process_locking', default=False,
+                help='Whether to disable inter-process locks'),
+    cfg.StrOpt('lock_path',
+               help=('Directory to use for lock files.'))
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+def set_defaults(lock_path):
+    cfg.set_defaults(util_opts, lock_path=lock_path)
+
+
+class _InterProcessLock(object):
+    """Lock implementation which allows multiple locks, working around
+    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+    not require any cleanup. Since the lock is always held on a file
+    descriptor rather than outside of the process, the lock gets dropped
+    automatically if the process crashes, even if __exit__ is not executed.
+
+    There are no guarantees regarding usage by multiple green threads in a
+    single process here. This lock works only between processes. Exclusive
+    access between local threads should be achieved using the semaphores
+    in the @synchronized decorator.
+
+    Note these locks are released when the descriptor is closed, so it's not
+    safe to close the file descriptor while another green thread holds the
+    lock. Just opening and closing the lock file can break synchronisation,
+    so lock files must be accessed only using this abstraction.
+    """
+
+    def __init__(self, name):
+        self.lockfile = None
+        self.fname = name
+
+    def __enter__(self):
+        self.lockfile = open(self.fname, 'w')
+
+        while True:
+            try:
+                # Using non-blocking locks since green threads are not
+                # patched to deal with blocking locking calls.
+                # Also upon reading the MSDN docs for locking(), it seems
+                # to have a laughable 10 attempts "blocking" mechanism.
+                self.trylock()
+                return self
+            except IOError as e:
+                if e.errno in (errno.EACCES, errno.EAGAIN):
+                    # external locks synchronise things like iptables
+                    # updates - give it some time to prevent busy spinning
+                    time.sleep(0.01)
+                else:
+                    raise
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            self.unlock()
+            self.lockfile.close()
+        except IOError:
+            LOG.exception(_("Could not release the acquired lock `%s`"),
+                          self.fname)
+
+    def trylock(self):
+        raise NotImplementedError()
+
+    def unlock(self):
+        raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+    def trylock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
+
+    def unlock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+    def trylock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+    def unlock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+    import msvcrt
+    InterProcessLock = _WindowsLock
+else:
+    import fcntl
+    InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
+
+
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Context based lock
+
+    This function yields a `semaphore.Semaphore` instance unless external is
+    True, in which case, it'll yield an InterProcessLock instance.
+
+    :param lock_file_prefix: The lock_file_prefix argument is used to provide
+    lock files on disk with a meaningful prefix.
+
+    :param external: The external keyword argument denotes whether this lock
+    should work across multiple processes. This means that if two different
+    workers both run a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
+
+    :param lock_path: The lock_path keyword argument is used to specify a
+    special location for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
+    """
+    # NOTE(soren): If we ever go natively threaded, this will be racy.
+    #              See http://stackoverflow.com/questions/5390569/dyn
+    #              amically-allocating-and-destroying-mutexes
+    sem = _semaphores.get(name, semaphore.Semaphore())
+    if name not in _semaphores:
+        # this check is not racy - we're already holding ref locally
+        # so GC won't remove the item and there was no IO switch
+        # (only valid in greenthreads)
+        _semaphores[name] = sem
+
+    with sem:
+        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+
+        # NOTE(mikal): I know this looks odd
+        if not hasattr(local.strong_store, 'locks_held'):
+            local.strong_store.locks_held = []
+        local.strong_store.locks_held.append(name)
+
+        try:
+            if external and not CONF.disable_process_locking:
+                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+                          {'lock': name})
+
+                # We need a copy of lock_path because it is non-local
+                local_lock_path = lock_path or CONF.lock_path
+                if not local_lock_path:
+                    raise cfg.RequiredOptError('lock_path')
+
+                if not os.path.exists(local_lock_path):
+                    fileutils.ensure_tree(local_lock_path)
+                    LOG.info(_('Created lock path: %s'), local_lock_path)
+
+                def add_prefix(name, prefix):
+                    if not prefix:
+                        return name
+                    sep = '' if prefix.endswith('-') else '-'
+                    return '%s%s%s' % (prefix, sep, name)
+
+                # NOTE(mikal): the lock name cannot contain directory
+                # separators
+                lock_file_name = add_prefix(name.replace(os.sep, '_'),
+                                            lock_file_prefix)
+
+                lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+                try:
+                    lock = InterProcessLock(lock_file_path)
+                    with lock as lock:
+                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+                                  {'lock': name, 'path': lock_file_path})
+                        yield lock
+                finally:
+                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+                              {'lock': name, 'path': lock_file_path})
+            else:
+                yield sem
+
+        finally:
+            local.strong_store.locks_held.remove(name)
+
+
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Synchronization decorator.
+
+    Decorating a method like so::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+           ...
+
+    ensures that only one thread will execute the foo method at a time.
+
+    Different methods can share the same lock::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+           ...
+
+        @synchronized('mylock')
+        def bar(self, *args):
+           ...
+
+    This way only one of either foo or bar can be executing at a time.
+    """
+
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            with lock(name, lock_file_prefix, external, lock_path):
+                LOG.debug(_('Got semaphore / lock "%(function)s"'),
+                          {'function': f.__name__})
+                return f(*args, **kwargs)
+
+            LOG.debug(_('Semaphore / lock released "%(function)s"'),
+                      {'function': f.__name__})
+        return inner
+    return wrap
+
+
+def synchronized_with_prefix(lock_file_prefix):
+    """Partial object generator for the synchronization decorator.
+
+    Redefine @synchronized in each project like so::
+
+        (in nova/utils.py)
+        from nova.openstack.common import lockutils
+
+        synchronized = lockutils.synchronized_with_prefix('nova-')
+
+
+        (in nova/foo.py)
+        from nova import utils
+
+        @utils.synchronized('mylock')
+        def bar(self, *args):
+           ...
+
+    The lock_file_prefix argument is used to provide lock files on disk with a
+    meaningful prefix.
+    """
+
+    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
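A sketch of the decorator pattern from the docstrings above, using a hypothetical deploy() worker; with external=True only one conductor process at a time enters the critical section:

    from conductor.openstack.common import lockutils

    synchronized = lockutils.synchronized_with_prefix('conductor-')


    @synchronized('workflow-deploy', external=True,
                  lock_path='/var/lock/conductor')
    def deploy(environment_id):        # hypothetical worker entry point
        pass                           # only one process/thread runs this at a time
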
diff --git a/conductor/openstack/common/log.py b/conductor/openstack/common/log.py
index 98cec96..1dd9283 100644
--- a/conductor/openstack/common/log.py
+++ b/conductor/openstack/common/log.py
@@ -29,26 +29,24 @@ It also allows setting of formatting information through conf.
 
 """
 
-import cStringIO
 import inspect
 import itertools
 import logging
 import logging.config
 import logging.handlers
 import os
-import stat
 import sys
 import traceback
 
 from oslo.config import cfg
+from six import moves
 
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import importutils
 from conductor.openstack.common import jsonutils
 from conductor.openstack.common import local
-from conductor.openstack.common import notifier
 
 
-_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
 _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
 
 common_cli_opts = [
@@ -73,11 +71,14 @@ logging_cli_opts = [
                     'documentation for details on logging configuration '
                     'files.'),
     cfg.StrOpt('log-format',
-               default=_DEFAULT_LOG_FORMAT,
+               default=None,
                metavar='FORMAT',
-               help='A logging.Formatter log message format string which may '
+               help='DEPRECATED. '
+                    'A logging.Formatter log message format string which may '
                     'use any of the available logging.LogRecord attributes. '
-                    'Default: %(default)s'),
+                    'This option is deprecated.  Please use '
+                    'logging_context_format_string and '
+                    'logging_default_format_string instead.'),
     cfg.StrOpt('log-date-format',
                default=_DEFAULT_LOG_DATE_FORMAT,
                metavar='DATE_FORMAT',
@@ -87,11 +88,11 @@ logging_cli_opts = [
                metavar='PATH',
                deprecated_name='logfile',
                help='(Optional) Name of log file to output to. '
-                    'If not set, logging will go to stdout.'),
+                    'If no default is set, logging will go to stdout.'),
     cfg.StrOpt('log-dir',
                deprecated_name='logdir',
-               help='(Optional) The directory to keep log files in '
-                    '(will be prepended to --log-file)'),
+               help='(Optional) The base directory used for relative '
+                    '--log-file paths'),
     cfg.BoolOpt('use-syslog',
                 default=False,
                 help='Use syslog for logging.'),
@@ -103,17 +104,14 @@ logging_cli_opts = [
 generic_log_opts = [
     cfg.BoolOpt('use_stderr',
                 default=True,
-                help='Log output to standard error'),
-    cfg.StrOpt('logfile_mode',
-               default='0644',
-               help='Default file mode used when creating log files'),
+                help='Log output to standard error')
 ]
 
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
-               default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
-                       '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
-                       '%(message)s',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(instance)s%(message)s',
                help='format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
@@ -210,7 +208,27 @@ def _get_log_file_path(binary=None):
         return '%s.log' % (os.path.join(logdir, binary),)
 
 
-class ContextAdapter(logging.LoggerAdapter):
+class BaseLoggerAdapter(logging.LoggerAdapter):
+
+    def audit(self, msg, *args, **kwargs):
+        self.log(logging.AUDIT, msg, *args, **kwargs)
+
+
+class LazyAdapter(BaseLoggerAdapter):
+    def __init__(self, name='unknown', version='unknown'):
+        self._logger = None
+        self.extra = {}
+        self.name = name
+        self.version = version
+
+    @property
+    def logger(self):
+        if not self._logger:
+            self._logger = getLogger(self.name, self.version)
+        return self._logger
+
+
+class ContextAdapter(BaseLoggerAdapter):
     warn = logging.LoggerAdapter.warning
 
     def __init__(self, logger, project_name, version_string):
@@ -218,8 +236,9 @@ class ContextAdapter(logging.LoggerAdapter):
         self.project = project_name
         self.version = version_string
 
-    def audit(self, msg, *args, **kwargs):
-        self.log(logging.AUDIT, msg, *args, **kwargs)
+    @property
+    def handlers(self):
+        return self.logger.handlers
 
     def deprecated(self, msg, *args, **kwargs):
         stdmsg = _("Deprecated: %s") % msg
@@ -303,17 +322,6 @@ class JSONFormatter(logging.Formatter):
         return jsonutils.dumps(message)
 
 
-class PublishErrorsHandler(logging.Handler):
-    def emit(self, record):
-        if ('conductor.openstack.common.notifier.log_notifier' in
-                CONF.notification_driver):
-            return
-        notifier.api.notify(None, 'error.publisher',
-                            'error_notification',
-                            notifier.api.ERROR,
-                            dict(error=record.msg))
-
-
 def _create_logging_excepthook(product_name):
     def logging_excepthook(type, value, tb):
         extra = {}
@@ -323,10 +331,30 @@ def _create_logging_excepthook(product_name):
     return logging_excepthook
 
 
+class LogConfigError(Exception):
+
+    message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+    def __init__(self, log_config, err_msg):
+        self.log_config = log_config
+        self.err_msg = err_msg
+
+    def __str__(self):
+        return self.message % dict(log_config=self.log_config,
+                                   err_msg=self.err_msg)
+
+
+def _load_log_config(log_config):
+    try:
+        logging.config.fileConfig(log_config)
+    except moves.configparser.Error as exc:
+        raise LogConfigError(log_config, str(exc))
+
+
 def setup(product_name):
     """Setup logging."""
     if CONF.log_config:
-        logging.config.fileConfig(CONF.log_config)
+        _load_log_config(CONF.log_config)
     else:
         _setup_logging_from_conf()
     sys.excepthook = _create_logging_excepthook(product_name)
@@ -378,11 +406,6 @@ def _setup_logging_from_conf():
         filelog = logging.handlers.WatchedFileHandler(logpath)
         log_root.addHandler(filelog)
 
-        mode = int(CONF.logfile_mode, 8)
-        st = os.stat(logpath)
-        if st.st_mode != (stat.S_IFREG | mode):
-            os.chmod(logpath, mode)
-
     if CONF.use_stderr:
         streamlog = ColorHandler()
         log_root.addHandler(streamlog)
@@ -394,15 +417,22 @@ def _setup_logging_from_conf():
         log_root.addHandler(streamlog)
 
     if CONF.publish_errors:
-        log_root.addHandler(PublishErrorsHandler(logging.ERROR))
+        handler = importutils.import_object(
+            "conductor.openstack.common.log_handler.PublishErrorsHandler",
+            logging.ERROR)
+        log_root.addHandler(handler)
 
+    datefmt = CONF.log_date_format
     for handler in log_root.handlers:
-        datefmt = CONF.log_date_format
+        # NOTE(alaski): CONF.log_format overrides everything currently.  This
+        # should be deprecated in favor of context aware formatting.
         if CONF.log_format:
             handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                    datefmt=datefmt))
+            log_root.info('Deprecated: log_format is now deprecated and will '
+                          'be removed in the next release')
         else:
-            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+            handler.setFormatter(ContextFormatter(datefmt=datefmt))
 
     if CONF.debug:
         log_root.setLevel(logging.DEBUG)
@@ -411,14 +441,11 @@ def _setup_logging_from_conf():
     else:
         log_root.setLevel(logging.WARNING)
 
-    level = logging.NOTSET
     for pair in CONF.default_log_levels:
         mod, _sep, level_name = pair.partition('=')
         level = logging.getLevelName(level_name)
         logger = logging.getLogger(mod)
         logger.setLevel(level)
-        for handler in log_root.handlers:
-            logger.addHandler(handler)
 
 _loggers = {}
 
@@ -431,6 +458,16 @@ def getLogger(name='unknown', version='unknown'):
     return _loggers[name]
 
 
+def getLazyLogger(name='unknown', version='unknown'):
+    """Returns lazy logger.
+
+    Creates a pass-through logger that does not create the real logger
+    until it is really needed and delegates all calls to the real logger
+    once it is created.
+    """
+    return LazyAdapter(name, version)
+
+
 class WritableLogger(object):
     """A thin wrapper that responds to `write` and logs."""
 
@@ -442,7 +479,7 @@ class WritableLogger(object):
         self.logger.log(self.level, msg)
 
 
-class LegacyFormatter(logging.Formatter):
+class ContextFormatter(logging.Formatter):
     """A context.RequestContext aware formatter configured through flags.
 
     The flags used to set format strings are: logging_context_format_string
@@ -483,7 +520,7 @@ class LegacyFormatter(logging.Formatter):
         if not record:
             return logging.Formatter.formatException(self, exc_info)
 
-        stringbuffer = cStringIO.StringIO()
+        stringbuffer = moves.StringIO()
         traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                   None, stringbuffer)
         lines = stringbuffer.getvalue().split('\n')
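
For reference, a hedged sketch of how a conductor module would use the logging helpers introduced above once this sync lands (the product name and log message are illustrative):

    from conductor.openstack.common import log as logging

    # Module-level logger; safe to create before setup() has run.
    LOG = logging.getLogger(__name__)

    # A lazy logger defers building the real logger until first use, which
    # helps for modules imported before configuration is parsed.
    LAZY_LOG = logging.getLazyLogger(__name__)

    def main():
        # Called once at service startup, after oslo.config has been parsed.
        logging.setup('conductor')
        LOG.info('logging configured')
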
diff --git a/conductor/openstack/common/loopingcall.py b/conductor/openstack/common/loopingcall.py
index 08135f6..61223db 100644
--- a/conductor/openstack/common/loopingcall.py
+++ b/conductor/openstack/common/loopingcall.py
@@ -22,7 +22,7 @@ import sys
 from eventlet import event
 from eventlet import greenthread
 
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 from conductor.openstack.common import log as logging
 from conductor.openstack.common import timeutils
 
@@ -46,12 +46,23 @@ class LoopingCallDone(Exception):
         self.retvalue = retvalue
 
 
-class LoopingCall(object):
+class LoopingCallBase(object):
     def __init__(self, f=None, *args, **kw):
         self.args = args
         self.kw = kw
         self.f = f
         self._running = False
+        self.done = None
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+    """A fixed interval looping call."""
 
     def start(self, interval, initial_delay=None):
         self._running = True
@@ -73,11 +84,11 @@ class LoopingCall(object):
                         LOG.warn(_('task run outlasted interval by %s sec') %
                                  -delay)
                     greenthread.sleep(delay if delay > 0 else 0)
-            except LoopingCallDone, e:
+            except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in looping call'))
+                LOG.exception(_('in fixed duration looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -88,8 +99,49 @@ class LoopingCall(object):
         greenthread.spawn_n(_inner)
         return self.done
 
-    def stop(self):
-        self._running = False
 
-    def wait(self):
-        return self.done.wait()
+# TODO(mikal): this class name is deprecated in Havana and should be removed
+# in the I release
+LoopingCall = FixedIntervalLoopingCall
+
+
+class DynamicLoopingCall(LoopingCallBase):
+    """A looping call which sleeps until the next known event.
+
+    The function called should return how long to sleep for before being
+    called again.
+    """
+
+    def start(self, initial_delay=None, periodic_interval_max=None):
+        self._running = True
+        done = event.Event()
+
+        def _inner():
+            if initial_delay:
+                greenthread.sleep(initial_delay)
+
+            try:
+                while self._running:
+                    idle = self.f(*self.args, **self.kw)
+                    if not self._running:
+                        break
+
+                    if periodic_interval_max is not None:
+                        idle = min(idle, periodic_interval_max)
+                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
+                                'seconds'), idle)
+                    greenthread.sleep(idle)
+            except LoopingCallDone as e:
+                self.stop()
+                done.send(e.retvalue)
+            except Exception:
+                LOG.exception(_('in dynamic looping call'))
+                done.send_exception(*sys.exc_info())
+                return
+            else:
+                done.send(True)
+
+        self.done = done
+
+        greenthread.spawn(_inner)
+        return self.done
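
The split into fixed-interval and dynamic looping calls changes how periodic work is driven. A small sketch against the interfaces defined above (both task functions are illustrative):

    from conductor.openstack.common import loopingcall

    def report_state():
        # Runs every `interval` seconds; raise LoopingCallDone(retvalue)
        # inside the task to stop the loop cleanly.
        pass

    timer = loopingcall.FixedIntervalLoopingCall(report_state)
    timer.start(interval=60, initial_delay=5)

    def run_periodic_tasks():
        # A DynamicLoopingCall task returns the number of seconds to sleep
        # before it should run again.
        return 10.0

    periodic = loopingcall.DynamicLoopingCall(run_periodic_tasks)
    periodic.start(periodic_interval_max=300)
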
diff --git a/conductor/openstack/common/network_utils.py b/conductor/openstack/common/network_utils.py
new file mode 100644
index 0000000..dbed1ce
--- /dev/null
+++ b/conductor/openstack/common/network_utils.py
@@ -0,0 +1,81 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Network-related utilities and helper functions.
+"""
+
+import urlparse
+
+
+def parse_host_port(address, default_port=None):
+    """Interpret a string as a host:port pair.
+
+    An IPv6 address MUST be escaped if accompanied by a port,
+    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
+    means both [2001:db8:85a3::8a2e:370:7334] and
+    [2001:db8:85a3::8a2e:370]:7334.
+
+    >>> parse_host_port('server01:80')
+    ('server01', 80)
+    >>> parse_host_port('server01')
+    ('server01', None)
+    >>> parse_host_port('server01', default_port=1234)
+    ('server01', 1234)
+    >>> parse_host_port('[::1]:80')
+    ('::1', 80)
+    >>> parse_host_port('[::1]')
+    ('::1', None)
+    >>> parse_host_port('[::1]', default_port=1234)
+    ('::1', 1234)
+    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
+    ('2001:db8:85a3::8a2e:370:7334', 1234)
+
+    """
+    if address[0] == '[':
+        # Escaped ipv6
+        _host, _port = address[1:].split(']')
+        host = _host
+        if ':' in _port:
+            port = _port.split(':')[1]
+        else:
+            port = default_port
+    else:
+        if address.count(':') == 1:
+            host, port = address.split(':')
+        else:
+            # 0 means ipv4, >1 means ipv6.
+            # We prohibit unescaped ipv6 addresses with port.
+            host = address
+            port = default_port
+
+    return (host, None if port is None else int(port))
+
+
+def urlsplit(url, scheme='', allow_fragments=True):
+    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
+    This function papers over Python issue9374 when needed.
+
+    The parameters are the same as urlparse.urlsplit.
+    """
+    scheme, netloc, path, query, fragment = urlparse.urlsplit(
+        url, scheme, allow_fragments)
+    if allow_fragments and '#' in path:
+        path, fragment = path.split('#', 1)
+    if '?' in path:
+        path, query = path.split('?', 1)
+    return urlparse.SplitResult(scheme, netloc, path, query, fragment)
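
Beyond the doctests above, urlsplit() exists so callers get the query string back even on interpreters affected by Python issue9374. A short illustrative example (the broker URL is made up):

    from conductor.openstack.common import network_utils

    parts = network_utils.urlsplit('qpid://broker.example.com:5672/?heartbeat=60')
    # parts.query == 'heartbeat=60' even where the stock urlparse.urlsplit()
    # on older interpreters would leave the query attached to the path.
    host, port = parts.hostname, parts.port
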
diff --git a/conductor/openstack/common/notifier/api.py b/conductor/openstack/common/notifier/api.py
index 99f4240..da0b6f9 100644
--- a/conductor/openstack/common/notifier/api.py
+++ b/conductor/openstack/common/notifier/api.py
@@ -13,12 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import socket
 import uuid
 
 from oslo.config import cfg
 
 from conductor.openstack.common import context
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 from conductor.openstack.common import importutils
 from conductor.openstack.common import jsonutils
 from conductor.openstack.common import log as logging
@@ -30,13 +31,12 @@ LOG = logging.getLogger(__name__)
 notifier_opts = [
     cfg.MultiStrOpt('notification_driver',
                     default=[],
-                    deprecated_name='list_notifier_drivers',
                     help='Driver or drivers to handle sending notifications'),
     cfg.StrOpt('default_notification_level',
                default='INFO',
                help='Default notification level for outgoing notifications'),
     cfg.StrOpt('default_publisher_id',
-               default='$host',
+               default=None,
                help='Default publisher_id for outgoing notifications'),
 ]
 
@@ -57,7 +57,7 @@ class BadPriorityException(Exception):
 
 
 def notify_decorator(name, fn):
-    """ decorator for notify which is used from utils.monkey_patch()
+    """Decorator for notify which is used from utils.monkey_patch().
 
         :param name: name of the function
         :param function: - object of the function
@@ -75,7 +75,7 @@ def notify_decorator(name, fn):
 
         ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
         notify(ctxt,
-               CONF.default_publisher_id,
+               CONF.default_publisher_id or socket.gethostname(),
                name,
                CONF.default_notification_level,
                body)
@@ -85,7 +85,10 @@ def notify_decorator(name, fn):
 
 def publisher_id(service, host=None):
     if not host:
-        host = CONF.host
+        try:
+            host = CONF.host
+        except AttributeError:
+            host = CONF.default_publisher_id or socket.gethostname()
     return "%s.%s" % (service, host)
 
 
@@ -154,29 +157,16 @@ def _get_drivers():
     if _drivers is None:
         _drivers = {}
         for notification_driver in CONF.notification_driver:
-            add_driver(notification_driver)
-
+            try:
+                driver = importutils.import_module(notification_driver)
+                _drivers[notification_driver] = driver
+            except ImportError:
+                LOG.exception(_("Failed to load notifier %s. "
+                                "These notifications will not be sent.") %
+                              notification_driver)
     return _drivers.values()
 
 
-def add_driver(notification_driver):
-    """Add a notification driver at runtime."""
-    # Make sure the driver list is initialized.
-    _get_drivers()
-    if isinstance(notification_driver, basestring):
-        # Load and add
-        try:
-            driver = importutils.import_module(notification_driver)
-            _drivers[notification_driver] = driver
-        except ImportError:
-            LOG.exception(_("Failed to load notifier %s. "
-                            "These notifications will not be sent.") %
-                          notification_driver)
-    else:
-        # Driver is already loaded; just add the object.
-        _drivers[notification_driver] = notification_driver
-
-
 def _reset_drivers():
     """Used by unit tests to reset the drivers."""
     global _drivers
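
With add_driver() removed, drivers come only from the notification_driver option, and the publisher host now falls back to the local hostname. A hedged sketch of emitting an event through this API (the event type and payload are illustrative):

    from conductor.openstack.common.notifier import api as notifier_api

    # publisher_id() builds '<service>.<host>'; the host falls back to
    # default_publisher_id or socket.gethostname() when CONF.host is unset.
    publisher = notifier_api.publisher_id('conductor')
    notifier_api.notify(None, publisher, 'environment.deploy.end',
                        'INFO', {'environment_id': 'abc123'})
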
diff --git a/conductor/openstack/common/notifier/log_notifier.py b/conductor/openstack/common/notifier/log_notifier.py
index 9f159fa..7bbede8 100644
--- a/conductor/openstack/common/notifier/log_notifier.py
+++ b/conductor/openstack/common/notifier/log_notifier.py
@@ -24,7 +24,9 @@ CONF = cfg.CONF
 
 def notify(_context, message):
     """Notifies the recipient of the desired event given the model.
-    Log notifications using openstack's default logging system"""
+
+    Log notifications using openstack's default logging system.
+    """
 
     priority = message.get('priority',
                            CONF.default_notification_level)
diff --git a/conductor/openstack/common/notifier/no_op_notifier.py b/conductor/openstack/common/notifier/no_op_notifier.py
index bc7a56c..13d946e 100644
--- a/conductor/openstack/common/notifier/no_op_notifier.py
+++ b/conductor/openstack/common/notifier/no_op_notifier.py
@@ -15,5 +15,5 @@
 
 
 def notify(_context, message):
-    """Notifies the recipient of the desired event given the model"""
+    """Notifies the recipient of the desired event given the model."""
     pass
diff --git a/conductor/openstack/common/notifier/rpc_notifier.py b/conductor/openstack/common/notifier/rpc_notifier.py
index 67d615d..590d2c1 100644
--- a/conductor/openstack/common/notifier/rpc_notifier.py
+++ b/conductor/openstack/common/notifier/rpc_notifier.py
@@ -16,7 +16,7 @@
 from oslo.config import cfg
 
 from conductor.openstack.common import context as req_context
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 from conductor.openstack.common import log as logging
 from conductor.openstack.common import rpc
 
@@ -31,7 +31,7 @@ CONF.register_opt(notification_topic_opt)
 
 
 def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
     if not context:
         context = req_context.get_admin_context()
     priority = message.get('priority',
diff --git a/conductor/openstack/common/notifier/rpc_notifier2.py b/conductor/openstack/common/notifier/rpc_notifier2.py
index 3585e7e..e419bc3 100644
--- a/conductor/openstack/common/notifier/rpc_notifier2.py
+++ b/conductor/openstack/common/notifier/rpc_notifier2.py
@@ -18,7 +18,7 @@
 from oslo.config import cfg
 
 from conductor.openstack.common import context as req_context
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 from conductor.openstack.common import log as logging
 from conductor.openstack.common import rpc
 
@@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group)
 
 
 def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
     if not context:
         context = req_context.get_admin_context()
     priority = message.get('priority',
diff --git a/conductor/openstack/common/rpc/__init__.py b/conductor/openstack/common/rpc/__init__.py
new file mode 100644
index 0000000..ee77297
--- /dev/null
+++ b/conductor/openstack/common/rpc/__init__.py
@@ -0,0 +1,307 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+A remote procedure call (rpc) abstraction.
+
+For some wrappers that add message versioning to rpc, see:
+    rpc.dispatcher
+    rpc.proxy
+"""
+
+import inspect
+
+from oslo.config import cfg
+
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import importutils
+from conductor.openstack.common import local
+from conductor.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+rpc_opts = [
+    cfg.StrOpt('rpc_backend',
+               default='%s.impl_kombu' % __package__,
+               help="The messaging module to use, defaults to kombu."),
+    cfg.IntOpt('rpc_thread_pool_size',
+               default=64,
+               help='Size of RPC thread pool'),
+    cfg.IntOpt('rpc_conn_pool_size',
+               default=30,
+               help='Size of RPC connection pool'),
+    cfg.IntOpt('rpc_response_timeout',
+               default=60,
+               help='Seconds to wait for a response from call or multicall'),
+    cfg.IntOpt('rpc_cast_timeout',
+               default=30,
+               help='Seconds to wait before a cast expires (TTL). '
+                    'Only supported by impl_zmq.'),
+    cfg.ListOpt('allowed_rpc_exception_modules',
+                default=['conductor.openstack.common.exception',
+                         'nova.exception',
+                         'cinder.exception',
+                         'exceptions',
+                         ],
+                help='Modules of exceptions that are permitted to be recreated '
+                     'upon receiving exception data from an rpc call.'),
+    cfg.BoolOpt('fake_rabbit',
+                default=False,
+                help='If passed, use a fake RabbitMQ provider'),
+    cfg.StrOpt('control_exchange',
+               default='openstack',
+               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(rpc_opts)
+
+
+def set_defaults(control_exchange):
+    cfg.set_defaults(rpc_opts,
+                     control_exchange=control_exchange)
+
+
+def create_connection(new=True):
+    """Create a connection to the message bus used for rpc.
+
+    For some example usage of creating a connection and some consumers on that
+    connection, see nova.service.
+
+    :param new: Whether or not to create a new connection.  A new connection
+                will be created by default.  If new is False, the
+                implementation is free to return an existing connection from a
+                pool.
+
+    :returns: An instance of openstack.common.rpc.common.Connection
+    """
+    return _get_impl().create_connection(CONF, new=new)
+
+
+def _check_for_lock():
+    if not CONF.debug:
+        return None
+
+    if ((hasattr(local.strong_store, 'locks_held')
+         and local.strong_store.locks_held)):
+        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
+        LOG.warn(_('An RPC call is being made while holding a lock. The locks '
+                   'currently held are %(locks)s. This is probably a bug. '
+                   'Please report it. Include the following: [%(stack)s].'),
+                 {'locks': local.strong_store.locks_held,
+                  'stack': stack})
+        return True
+
+    return False
+
+
+def call(context, topic, msg, timeout=None, check_for_lock=False):
+    """Invoke a remote method that returns something.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+    :param timeout: int, number of seconds to use for a response timeout.
+                    If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if an RPC call is made
+                    with a lock held.
+
+    :returns: A dict from the remote method.
+
+    :raises: openstack.common.rpc.common.Timeout if a complete response
+             is not received before the timeout is reached.
+    """
+    if check_for_lock:
+        _check_for_lock()
+    return _get_impl().call(CONF, context, topic, msg, timeout)
+
+
+def cast(context, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast(CONF, context, topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+    """Broadcast a remote method invocation with no return.
+
+    This method will get invoked on all consumers that were set up with this
+    topic name and fanout=True.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=True.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast(CONF, context, topic, msg)
+
+
+def multicall(context, topic, msg, timeout=None, check_for_lock=False):
+    """Invoke a remote method and get back an iterator.
+
+    In this case, the remote method will be returning multiple values in
+    separate messages, so the return values can be processed as they come in via
+    an iterator.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+    :param timeout: int, number of seconds to use for a response timeout.
+                    If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if an RPC call is made
+                    with a lock held.
+
+    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
+              an index that starts at 0 and increases by one for each value
+              returned and X is the Nth value that was returned by the remote
+              method.
+
+    :raises: openstack.common.rpc.common.Timeout if a complete response
+             is not received before the timeout is reached.
+    """
+    if check_for_lock:
+        _check_for_lock()
+    return _get_impl().multicall(CONF, context, topic, msg, timeout)
+
+
+def notify(context, topic, msg, envelope=False):
+    """Send notification event.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict of content of event.
+    :param envelope: Set to True to enable message envelope for notifications.
+
+    :returns: None
+    """
+    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
+
+
+def cleanup():
+    """Clean up resoruces in use by implementation.
+
+    Clean up any resources that have been allocated by the RPC implementation.
+    This is typically open connections to a messaging service.  This function
+    would get called before an application using this API exits to allow
+    connections to get torn down cleanly.
+
+    :returns: None
+    """
+    return _get_impl().cleanup()
+
+
+def cast_to_server(context, server_params, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast_to_server(CONF, context, server_params, topic,
+                                      msg)
+
+
+def fanout_cast_to_server(context, server_params, topic, msg):
+    """Broadcast to a remote method invocation with no return.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
+                                             topic, msg)
+
+
+def queue_get_for(context, topic, host):
+    """Get a queue name for a given topic + host.
+
+    This function only works if this naming convention is followed on the
+    consumer side, as well.  For example, in nova, every instance of the
+    nova-foo service calls create_consumer() for two topics:
+
+        foo
+        foo.<host>
+
+    Messages sent to the 'foo' topic are distributed to exactly one instance of
+    the nova-foo service.  The services are chosen in a round-robin fashion.
+    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
+    <host>.
+    """
+    return '%s.%s' % (topic, host) if host else topic
+
+
+_RPCIMPL = None
+
+
+def _get_impl():
+    """Delay import of rpc_backend until configuration is loaded."""
+    global _RPCIMPL
+    if _RPCIMPL is None:
+        try:
+            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
+        except ImportError:
+            # For backwards compatibility with older nova config.
+            impl = CONF.rpc_backend.replace('nova.rpc',
+                                            'nova.openstack.common.rpc')
+            _RPCIMPL = importutils.import_module(impl)
+    return _RPCIMPL
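
The helpers above are the whole client-facing surface of the RPC layer. A minimal sketch of a call and a cast using the documented message form (the topic and method names are illustrative):

    from conductor.openstack.common import context as req_context
    from conductor.openstack.common import rpc

    ctxt = req_context.get_admin_context()
    msg = {'method': 'handle_task', 'args': {'task_id': 'abc123'}}

    # Blocks until the remote side replies; a Timeout from rpc.common is
    # raised if no complete response arrives within 30 seconds.
    result = rpc.call(ctxt, 'conductor-tasks', msg, timeout=30)

    # Fire-and-forget variant of the same message.
    rpc.cast(ctxt, 'conductor-tasks', msg)

    # Tear down pooled connections before the process exits.
    rpc.cleanup()
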
diff --git a/conductor/openstack/common/rpc/amqp.py b/conductor/openstack/common/rpc/amqp.py
new file mode 100644
index 0000000..ae51b77
--- /dev/null
+++ b/conductor/openstack/common/rpc/amqp.py
@@ -0,0 +1,610 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 - 2012, Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Shared code between AMQP based openstack.common.rpc implementations.
+
+The code in this module is shared between the rpc implementations based on AMQP.
+Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also uses
+AMQP, but is deprecated and predates this code.
+"""
+
+import collections
+import inspect
+import sys
+import uuid
+
+from eventlet import greenpool
+from eventlet import pools
+from eventlet import queue
+from eventlet import semaphore
+from oslo.config import cfg
+
+from conductor.openstack.common import excutils
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import local
+from conductor.openstack.common import log as logging
+from conductor.openstack.common.rpc import common as rpc_common
+
+
+amqp_opts = [
+    cfg.BoolOpt('amqp_durable_queues',
+                default=False,
+                deprecated_name='rabbit_durable_queues',
+                deprecated_group='DEFAULT',
+                help='Use durable queues in amqp.'),
+    cfg.BoolOpt('amqp_auto_delete',
+                default=False,
+                help='Auto-delete queues in amqp.'),
+]
+
+cfg.CONF.register_opts(amqp_opts)
+
+UNIQUE_ID = '_unique_id'
+LOG = logging.getLogger(__name__)
+
+
+class Pool(pools.Pool):
+    """Class that implements a Pool of Connections."""
+    def __init__(self, conf, connection_cls, *args, **kwargs):
+        self.connection_cls = connection_cls
+        self.conf = conf
+        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
+        kwargs.setdefault("order_as_stack", True)
+        super(Pool, self).__init__(*args, **kwargs)
+        self.reply_proxy = None
+
+    # TODO(comstud): Timeout connections not used in a while
+    def create(self):
+        LOG.debug(_('Pool creating new connection'))
+        return self.connection_cls(self.conf)
+
+    def empty(self):
+        while self.free_items:
+            self.get().close()
+        # Force a new connection pool to be created.
+        # Note that this was added due to failing unit test cases. The issue
+        # is the above "while loop" gets all the cached connections from the
+        # pool and closes them, but never returns them to the pool, a pool
+        # leak. The unit tests hang waiting for an item to be returned to the
+        # pool. The unit tests get here via the tearDown() method. In the run
+        # time code, it gets here via cleanup() and only appears in service.py
+        # just before doing a sys.exit(), so cleanup() only happens once and
+        # the leakage is not a problem.
+        self.connection_cls.pool = None
+
+
+_pool_create_sem = semaphore.Semaphore()
+
+
+def get_connection_pool(conf, connection_cls):
+    with _pool_create_sem:
+        # Make sure only one thread tries to create the connection pool.
+        if not connection_cls.pool:
+            connection_cls.pool = Pool(conf, connection_cls)
+    return connection_cls.pool
+
+
+class ConnectionContext(rpc_common.Connection):
+    """The class that is actually returned to the create_connection() caller.
+
+    This is essentially a wrapper around Connection that supports 'with'.
+    It can also return a new Connection, or one from a pool.
+
+    The class also detects when an instance of this class is about to be
+    deleted, so that Connections are returned to the pool on exceptions and
+    the like without making the caller responsible for catching them.
+    Whenever possible, a connection is returned to the pool.
+    """
+
+    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
+        """Create a new connection, or get one from the pool."""
+        self.connection = None
+        self.conf = conf
+        self.connection_pool = connection_pool
+        if pooled:
+            self.connection = connection_pool.get()
+        else:
+            self.connection = connection_pool.connection_cls(
+                conf,
+                server_params=server_params)
+        self.pooled = pooled
+
+    def __enter__(self):
+        """When with ConnectionContext() is used, return self."""
+        return self
+
+    def _done(self):
+        """If the connection came from a pool, clean it up and put it back.
+        If it did not come from a pool, close it.
+        """
+        if self.connection:
+            if self.pooled:
+                # Reset the connection so it's ready for the next caller
+                # to grab from the pool
+                self.connection.reset()
+                self.connection_pool.put(self.connection)
+            else:
+                try:
+                    self.connection.close()
+                except Exception:
+                    pass
+            self.connection = None
+
+    def __exit__(self, exc_type, exc_value, tb):
+        """End of 'with' statement.  We're done here."""
+        self._done()
+
+    def __del__(self):
+        """Caller is done with this connection.  Make sure we cleaned up."""
+        self._done()
+
+    def close(self):
+        """Caller is done with this connection."""
+        self._done()
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        self.connection.create_consumer(topic, proxy, fanout)
+
+    def create_worker(self, topic, proxy, pool_name):
+        self.connection.create_worker(topic, proxy, pool_name)
+
+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
+                           ack_on_error=True):
+        self.connection.join_consumer_pool(callback,
+                                           pool_name,
+                                           topic,
+                                           exchange_name,
+                                           ack_on_error)
+
+    def consume_in_thread(self):
+        self.connection.consume_in_thread()
+
+    def __getattr__(self, key):
+        """Proxy all other calls to the Connection instance."""
+        if self.connection:
+            return getattr(self.connection, key)
+        else:
+            raise rpc_common.InvalidRPCConnectionReuse()
+
+
+class ReplyProxy(ConnectionContext):
+    """Connection class for RPC replies / callbacks."""
+    def __init__(self, conf, connection_pool):
+        self._call_waiters = {}
+        self._num_call_waiters = 0
+        self._num_call_waiters_wrn_threshhold = 10
+        self._reply_q = 'reply_' + uuid.uuid4().hex
+        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
+        self.declare_direct_consumer(self._reply_q, self._process_data)
+        self.consume_in_thread()
+
+    def _process_data(self, message_data):
+        msg_id = message_data.pop('_msg_id', None)
+        waiter = self._call_waiters.get(msg_id)
+        if not waiter:
+            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
+                       ', message : %(data)s'), {'msg_id': msg_id,
+                                                 'data': message_data})
+            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
+        else:
+            waiter.put(message_data)
+
+    def add_call_waiter(self, waiter, msg_id):
+        self._num_call_waiters += 1
+        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
+            LOG.warn(_('Number of call waiters is greater than warning '
+                       'threshold: %d. There could be a MulticallProxyWaiter '
+                       'leak.') % self._num_call_waiters_wrn_threshhold)
+            self._num_call_waiters_wrn_threshhold *= 2
+        self._call_waiters[msg_id] = waiter
+
+    def del_call_waiter(self, msg_id):
+        self._num_call_waiters -= 1
+        del self._call_waiters[msg_id]
+
+    def get_reply_q(self):
+        return self._reply_q
+
+
+def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
+              failure=None, ending=False, log_failure=True):
+    """Sends a reply or an error on the channel signified by msg_id.
+
+    Failure should be a sys.exc_info() tuple.
+
+    """
+    with ConnectionContext(conf, connection_pool) as conn:
+        if failure:
+            failure = rpc_common.serialize_remote_exception(failure,
+                                                            log_failure)
+
+        msg = {'result': reply, 'failure': failure}
+        if ending:
+            msg['ending'] = True
+        _add_unique_id(msg)
+        # If a reply_q exists, add the msg_id to the reply and pass the
+        # reply_q to direct_send() to use it as the response queue.
+        # Otherwise, use the msg_id for backward compatibility.
+        if reply_q:
+            msg['_msg_id'] = msg_id
+            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
+        else:
+            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+    """Context that supports replying to a rpc.call."""
+    def __init__(self, **kwargs):
+        self.msg_id = kwargs.pop('msg_id', None)
+        self.reply_q = kwargs.pop('reply_q', None)
+        self.conf = kwargs.pop('conf')
+        super(RpcContext, self).__init__(**kwargs)
+
+    def deepcopy(self):
+        values = self.to_dict()
+        values['conf'] = self.conf
+        values['msg_id'] = self.msg_id
+        values['reply_q'] = self.reply_q
+        return self.__class__(**values)
+
+    def reply(self, reply=None, failure=None, ending=False,
+              connection_pool=None, log_failure=True):
+        if self.msg_id:
+            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
+                      reply, failure, ending, log_failure)
+            if ending:
+                self.msg_id = None
+
+
+def unpack_context(conf, msg):
+    """Unpack context from msg."""
+    context_dict = {}
+    for key in list(msg.keys()):
+        # NOTE(vish): Some versions of python don't like unicode keys
+        #             in kwargs.
+        key = str(key)
+        if key.startswith('_context_'):
+            value = msg.pop(key)
+            context_dict[key[9:]] = value
+    context_dict['msg_id'] = msg.pop('_msg_id', None)
+    context_dict['reply_q'] = msg.pop('_reply_q', None)
+    context_dict['conf'] = conf
+    ctx = RpcContext.from_dict(context_dict)
+    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
+    return ctx
+
+
+def pack_context(msg, context):
+    """Pack context into msg.
+
+    Values for message keys need to be less than 255 chars, so we pull
+    context out into a bunch of separate keys. If we want to support
+    more arguments in rabbit messages, we may want to do the same
+    for args at some point.
+
+    """
+    context_d = dict([('_context_%s' % key, value)
+                      for (key, value) in context.to_dict().iteritems()])
+    msg.update(context_d)
+
+
+class _MsgIdCache(object):
+    """This class checks any duplicate messages."""
+
+    # NOTE: This could be made a configuration item, but there is rarely
+    #       any need to change its value, so it is kept as a static
+    #       value for now.
+    DUP_MSG_CHECK_SIZE = 16
+
+    def __init__(self, **kwargs):
+        self.prev_msgids = collections.deque([],
+                                             maxlen=self.DUP_MSG_CHECK_SIZE)
+
+    def check_duplicate_message(self, message_data):
+        """AMQP consumers may read same message twice when exceptions occur
+           before ack is returned. This method prevents doing it.
+        """
+        if UNIQUE_ID in message_data:
+            msg_id = message_data[UNIQUE_ID]
+            if msg_id not in self.prev_msgids:
+                self.prev_msgids.append(msg_id)
+            else:
+                raise rpc_common.DuplicateMessageError(msg_id=msg_id)
+
+
+def _add_unique_id(msg):
+    """Add unique_id for checking duplicate messages."""
+    unique_id = uuid.uuid4().hex
+    msg.update({UNIQUE_ID: unique_id})
+    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+
+
+class _ThreadPoolWithWait(object):
+    """Base class for a delayed invocation manager.
+
+    Used by the Connection class to start up green threads
+    to handle incoming messages.
+    """
+
+    def __init__(self, conf, connection_pool):
+        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
+        self.connection_pool = connection_pool
+        self.conf = conf
+
+    def wait(self):
+        """Wait for all callback threads to exit."""
+        self.pool.waitall()
+
+
+class CallbackWrapper(_ThreadPoolWithWait):
+    """Wraps a straight callback.
+
+    Allows it to be invoked in a green thread.
+    """
+
+    def __init__(self, conf, callback, connection_pool):
+        """Initiates CallbackWrapper object.
+
+        :param conf: cfg.CONF instance
+        :param callback: a callable (probably a function)
+        :param connection_pool: connection pool as returned by
+                                get_connection_pool()
+        """
+        super(CallbackWrapper, self).__init__(
+            conf=conf,
+            connection_pool=connection_pool,
+        )
+        self.callback = callback
+
+    def __call__(self, message_data):
+        self.pool.spawn_n(self.callback, message_data)
+
+
+class ProxyCallback(_ThreadPoolWithWait):
+    """Calls methods on a proxy object based on method and args."""
+
+    def __init__(self, conf, proxy, connection_pool):
+        super(ProxyCallback, self).__init__(
+            conf=conf,
+            connection_pool=connection_pool,
+        )
+        self.proxy = proxy
+        self.msg_id_cache = _MsgIdCache()
+
+    def __call__(self, message_data):
+        """Consumer callback to call a method on a proxy object.
+
+        Parses the message for validity and fires off a thread to call the
+        proxy object method.
+
+        Message data should be a dictionary with two keys:
+            method: string representing the method to call
+            args: dictionary of arg: value
+
+        Example: {'method': 'echo', 'args': {'value': 42}}
+
+        """
+        # It is important to clear the context here, because at this point
+        # the previous context is stored in local.store.context
+        if hasattr(local.store, 'context'):
+            del local.store.context
+        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+        self.msg_id_cache.check_duplicate_message(message_data)
+        ctxt = unpack_context(self.conf, message_data)
+        method = message_data.get('method')
+        args = message_data.get('args', {})
+        version = message_data.get('version')
+        namespace = message_data.get('namespace')
+        if not method:
+            LOG.warn(_('no method for message: %s') % message_data)
+            ctxt.reply(_('No method for message: %s') % message_data,
+                       connection_pool=self.connection_pool)
+            return
+        self.pool.spawn_n(self._process_data, ctxt, version, method,
+                          namespace, args)
+
+    def _process_data(self, ctxt, version, method, namespace, args):
+        """Process a message in a new thread.
+
+        If the proxy object we have has a dispatch method
+        (see rpc.dispatcher.RpcDispatcher), pass it the version,
+        method, and args and let it dispatch as appropriate.  If not, use
+        the old behavior of magically calling the specified method on the
+        proxy we have here.
+        """
+        ctxt.update_store()
+        try:
+            rval = self.proxy.dispatch(ctxt, version, method, namespace,
+                                       **args)
+            # Check if the result was a generator
+            if inspect.isgenerator(rval):
+                for x in rval:
+                    ctxt.reply(x, None, connection_pool=self.connection_pool)
+            else:
+                ctxt.reply(rval, None, connection_pool=self.connection_pool)
+            # This final None tells multicall that it is done.
+            ctxt.reply(ending=True, connection_pool=self.connection_pool)
+        except rpc_common.ClientException as e:
+            LOG.debug(_('Expected exception during message handling (%s)') %
+                      e._exc_info[1])
+            ctxt.reply(None, e._exc_info,
+                       connection_pool=self.connection_pool,
+                       log_failure=False)
+        except Exception:
+            # sys.exc_info() is deleted by LOG.exception().
+            exc_info = sys.exc_info()
+            LOG.error(_('Exception during message handling'),
+                      exc_info=exc_info)
+            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
+
+
+class MulticallProxyWaiter(object):
+    def __init__(self, conf, msg_id, timeout, connection_pool):
+        self._msg_id = msg_id
+        self._timeout = timeout or conf.rpc_response_timeout
+        self._reply_proxy = connection_pool.reply_proxy
+        self._done = False
+        self._got_ending = False
+        self._conf = conf
+        self._dataqueue = queue.LightQueue()
+        # Add this caller to the reply proxy's call_waiters
+        self._reply_proxy.add_call_waiter(self, self._msg_id)
+        self.msg_id_cache = _MsgIdCache()
+
+    def put(self, data):
+        self._dataqueue.put(data)
+
+    def done(self):
+        if self._done:
+            return
+        self._done = True
+        # Remove this caller from reply proxy's call_waiters
+        self._reply_proxy.del_call_waiter(self._msg_id)
+
+    def _process_data(self, data):
+        result = None
+        self.msg_id_cache.check_duplicate_message(data)
+        if data['failure']:
+            failure = data['failure']
+            result = rpc_common.deserialize_remote_exception(self._conf,
+                                                             failure)
+        elif data.get('ending', False):
+            self._got_ending = True
+        else:
+            result = data['result']
+        return result
+
+    def __iter__(self):
+        """Return a result until we get a reply with an 'ending' flag."""
+        if self._done:
+            raise StopIteration
+        while True:
+            try:
+                data = self._dataqueue.get(timeout=self._timeout)
+                result = self._process_data(data)
+            except queue.Empty:
+                self.done()
+                raise rpc_common.Timeout()
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.done()
+            if self._got_ending:
+                self.done()
+                raise StopIteration
+            if isinstance(result, Exception):
+                self.done()
+                raise result
+            yield result
+
+
+def create_connection(conf, new, connection_pool):
+    """Create a connection."""
+    return ConnectionContext(conf, connection_pool, pooled=not new)
+
+
+_reply_proxy_create_sem = semaphore.Semaphore()
+
+
+def multicall(conf, context, topic, msg, timeout, connection_pool):
+    """Make a call that returns multiple times."""
+    LOG.debug(_('Making synchronous call on %s ...'), topic)
+    msg_id = uuid.uuid4().hex
+    msg.update({'_msg_id': msg_id})
+    LOG.debug(_('MSG_ID is %s') % (msg_id))
+    _add_unique_id(msg)
+    pack_context(msg, context)
+
+    with _reply_proxy_create_sem:
+        if not connection_pool.reply_proxy:
+            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
+    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+    return wait_msg
+
+
+def call(conf, context, topic, msg, timeout, connection_pool):
+    """Sends a message on a topic and wait for a response."""
+    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
+    # NOTE(vish): return the last result from the multicall
+    rv = list(rv)
+    if not rv:
+        return
+    return rv[-1]
+
+
+def cast(conf, context, topic, msg, connection_pool):
+    """Sends a message on a topic without waiting for a response."""
+    LOG.debug(_('Making asynchronous cast on %s...'), topic)
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg))
+
+
+def fanout_cast(conf, context, topic, msg, connection_pool):
+    """Sends a message on a fanout exchange without waiting for a response."""
+    LOG.debug(_('Making asynchronous fanout cast...'))
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
+
+
+def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
+    """Sends a message on a topic to a specific server."""
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool, pooled=False,
+                           server_params=server_params) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg,
+                          connection_pool):
+    """Sends a message on a fanout exchange to a specific server."""
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool, pooled=False,
+                           server_params=server_params) as conn:
+        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
+
+
+def notify(conf, context, topic, msg, connection_pool, envelope):
+    """Sends a notification event on a topic."""
+    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
+              dict(event_type=msg.get('event_type'),
+                   topic=topic))
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool) as conn:
+        if envelope:
+            msg = rpc_common.serialize_msg(msg)
+        conn.notify_send(topic, msg)
+
+
+def cleanup(connection_pool):
+    if connection_pool:
+        connection_pool.empty()
+
+
+def get_control_exchange(conf):
+    return conf.control_exchange
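
Of the pieces above, the duplicate-message guard is the easiest to show in isolation; a small sketch using only names defined in this module (the message content is illustrative):

    # Consumers stamp outgoing messages and reject redeliveries.
    cache = _MsgIdCache()
    msg = {'method': 'echo', 'args': {'value': 42}}
    _add_unique_id(msg)                   # adds msg['_unique_id']
    cache.check_duplicate_message(msg)    # first delivery passes
    try:
        cache.check_duplicate_message(msg)    # redelivery is rejected
    except rpc_common.DuplicateMessageError:
        pass
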
diff --git a/conductor/openstack/common/rpc/common.py b/conductor/openstack/common/rpc/common.py
new file mode 100644
index 0000000..798a47b
--- /dev/null
+++ b/conductor/openstack/common/rpc/common.py
@@ -0,0 +1,509 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import sys
+import traceback
+
+from oslo.config import cfg
+import six
+
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import importutils
+from conductor.openstack.common import jsonutils
+from conductor.openstack.common import local
+from conductor.openstack.common import log as logging
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+'''RPC Envelope Version.
+
+This version number applies to the top level structure of messages sent out.
+It does *not* apply to the message payload, which must be versioned
+independently.  For example, when using rpc APIs, a version number is applied
+for changes to the API being exposed over rpc.  This version number is handled
+in the rpc proxy and dispatcher modules.
+
+This version number applies to the message envelope that is used in the
+serialization done inside the rpc layer.  See serialize_msg() and
+deserialize_msg().
+
+The current message format (version 2.0) is very simple.  It is:
+
+    {
+        'oslo.version': <RPC Envelope Version as a String>,
+        'oslo.message': <Application Message Payload, JSON encoded>
+    }
+
+Message format version '1.0' is just considered to be the messages we sent
+without a message envelope.
+
+So, the current message envelope just includes the envelope version.  It may
+eventually contain additional information, such as a signature for the message
+payload.
+
+We will JSON encode the application message payload.  The message envelope,
+which includes the JSON encoded application message body, will be passed down
+to the messaging libraries as a dict.
+'''
+_RPC_ENVELOPE_VERSION = '2.0'
+
+_VERSION_KEY = 'oslo.version'
+_MESSAGE_KEY = 'oslo.message'
+
+_REMOTE_POSTFIX = '_Remote'
+
+
+class RPCException(Exception):
+    msg_fmt = _("An unknown RPC related exception occurred.")
+
+    def __init__(self, message=None, **kwargs):
+        self.kwargs = kwargs
+
+        if not message:
+            try:
+                message = self.msg_fmt % kwargs
+
+            except Exception:
+                # kwargs doesn't match a variable in the message
+                # log the issue and the kwargs
+                LOG.exception(_('Exception in string format operation'))
+                for name, value in kwargs.iteritems():
+                    LOG.error("%s: %s" % (name, value))
+                # at least get the core message out if something happened
+                message = self.msg_fmt
+
+        super(RPCException, self).__init__(message)
+
+
+class RemoteError(RPCException):
+    """Signifies that a remote class has raised an exception.
+
+    Contains a string representation of the type of the original exception,
+    the value of the original exception, and the traceback.  These are
+    sent to the parent as a joined string so printing the exception
+    contains all of the relevant info.
+
+    """
+    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
+
+    def __init__(self, exc_type=None, value=None, traceback=None):
+        self.exc_type = exc_type
+        self.value = value
+        self.traceback = traceback
+        super(RemoteError, self).__init__(exc_type=exc_type,
+                                          value=value,
+                                          traceback=traceback)
+
+
+class Timeout(RPCException):
+    """Signifies that a timeout has occurred.
+
+    This exception is raised if the rpc_response_timeout is reached while
+    waiting for a response from the remote side.
+    """
+    msg_fmt = _('Timeout while waiting on RPC response - '
+                'topic: "%(topic)s", RPC method: "%(method)s" '
+                'info: "%(info)s"')
+
+    def __init__(self, info=None, topic=None, method=None):
+        """Initiates Timeout object.
+
+        :param info: Extra info to convey to the user
+        :param topic: The topic that the rpc call was sent to
+        :param rpc_method_name: The name of the rpc method being
+                                called
+        """
+        self.info = info
+        self.topic = topic
+        self.method = method
+        super(Timeout, self).__init__(
+            None,
+            info=info or _('<unknown>'),
+            topic=topic or _('<unknown>'),
+            method=method or _('<unknown>'))
+
+
+class DuplicateMessageError(RPCException):
+    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
+
+
+class InvalidRPCConnectionReuse(RPCException):
+    msg_fmt = _("Invalid reuse of an RPC connection.")
+
+
+class UnsupportedRpcVersion(RPCException):
+    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
+                "this endpoint.")
+
+
+class UnsupportedRpcEnvelopeVersion(RPCException):
+    msg_fmt = _("Specified RPC envelope version, %(version)s, "
+                "not supported by this endpoint.")
+
+
+class RpcVersionCapError(RPCException):
+    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
+
+
+class Connection(object):
+    """A connection, returned by rpc.create_connection().
+
+    This class represents a connection to the message bus used for rpc.
+    An instance of this class should never be created by users of the rpc API.
+    Use rpc.create_connection() instead.
+    """
+    def close(self):
+        """Close the connection.
+
+        This method must be called when the connection will no longer be used.
+        It will ensure that any resources associated with the connection, such
+        as a network connection, are cleaned up.
+        """
+        raise NotImplementedError()
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer on this connection.
+
+        A consumer is associated with a message queue on the backend message
+        bus.  The consumer will read messages from the queue, unpack them, and
+        dispatch them to the proxy object.  The contents of the message pulled
+        off of the queue will determine which method gets called on the proxy
+        object.
+
+        :param topic: This is a name associated with what to consume from.
+                      Multiple instances of a service may consume from the same
+                      topic. For example, all instances of nova-compute consume
+                      from a queue called "compute".  In that case, the
+                      messages will get distributed amongst the consumers in a
+                      round-robin fashion if fanout=False.  If fanout=True,
+                      every consumer associated with this topic will get a
+                      copy of every message.
+        :param proxy: The object that will handle all incoming messages.
+        :param fanout: Whether or not this is a fanout topic.  See the
+                       documentation for the topic parameter for some
+                       additional comments on this.
+        """
+        raise NotImplementedError()
+
+    def create_worker(self, topic, proxy, pool_name):
+        """Create a worker on this connection.
+
+        A worker is like a regular consumer of messages directed to a
+        topic, except that it is part of a set of such consumers (the
+        "pool") which may run in parallel. Every pool of workers will
+        receive a given message, but only one worker in the pool will
+        be asked to process it. Load is distributed across the members
+        of the pool in round-robin fashion.
+
+        :param topic: This is a name associated with what to consume from.
+                      Multiple instances of a service may consume from the same
+                      topic.
+        :param proxy: The object that will handle all incoming messages.
+        :param pool_name: String containing the name of the pool of workers
+        """
+        raise NotImplementedError()
+
+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+        """Register as a member of a group of consumers.
+
+        Uses the given topic from the specified exchange.
+        Exactly one member of a given pool will receive each message.
+
+        A message will be delivered to multiple pools if more than
+        one is created.
+
+        :param callback: Callable to be invoked for each message.
+        :type callback: callable accepting one argument
+        :param pool_name: The name of the consumer pool.
+        :type pool_name: str
+        :param topic: The routing topic for desired messages.
+        :type topic: str
+        :param exchange_name: The name of the message exchange where
+                              the client should attach. Defaults to
+                              the configured exchange.
+        :type exchange_name: str
+        """
+        raise NotImplementedError()
+
+    def consume_in_thread(self):
+        """Spawn a thread to handle incoming messages.
+
+        Spawn a thread that will be responsible for handling all incoming
+        messages for consumers that were set up on this connection.
+
+        Message dispatching inside of this is expected to be implemented in a
+        non-blocking manner.  An example implementation would be having this
+        thread pull messages in for all of the consumers, but utilize a thread
+        pool for dispatching the messages to the proxy objects.
+        """
+        raise NotImplementedError()
+
+
+def _safe_log(log_func, msg, msg_data):
+    """Sanitizes the msg_data field before logging."""
+    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
+
+    def _fix_passwords(d):
+        """Sanitizes the password fields in the dictionary."""
+        for k in d.iterkeys():
+            if k.lower().find('password') != -1:
+                d[k] = '<SANITIZED>'
+            elif k.lower() in SANITIZE:
+                d[k] = '<SANITIZED>'
+            elif isinstance(d[k], dict):
+                _fix_passwords(d[k])
+        return d
+
+    return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
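+
+# Illustrative note (not part of the upstream module): with the defaults
+# above, a call such as
+#
+#     _safe_log(LOG.debug, 'sending: %s',
+#               {'method': 'x', 'args': {'new_pass': 'secret',
+#                                        'admin_password': 'secret'}})
+#
+# would log both secret values as '<SANITIZED>'; the caller's dict is left
+# untouched because only a deep copy is sanitized.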
+
+
+def serialize_remote_exception(failure_info, log_failure=True):
+    """Prepares exception data to be sent over rpc.
+
+    Failure_info should be a sys.exc_info() tuple.
+
+    """
+    tb = traceback.format_exception(*failure_info)
+    failure = failure_info[1]
+    if log_failure:
+        LOG.error(_("Returning exception %s to caller"),
+                  six.text_type(failure))
+        LOG.error(tb)
+
+    kwargs = {}
+    if hasattr(failure, 'kwargs'):
+        kwargs = failure.kwargs
+
+    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
+    # exceptions. Let's turn it back into the original exception type.
+    cls_name = str(failure.__class__.__name__)
+    mod_name = str(failure.__class__.__module__)
+    if (cls_name.endswith(_REMOTE_POSTFIX) and
+            mod_name.endswith(_REMOTE_POSTFIX)):
+        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
+        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
+
+    data = {
+        'class': cls_name,
+        'module': mod_name,
+        'message': six.text_type(failure),
+        'tb': tb,
+        'args': failure.args,
+        'kwargs': kwargs
+    }
+
+    json_data = jsonutils.dumps(data)
+
+    return json_data
+
+
+def deserialize_remote_exception(conf, data):
+    failure = jsonutils.loads(str(data))
+
+    trace = failure.get('tb', [])
+    message = failure.get('message', "") + "\n" + "\n".join(trace)
+    name = failure.get('class')
+    module = failure.get('module')
+
+    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
+    # order to prevent arbitrary code execution.
+    if module not in conf.allowed_rpc_exception_modules:
+        return RemoteError(name, failure.get('message'), trace)
+
+    try:
+        mod = importutils.import_module(module)
+        klass = getattr(mod, name)
+        if not issubclass(klass, Exception):
+            raise TypeError("Can only deserialize Exceptions")
+
+        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
+    except (AttributeError, TypeError, ImportError):
+        return RemoteError(name, failure.get('message'), trace)
+
+    ex_type = type(failure)
+    str_override = lambda self: message
+    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
+                       {'__str__': str_override, '__unicode__': str_override})
+    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
+    try:
+        # NOTE(ameade): Dynamically create a new exception type and swap it in
+        # as the new type for the exception. This only works on user defined
+        # Exceptions and not core python exceptions. This is important because
+        # we cannot necessarily change an exception message so we must override
+        # the __str__ method.
+        failure.__class__ = new_ex_type
+    except TypeError:
+        # NOTE(ameade): If a core exception then just add the traceback to the
+        # first exception argument.
+        failure.args = (message,) + failure.args[1:]
+    return failure
+
+
+class CommonRpcContext(object):
+    def __init__(self, **kwargs):
+        self.values = kwargs
+
+    def __getattr__(self, key):
+        try:
+            return self.values[key]
+        except KeyError:
+            raise AttributeError(key)
+
+    def to_dict(self):
+        return copy.deepcopy(self.values)
+
+    @classmethod
+    def from_dict(cls, values):
+        return cls(**values)
+
+    def deepcopy(self):
+        return self.from_dict(self.to_dict())
+
+    def update_store(self):
+        local.store.context = self
+
+    def elevated(self, read_deleted=None, overwrite=False):
+        """Return a version of this context with admin flag set."""
+        # TODO(russellb) This method is a bit of a nova-ism.  It makes
+        # some assumptions about the data in the request context sent
+        # across rpc, while the rest of this class does not.  We could get
+        # rid of this if we changed the nova code that uses this to
+        # convert the RpcContext back to its native RequestContext doing
+        # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
+
+        context = self.deepcopy()
+        context.values['is_admin'] = True
+
+        context.values.setdefault('roles', [])
+
+        if 'admin' not in context.values['roles']:
+            context.values['roles'].append('admin')
+
+        if read_deleted is not None:
+            context.values['read_deleted'] = read_deleted
+
+        return context
+
+
+class ClientException(Exception):
+    """Encapsulates actual exception expected to be hit by a RPC proxy object.
+
+    Merely instantiating it records the current exception information, which
+    will be passed back to the RPC client without exceptional logging.
+    """
+    def __init__(self):
+        self._exc_info = sys.exc_info()
+
+
+def catch_client_exception(exceptions, func, *args, **kwargs):
+    try:
+        return func(*args, **kwargs)
+    except Exception as e:
+        if type(e) in exceptions:
+            raise ClientException()
+        else:
+            raise
+
+
+def client_exceptions(*exceptions):
+    """Decorator for manager methods that raise expected exceptions.
+
+    Marking a Manager method with this decorator allows the declaration
+    of expected exceptions that the RPC layer should not consider fatal,
+    and not log as if they were generated in a real error scenario. Note
+    that this will cause listed exceptions to be wrapped in a
+    ClientException, which is used internally by the RPC layer.
+    """
+    def outer(func):
+        def inner(*args, **kwargs):
+            return catch_client_exception(exceptions, func, *args, **kwargs)
+        return inner
+    return outer
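+
+# Illustrative sketch (not part of the upstream module): marking expected
+# exceptions on a hypothetical manager method so the RPC layer wraps them in
+# ClientException instead of logging them as server-side failures.
+#
+#     class ExampleManager(object):
+#         @client_exceptions(ValueError, KeyError)
+#         def lookup(self, ctxt, key):
+#             return self.data[key]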
+
+
+def version_is_compatible(imp_version, version):
+    """Determine whether versions are compatible.
+
+    :param imp_version: The version implemented
+    :param version: The version requested by an incoming message.
+    """
+    version_parts = version.split('.')
+    imp_version_parts = imp_version.split('.')
+    if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
+        return False
+    if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
+        return False
+    return True
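+
+# Illustrative examples (not part of the upstream module) of the rule above:
+#
+#     version_is_compatible('1.3', '1.1')  # True: same major, lower minor
+#     version_is_compatible('1.3', '1.4')  # False: requested minor too new
+#     version_is_compatible('2.0', '1.9')  # False: major versions differ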
+
+
+def serialize_msg(raw_msg):
+    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
+    # information about this format.
+    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
+           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
+
+    return msg
+
+
+def deserialize_msg(msg):
+    # NOTE(russellb): Hang on to your hats, this road is about to
+    # get a little bumpy.
+    #
+    # Robustness Principle:
+    #    "Be strict in what you send, liberal in what you accept."
+    #
+    # At this point we have to do a bit of guessing about what it
+    # is we just received.  Here is the set of possibilities:
+    #
+    # 1) We received a dict.  This could be 2 things:
+    #
+    #   a) Inspect it to see if it looks like a standard message envelope.
+    #      If so, great!
+    #
+    #   b) If it doesn't look like a standard message envelope, it could either
+    #      be a notification, or a message from before we added a message
+    #      envelope (referred to as version 1.0).
+    #      Just return the message as-is.
+    #
+    # 2) It's any other non-dict type.  Just return it and hope for the best.
+    #    This case covers return values from rpc.call() from before message
+    #    envelopes were used.  (messages to call a method were always a dict)
+
+    if not isinstance(msg, dict):
+        # See #2 above.
+        return msg
+
+    base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
+    if not all(map(lambda key: key in msg, base_envelope_keys)):
+        #  See #1.b above.
+        return msg
+
+    # At this point we think we have the message envelope
+    # format we were expecting. (#1.a above)
+
+    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
+        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
+
+    raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
+
+    return raw_msg
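+
+
+# Illustrative sketch (not part of the upstream module): round-tripping a
+# payload through the envelope helpers above, assuming jsonutils encodes
+# primitive types the same way the standard json module does.
+#
+#     envelope = serialize_msg({'method': 'ping', 'args': {}})
+#     # envelope is roughly {'oslo.version': '2.0',
+#     #                      'oslo.message': '{"method": "ping", "args": {}}'}
+#     assert deserialize_msg(envelope) == {'method': 'ping', 'args': {}}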
diff --git a/conductor/openstack/common/rpc/dispatcher.py b/conductor/openstack/common/rpc/dispatcher.py
new file mode 100644
index 0000000..00a36d6
--- /dev/null
+++ b/conductor/openstack/common/rpc/dispatcher.py
@@ -0,0 +1,178 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Code for rpc message dispatching.
+
+Messages that come in have a version number associated with them.  RPC API
+version numbers are in the form:
+
+    Major.Minor
+
+For a given message with version X.Y, the receiver must be marked as able to
+handle messages of version A.B, where:
+
+    A = X
+
+    B >= Y
+
+The Major version number would be incremented for an almost completely new API.
+The Minor version number would be incremented for backwards compatible changes
+to an existing API.  A backwards compatible change could be something like
+adding a new method, adding an argument to an existing method (but not
+requiring it), or changing the type for an existing argument (but still
+handling the old type as well).
+
+The conversion over to a versioned API must be done on both the client side and
+server side of the API at the same time.  However, as the code stands today,
+there can be both versioned and unversioned APIs implemented in the same code
+base.
+
+EXAMPLES
+========
+
+Nova was the first project to use versioned rpc APIs.  Consider the compute rpc
+API as an example.  The client side is in nova/compute/rpcapi.py and the server
+side is in nova/compute/manager.py.
+
+
+Example 1) Adding a new method.
+-------------------------------
+
+Adding a new method is a backwards compatible change.  It should be added to
+nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
+X.Y+1.  On the client side, the new method in nova/compute/rpcapi.py should
+have a specific version specified to indicate the minimum API version that must
+be implemented for the method to be supported.  For example::
+
+    def get_host_uptime(self, ctxt, host):
+        topic = _compute_topic(self.topic, ctxt, host, None)
+        return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
+                version='1.1')
+
+In this case, version '1.1' is the first version that supported the
+get_host_uptime() method.
+
+
+Example 2) Adding a new parameter.
+----------------------------------
+
+Adding a new parameter to an rpc method can be made backwards compatible.  The
+RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
+The implementation of the method must not expect the parameter to be present::
+
+    def some_remote_method(self, arg1, arg2, newarg=None):
+        # The code needs to deal with newarg=None for cases
+        # where an older client sends a message without it.
+        pass
+
+On the client side, the same changes should be made as in example 1.  The
+minimum version that supports the new parameter should be specified.
+"""
+
+from conductor.openstack.common.rpc import common as rpc_common
+from conductor.openstack.common.rpc import serializer as rpc_serializer
+
+
+class RpcDispatcher(object):
+    """Dispatch rpc messages according to the requested API version.
+
+    This class can be used as the top level 'manager' for a service.  It
+    contains a list of underlying managers that have an RPC_API_VERSION
+    attribute.
+    """
+
+    def __init__(self, callbacks, serializer=None):
+        """Initialize the rpc dispatcher.
+
+        :param callbacks: List of proxy objects that are an instance
+                          of a class with rpc methods exposed.  Each proxy
+                          object should have an RPC_API_VERSION attribute.
+        :param serializer: The Serializer object that will be used to
+                           deserialize arguments before the method call and
+                           to serialize the result after it returns.
+        """
+        self.callbacks = callbacks
+        if serializer is None:
+            serializer = rpc_serializer.NoOpSerializer()
+        self.serializer = serializer
+        super(RpcDispatcher, self).__init__()
+
+    def _deserialize_args(self, context, kwargs):
+        """Helper method called to deserialize args before dispatch.
+
+        This calls our serializer on each argument, returning a new set of
+        args that have been deserialized.
+
+        :param context: The request context
+        :param kwargs: The arguments to be deserialized
+        :returns: A new set of deserialized args
+        """
+        new_kwargs = dict()
+        for argname, arg in kwargs.iteritems():
+            new_kwargs[argname] = self.serializer.deserialize_entity(context,
+                                                                     arg)
+        return new_kwargs
+
+    def dispatch(self, ctxt, version, method, namespace, **kwargs):
+        """Dispatch a message based on a requested version.
+
+        :param ctxt: The request context
+        :param version: The requested API version from the incoming message
+        :param method: The method requested to be called by the incoming
+                       message.
+        :param namespace: The namespace for the requested method.  If None,
+                          the dispatcher will look for a method on a callback
+                          object with no namespace set.
+        :param kwargs: A dict of keyword arguments to be passed to the method.
+
+        :returns: Whatever is returned by the underlying method that gets
+                  called.
+        """
+        if not version:
+            version = '1.0'
+
+        had_compatible = False
+        for proxyobj in self.callbacks:
+            # Check for namespace compatibility
+            try:
+                cb_namespace = proxyobj.RPC_API_NAMESPACE
+            except AttributeError:
+                cb_namespace = None
+
+            if namespace != cb_namespace:
+                continue
+
+            # Check for version compatibility
+            try:
+                rpc_api_version = proxyobj.RPC_API_VERSION
+            except AttributeError:
+                rpc_api_version = '1.0'
+
+            is_compatible = rpc_common.version_is_compatible(rpc_api_version,
+                                                             version)
+            had_compatible = had_compatible or is_compatible
+
+            if not hasattr(proxyobj, method):
+                continue
+            if is_compatible:
+                kwargs = self._deserialize_args(ctxt, kwargs)
+                result = getattr(proxyobj, method)(ctxt, **kwargs)
+                return self.serializer.serialize_entity(ctxt, result)
+
+        if had_compatible:
+            raise AttributeError("No such RPC function '%s'" % method)
+        else:
+            raise rpc_common.UnsupportedRpcVersion(version=version)
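+
+
+# Illustrative sketch (not part of the upstream module): a minimal callback
+# object shaped the way the dispatcher expects.  'ExampleManager' and 'ctxt'
+# are hypothetical; ctxt would be a request context such as an
+# rpc_common.CommonRpcContext instance.
+#
+#     class ExampleManager(object):
+#         RPC_API_VERSION = '1.1'
+#
+#         def ping(self, ctxt, payload=None):
+#             return payload
+#
+#     dispatcher = RpcDispatcher([ExampleManager()])
+#     # A message requesting version '1.0' is compatible with the '1.1'
+#     # implementation above, so ping() is invoked.
+#     dispatcher.dispatch(ctxt, '1.0', 'ping', None, payload='pong')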
diff --git a/conductor/openstack/common/rpc/impl_fake.py b/conductor/openstack/common/rpc/impl_fake.py
new file mode 100644
index 0000000..277ebf4
--- /dev/null
+++ b/conductor/openstack/common/rpc/impl_fake.py
@@ -0,0 +1,195 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""Fake RPC implementation which calls proxy methods directly with no
+queues.  Casts will block, but this is very useful for tests.
+"""
+
+import inspect
+# NOTE(russellb): We specifically want to use json, not our own jsonutils.
+# jsonutils has some extra logic to automatically convert objects to primitive
+# types so that they can be serialized.  We want to catch all cases where
+# non-primitive types make it into this code and treat it as an error.
+import json
+import time
+
+import eventlet
+
+from conductor.openstack.common.rpc import common as rpc_common
+
+CONSUMERS = {}
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+    def __init__(self, **kwargs):
+        super(RpcContext, self).__init__(**kwargs)
+        self._response = []
+        self._done = False
+
+    def deepcopy(self):
+        values = self.to_dict()
+        new_inst = self.__class__(**values)
+        new_inst._response = self._response
+        new_inst._done = self._done
+        return new_inst
+
+    def reply(self, reply=None, failure=None, ending=False):
+        if ending:
+            self._done = True
+        if not self._done:
+            self._response.append((reply, failure))
+
+
+class Consumer(object):
+    def __init__(self, topic, proxy):
+        self.topic = topic
+        self.proxy = proxy
+
+    def call(self, context, version, method, namespace, args, timeout):
+        done = eventlet.event.Event()
+
+        def _inner():
+            ctxt = RpcContext.from_dict(context.to_dict())
+            try:
+                rval = self.proxy.dispatch(context, version, method,
+                                           namespace, **args)
+                res = []
+                # Caller might have called ctxt.reply() manually
+                for (reply, failure) in ctxt._response:
+                    if failure:
+                        raise failure[0], failure[1], failure[2]
+                    res.append(reply)
+                # If 'ending' was not sent, we might have more data to
+                # return from the function itself.
+                if not ctxt._done:
+                    if inspect.isgenerator(rval):
+                        for val in rval:
+                            res.append(val)
+                    else:
+                        res.append(rval)
+                done.send(res)
+            except rpc_common.ClientException as e:
+                done.send_exception(e._exc_info[1])
+            except Exception as e:
+                done.send_exception(e)
+
+        thread = eventlet.greenthread.spawn(_inner)
+
+        if timeout:
+            start_time = time.time()
+            while not done.ready():
+                eventlet.greenthread.sleep(1)
+                cur_time = time.time()
+                if (cur_time - start_time) > timeout:
+                    thread.kill()
+                    raise rpc_common.Timeout()
+
+        return done.wait()
+
+
+class Connection(object):
+    """Connection object."""
+
+    def __init__(self):
+        self.consumers = []
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        consumer = Consumer(topic, proxy)
+        self.consumers.append(consumer)
+        if topic not in CONSUMERS:
+            CONSUMERS[topic] = []
+        CONSUMERS[topic].append(consumer)
+
+    def close(self):
+        for consumer in self.consumers:
+            CONSUMERS[consumer.topic].remove(consumer)
+        self.consumers = []
+
+    def consume_in_thread(self):
+        pass
+
+
+def create_connection(conf, new=True):
+    """Create a connection."""
+    return Connection()
+
+
+def check_serialize(msg):
+    """Make sure a message intended for rpc can be serialized."""
+    json.dumps(msg)
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+    """Make a call that returns multiple times."""
+
+    check_serialize(msg)
+
+    method = msg.get('method')
+    if not method:
+        return
+    args = msg.get('args', {})
+    version = msg.get('version', None)
+    namespace = msg.get('namespace', None)
+
+    try:
+        consumer = CONSUMERS[topic][0]
+    except (KeyError, IndexError):
+        return iter([None])
+    else:
+        return consumer.call(context, version, method, namespace, args,
+                             timeout)
+
+
+def call(conf, context, topic, msg, timeout=None):
+    """Sends a message on a topic and wait for a response."""
+    rv = multicall(conf, context, topic, msg, timeout)
+    # NOTE(vish): return the last result from the multicall
+    rv = list(rv)
+    if not rv:
+        return
+    return rv[-1]
+
+
+def cast(conf, context, topic, msg):
+    check_serialize(msg)
+    try:
+        call(conf, context, topic, msg)
+    except Exception:
+        pass
+
+
+def notify(conf, context, topic, msg, envelope):
+    check_serialize(msg)
+
+
+def cleanup():
+    pass
+
+
+def fanout_cast(conf, context, topic, msg):
+    """Cast to all consumers of a topic."""
+    check_serialize(msg)
+    method = msg.get('method')
+    if not method:
+        return
+    args = msg.get('args', {})
+    version = msg.get('version', None)
+    namespace = msg.get('namespace', None)
+
+    for consumer in CONSUMERS.get(topic, []):
+        try:
+            consumer.call(context, version, method, namespace, args, None)
+        except Exception:
+            pass
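+
+
+# Illustrative sketch (not part of the upstream module): exercising the fake
+# driver entirely in-process.  'dispatcher' and 'ctxt' are hypothetical here;
+# dispatcher would be any object exposing dispatch() (for example an
+# rpc.dispatcher.RpcDispatcher), ctxt an rpc_common.CommonRpcContext
+# instance, and cfg.CONF the oslo.config global.
+#
+#     conn = create_connection(cfg.CONF)
+#     conn.create_consumer('test-topic', dispatcher)
+#     result = call(cfg.CONF, ctxt, 'test-topic',
+#                   {'method': 'ping', 'args': {'payload': 'pong'}})
+#     # result == 'pong'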
diff --git a/conductor/openstack/common/rpc/impl_kombu.py b/conductor/openstack/common/rpc/impl_kombu.py
new file mode 100644
index 0000000..3c120ca
--- /dev/null
+++ b/conductor/openstack/common/rpc/impl_kombu.py
@@ -0,0 +1,865 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import functools
+import itertools
+import socket
+import ssl
+import time
+import uuid
+
+import eventlet
+import greenlet
+import kombu
+import kombu.connection
+import kombu.entity
+import kombu.messaging
+from oslo.config import cfg
+
+from conductor.openstack.common import excutils
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import network_utils
+from conductor.openstack.common.rpc import amqp as rpc_amqp
+from conductor.openstack.common.rpc import common as rpc_common
+from conductor.openstack.common import sslutils
+
+kombu_opts = [
+    cfg.StrOpt('kombu_ssl_version',
+               default='',
+               help='SSL version to use (valid only if SSL enabled). '
+                    'Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
+                    'be available on some distributions.'
+               ),
+    cfg.StrOpt('kombu_ssl_keyfile',
+               default='',
+               help='SSL key file (valid only if SSL enabled)'),
+    cfg.StrOpt('kombu_ssl_certfile',
+               default='',
+               help='SSL cert file (valid only if SSL enabled)'),
+    cfg.StrOpt('kombu_ssl_ca_certs',
+               default='',
+               help=('SSL certification authority file '
+                     '(valid only if SSL enabled)')),
+    cfg.StrOpt('rabbit_host',
+               default='localhost',
+               help='The RabbitMQ broker address where a single node is used'),
+    cfg.IntOpt('rabbit_port',
+               default=5672,
+               help='The RabbitMQ broker port where a single node is used'),
+    cfg.ListOpt('rabbit_hosts',
+                default=['$rabbit_host:$rabbit_port'],
+                help='RabbitMQ HA cluster host:port pairs'),
+    cfg.BoolOpt('rabbit_use_ssl',
+                default=False,
+                help='connect over SSL for RabbitMQ'),
+    cfg.StrOpt('rabbit_userid',
+               default='guest',
+               help='the RabbitMQ userid'),
+    cfg.StrOpt('rabbit_password',
+               default='guest',
+               help='the RabbitMQ password',
+               secret=True),
+    cfg.StrOpt('rabbit_virtual_host',
+               default='/',
+               help='the RabbitMQ virtual host'),
+    cfg.IntOpt('rabbit_retry_interval',
+               default=1,
+               help='how frequently to retry connecting with RabbitMQ'),
+    cfg.IntOpt('rabbit_retry_backoff',
+               default=2,
+               help='how long to back off between retries when connecting '
+                    'to RabbitMQ'),
+    cfg.IntOpt('rabbit_max_retries',
+               default=0,
+               help='maximum retries with trying to connect to RabbitMQ '
+                    '(the default of 0 implies an infinite retry count)'),
+    cfg.BoolOpt('rabbit_ha_queues',
+                default=False,
+                help='use H/A queues in RabbitMQ (x-ha-policy: all). '
+                     'You need to wipe the RabbitMQ database when '
+                     'changing this option.'),
+
+]
+
+cfg.CONF.register_opts(kombu_opts)
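+
+# Illustrative sketch (not part of the upstream module): the options above are
+# registered in the default group, so they map to [DEFAULT] entries in the
+# service configuration file, for example (values are placeholders only):
+#
+#     [DEFAULT]
+#     rabbit_host = 127.0.0.1
+#     rabbit_port = 5672
+#     rabbit_userid = guest
+#     rabbit_password = guest
+#     rabbit_use_ssl = False
+#     rabbit_ha_queues = False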
+
+LOG = rpc_common.LOG
+
+
+def _get_queue_arguments(conf):
+    """Construct the arguments for declaring a queue.
+
+    If the rabbit_ha_queues option is set, we declare a mirrored queue
+    as described here:
+
+      http://www.rabbitmq.com/ha.html
+
+    Setting x-ha-policy to all means that the queue will be mirrored
+    to all nodes in the cluster.
+    """
+    return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
+
+
+class ConsumerBase(object):
+    """Consumer base class."""
+
+    def __init__(self, channel, callback, tag, **kwargs):
+        """Declare a queue on an amqp channel.
+
+        'channel' is the amqp channel to use
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        queue name, exchange name, and other kombu options are
+        passed in here as a dictionary.
+        """
+        self.callback = callback
+        self.tag = str(tag)
+        self.kwargs = kwargs
+        self.queue = None
+        self.ack_on_error = kwargs.get('ack_on_error', True)
+        self.reconnect(channel)
+
+    def reconnect(self, channel):
+        """Re-declare the queue after a rabbit reconnect."""
+        self.channel = channel
+        self.kwargs['channel'] = channel
+        self.queue = kombu.entity.Queue(**self.kwargs)
+        self.queue.declare()
+
+    def _callback_handler(self, message, callback):
+        """Call callback with deserialized message.
+
+        Messages that are processed without exception are ack'ed.
+
+        If the message processing generates an exception, it will be
+        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
+        Rejection is better than waiting for the message to time out.
+        Rejected messages are immediately requeued.
+        """
+
+        ack_msg = False
+        try:
+            msg = rpc_common.deserialize_msg(message.payload)
+            callback(msg)
+            ack_msg = True
+        except Exception:
+            if self.ack_on_error:
+                ack_msg = True
+                LOG.exception(_("Failed to process message"
+                                " ... skipping it."))
+            else:
+                LOG.exception(_("Failed to process message"
+                                " ... will requeue."))
+        finally:
+            if ack_msg:
+                message.ack()
+            else:
+                message.reject()
+
+    def consume(self, *args, **kwargs):
+        """Actually declare the consumer on the amqp channel.  This will
+        start the flow of messages from the queue.  Using the
+        Connection.iterconsume() iterator will process the messages,
+        calling the appropriate callback.
+
+        If a callback is specified in kwargs, use that.  Otherwise,
+        use the callback passed during __init__()
+
+        If kwargs['nowait'] is False, this call will wait for the broker
+        to confirm the consumer registration before returning; it does not
+        block until a message is read.
+
+        """
+
+        options = {'consumer_tag': self.tag}
+        options['nowait'] = kwargs.get('nowait', False)
+        callback = kwargs.get('callback', self.callback)
+        if not callback:
+            raise ValueError("No callback defined")
+
+        def _callback(raw_message):
+            message = self.channel.message_to_python(raw_message)
+            self._callback_handler(message, callback)
+
+        self.queue.consume(*args, callback=_callback, **options)
+
+    def cancel(self):
+        """Cancel the consuming from the queue, if it has started."""
+        try:
+            self.queue.cancel(self.tag)
+        except KeyError as e:
+            # NOTE(comstud): Kludge to get around an amqplib bug
+            if str(e) != "u'%s'" % self.tag:
+                raise
+        self.queue = None
+
+
+class DirectConsumer(ConsumerBase):
+    """Queue/consumer class for 'direct'."""
+
+    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
+        """Init a 'direct' queue.
+
+        'channel' is the amqp channel to use
+        'msg_id' is the msg_id to listen on
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        Other kombu options may be passed
+        """
+        # Default options
+        options = {'durable': False,
+                   'queue_arguments': _get_queue_arguments(conf),
+                   'auto_delete': True,
+                   'exclusive': False}
+        options.update(kwargs)
+        exchange = kombu.entity.Exchange(name=msg_id,
+                                         type='direct',
+                                         durable=options['durable'],
+                                         auto_delete=options['auto_delete'])
+        super(DirectConsumer, self).__init__(channel,
+                                             callback,
+                                             tag,
+                                             name=msg_id,
+                                             exchange=exchange,
+                                             routing_key=msg_id,
+                                             **options)
+
+
+class TopicConsumer(ConsumerBase):
+    """Consumer class for 'topic'."""
+
+    def __init__(self, conf, channel, topic, callback, tag, name=None,
+                 exchange_name=None, **kwargs):
+        """Init a 'topic' queue.
+
+        :param channel: the amqp channel to use
+        :param topic: the topic to listen on
+        :paramtype topic: str
+        :param callback: the callback to call when messages are received
+        :param tag: a unique ID for the consumer on the channel
+        :param name: optional queue name, defaults to topic
+        :paramtype name: str
+
+        Other kombu options may be passed as keyword arguments
+        """
+        # Default options
+        options = {'durable': conf.amqp_durable_queues,
+                   'queue_arguments': _get_queue_arguments(conf),
+                   'auto_delete': conf.amqp_auto_delete,
+                   'exclusive': False}
+        options.update(kwargs)
+        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
+        exchange = kombu.entity.Exchange(name=exchange_name,
+                                         type='topic',
+                                         durable=options['durable'],
+                                         auto_delete=options['auto_delete'])
+        super(TopicConsumer, self).__init__(channel,
+                                            callback,
+                                            tag,
+                                            name=name or topic,
+                                            exchange=exchange,
+                                            routing_key=topic,
+                                            **options)
+
+
+class FanoutConsumer(ConsumerBase):
+    """Consumer class for 'fanout'."""
+
+    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
+        """Init a 'fanout' queue.
+
+        'channel' is the amqp channel to use
+        'topic' is the topic to listen on
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        Other kombu options may be passed
+        """
+        unique = uuid.uuid4().hex
+        exchange_name = '%s_fanout' % topic
+        queue_name = '%s_fanout_%s' % (topic, unique)
+
+        # Default options
+        options = {'durable': False,
+                   'queue_arguments': _get_queue_arguments(conf),
+                   'auto_delete': True,
+                   'exclusive': False}
+        options.update(kwargs)
+        exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
+                                         durable=options['durable'],
+                                         auto_delete=options['auto_delete'])
+        super(FanoutConsumer, self).__init__(channel, callback, tag,
+                                             name=queue_name,
+                                             exchange=exchange,
+                                             routing_key=topic,
+                                             **options)
+
+
+class Publisher(object):
+    """Base Publisher class."""
+
+    def __init__(self, channel, exchange_name, routing_key, **kwargs):
+        """Init the Publisher class with the exchange_name, routing_key,
+        and other options
+        """
+        self.exchange_name = exchange_name
+        self.routing_key = routing_key
+        self.kwargs = kwargs
+        self.reconnect(channel)
+
+    def reconnect(self, channel):
+        """Re-establish the Producer after a rabbit reconnection."""
+        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
+                                              **self.kwargs)
+        self.producer = kombu.messaging.Producer(exchange=self.exchange,
+                                                 channel=channel,
+                                                 routing_key=self.routing_key)
+
+    def send(self, msg, timeout=None):
+        """Send a message."""
+        if timeout:
+            #
+            # AMQP TTL is in milliseconds when set in the header.
+            #
+            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
+        else:
+            self.producer.publish(msg)
+
+
+class DirectPublisher(Publisher):
+    """Publisher class for 'direct'."""
+    def __init__(self, conf, channel, msg_id, **kwargs):
+        """init a 'direct' publisher.
+
+        Kombu options may be passed as keyword args to override defaults
+        """
+
+        options = {'durable': False,
+                   'auto_delete': True,
+                   'exclusive': False}
+        options.update(kwargs)
+        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
+                                              type='direct', **options)
+
+
+class TopicPublisher(Publisher):
+    """Publisher class for 'topic'."""
+    def __init__(self, conf, channel, topic, **kwargs):
+        """init a 'topic' publisher.
+
+        Kombu options may be passed as keyword args to override defaults
+        """
+        options = {'durable': conf.amqp_durable_queues,
+                   'auto_delete': conf.amqp_auto_delete,
+                   'exclusive': False}
+        options.update(kwargs)
+        exchange_name = rpc_amqp.get_control_exchange(conf)
+        super(TopicPublisher, self).__init__(channel,
+                                             exchange_name,
+                                             topic,
+                                             type='topic',
+                                             **options)
+
+
+class FanoutPublisher(Publisher):
+    """Publisher class for 'fanout'."""
+    def __init__(self, conf, channel, topic, **kwargs):
+        """init a 'fanout' publisher.
+
+        Kombu options may be passed as keyword args to override defaults
+        """
+        options = {'durable': False,
+                   'auto_delete': True,
+                   'exclusive': False}
+        options.update(kwargs)
+        super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
+                                              None, type='fanout', **options)
+
+
+class NotifyPublisher(TopicPublisher):
+    """Publisher class for 'notify'."""
+
+    def __init__(self, conf, channel, topic, **kwargs):
+        self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
+        self.queue_arguments = _get_queue_arguments(conf)
+        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
+
+    def reconnect(self, channel):
+        super(NotifyPublisher, self).reconnect(channel)
+
+        # NOTE(jerdfelt): Normally the consumer would create the queue, but
+        # we do this to ensure that messages don't get dropped if the
+        # consumer is started after we do
+        queue = kombu.entity.Queue(channel=channel,
+                                   exchange=self.exchange,
+                                   durable=self.durable,
+                                   name=self.routing_key,
+                                   routing_key=self.routing_key,
+                                   queue_arguments=self.queue_arguments)
+        queue.declare()
+
+
+class Connection(object):
+    """Connection object."""
+
+    pool = None
+
+    def __init__(self, conf, server_params=None):
+        self.consumers = []
+        self.consumer_thread = None
+        self.proxy_callbacks = []
+        self.conf = conf
+        self.max_retries = self.conf.rabbit_max_retries
+        # Try forever?
+        if self.max_retries <= 0:
+            self.max_retries = None
+        self.interval_start = self.conf.rabbit_retry_interval
+        self.interval_stepping = self.conf.rabbit_retry_backoff
+        # max retry-interval = 30 seconds
+        self.interval_max = 30
+        self.memory_transport = False
+
+        if server_params is None:
+            server_params = {}
+        # Keys to translate from server_params to kombu params
+        server_params_to_kombu_params = {'username': 'userid'}
+
+        ssl_params = self._fetch_ssl_params()
+        params_list = []
+        for adr in self.conf.rabbit_hosts:
+            hostname, port = network_utils.parse_host_port(
+                adr, default_port=self.conf.rabbit_port)
+
+            params = {
+                'hostname': hostname,
+                'port': port,
+                'userid': self.conf.rabbit_userid,
+                'password': self.conf.rabbit_password,
+                'virtual_host': self.conf.rabbit_virtual_host,
+            }
+
+            for sp_key, value in server_params.iteritems():
+                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
+                params[p_key] = value
+
+            if self.conf.fake_rabbit:
+                params['transport'] = 'memory'
+            if self.conf.rabbit_use_ssl:
+                params['ssl'] = ssl_params
+
+            params_list.append(params)
+
+        self.params_list = params_list
+
+        self.memory_transport = self.conf.fake_rabbit
+
+        self.connection = None
+        self.reconnect()
+
+    def _fetch_ssl_params(self):
+        """Handles fetching what ssl params should be used for the connection
+        (if any).
+        """
+        ssl_params = dict()
+
+        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
+        if self.conf.kombu_ssl_version:
+            ssl_params['ssl_version'] = sslutils.validate_ssl_version(
+                self.conf.kombu_ssl_version)
+        if self.conf.kombu_ssl_keyfile:
+            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
+        if self.conf.kombu_ssl_certfile:
+            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
+        if self.conf.kombu_ssl_ca_certs:
+            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
+            # We might want to allow variations in the
+            # future with this?
+            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
+
+        if not ssl_params:
+            # Just have the default behavior
+            return True
+        else:
+            # Return the extended behavior
+            return ssl_params
+
+    def _connect(self, params):
+        """Connect to rabbit.  Re-establish any queues that may have
+        been declared before if we are reconnecting.  Exceptions should
+        be handled by the caller.
+        """
+        if self.connection:
+            LOG.info(_("Reconnecting to AMQP server on "
+                     "%(hostname)s:%(port)d") % params)
+            try:
+                self.connection.release()
+            except self.connection_errors:
+                pass
+            # Setting this in case the next statement fails, though
+            # it shouldn't be doing any network operations, yet.
+            self.connection = None
+        self.connection = kombu.connection.BrokerConnection(**params)
+        self.connection_errors = self.connection.connection_errors
+        if self.memory_transport:
+            # Kludge to speed up tests.
+            self.connection.transport.polling_interval = 0.0
+        self.consumer_num = itertools.count(1)
+        self.connection.connect()
+        self.channel = self.connection.channel()
+        # work around 'memory' transport bug in 1.1.3
+        if self.memory_transport:
+            self.channel._new_queue('ae.undeliver')
+        for consumer in self.consumers:
+            consumer.reconnect(self.channel)
+        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
+                 params)
+
+    def reconnect(self):
+        """Handles reconnecting and re-establishing queues.
+        Will retry up to self.max_retries number of times.
+        self.max_retries = 0 means to retry forever.
+        Sleep between tries, starting at self.interval_start
+        seconds, backing off self.interval_stepping number of seconds
+        each attempt.
+        """
+
+        attempt = 0
+        while True:
+            params = self.params_list[attempt % len(self.params_list)]
+            attempt += 1
+            try:
+                self._connect(params)
+                return
+            except (IOError, self.connection_errors) as e:
+                pass
+            except Exception as e:
+                # NOTE(comstud): Unfortunately it's possible for amqplib
+                # to return an error not covered by its transport
+                # connection_errors in the case of a timeout waiting for
+                # a protocol response.  (See paste link in LP888621)
+                # So, we check all exceptions for 'timeout' in them
+                # and try to reconnect in this case.
+                if 'timeout' not in str(e):
+                    raise
+
+            log_info = {}
+            log_info['err_str'] = str(e)
+            log_info['max_retries'] = self.max_retries
+            log_info.update(params)
+
+            if self.max_retries and attempt == self.max_retries:
+                msg = _('Unable to connect to AMQP server on '
+                        '%(hostname)s:%(port)d after %(max_retries)d '
+                        'tries: %(err_str)s') % log_info
+                LOG.error(msg)
+                raise rpc_common.RPCException(msg)
+
+            if attempt == 1:
+                sleep_time = self.interval_start or 1
+            elif attempt > 1:
+                sleep_time += self.interval_stepping
+            if self.interval_max:
+                sleep_time = min(sleep_time, self.interval_max)
+
+            log_info['sleep_time'] = sleep_time
+            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
+                        'unreachable: %(err_str)s. Trying again in '
+                        '%(sleep_time)d seconds.') % log_info)
+            time.sleep(sleep_time)
+
+    def ensure(self, error_callback, method, *args, **kwargs):
+        while True:
+            try:
+                return method(*args, **kwargs)
+            except (self.connection_errors, socket.timeout, IOError) as e:
+                if error_callback:
+                    error_callback(e)
+            except Exception as e:
+                # NOTE(comstud): Unfortunately it's possible for amqplib
+                # to return an error not covered by its transport
+                # connection_errors in the case of a timeout waiting for
+                # a protocol response.  (See paste link in LP888621)
+                # So, we check all exceptions for 'timeout' in them
+                # and try to reconnect in this case.
+                if 'timeout' not in str(e):
+                    raise
+                if error_callback:
+                    error_callback(e)
+            self.reconnect()
+
+    def get_channel(self):
+        """Convenience call for bin/clear_rabbit_queues."""
+        return self.channel
+
+    def close(self):
+        """Close/release this connection."""
+        self.cancel_consumer_thread()
+        self.wait_on_proxy_callbacks()
+        self.connection.release()
+        self.connection = None
+
+    def reset(self):
+        """Reset a connection so it can be used again."""
+        self.cancel_consumer_thread()
+        self.wait_on_proxy_callbacks()
+        self.channel.close()
+        self.channel = self.connection.channel()
+        # work around 'memory' transport bug in 1.1.3
+        if self.memory_transport:
+            self.channel._new_queue('ae.undeliver')
+        self.consumers = []
+
+    def declare_consumer(self, consumer_cls, topic, callback):
+        """Create a Consumer using the class that was passed in and
+        add it to our list of consumers
+        """
+
+        def _connect_error(exc):
+            log_info = {'topic': topic, 'err_str': str(exc)}
+            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+                      "%(err_str)s") % log_info)
+
+        def _declare_consumer():
+            consumer = consumer_cls(self.conf, self.channel, topic, callback,
+                                    self.consumer_num.next())
+            self.consumers.append(consumer)
+            return consumer
+
+        return self.ensure(_connect_error, _declare_consumer)
+
+    def iterconsume(self, limit=None, timeout=None):
+        """Return an iterator that will consume from all queues/consumers."""
+
+        info = {'do_consume': True}
+
+        def _error_callback(exc):
+            if isinstance(exc, socket.timeout):
+                LOG.debug(_('Timed out waiting for RPC response: %s') %
+                          str(exc))
+                raise rpc_common.Timeout()
+            else:
+                LOG.exception(_('Failed to consume message from queue: %s') %
+                              str(exc))
+                info['do_consume'] = True
+
+        def _consume():
+            if info['do_consume']:
+                queues_head = self.consumers[:-1]  # not fanout.
+                queues_tail = self.consumers[-1]  # fanout
+                for queue in queues_head:
+                    queue.consume(nowait=True)
+                queues_tail.consume(nowait=False)
+                info['do_consume'] = False
+            return self.connection.drain_events(timeout=timeout)
+
+        for iteration in itertools.count(0):
+            if limit and iteration >= limit:
+                raise StopIteration
+            yield self.ensure(_error_callback, _consume)
+
+    def cancel_consumer_thread(self):
+        """Cancel a consumer thread."""
+        if self.consumer_thread is not None:
+            self.consumer_thread.kill()
+            try:
+                self.consumer_thread.wait()
+            except greenlet.GreenletExit:
+                pass
+            self.consumer_thread = None
+
+    def wait_on_proxy_callbacks(self):
+        """Wait for all proxy callback threads to exit."""
+        for proxy_cb in self.proxy_callbacks:
+            proxy_cb.wait()
+
+    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
+        """Send to a publisher based on the publisher class."""
+
+        def _error_callback(exc):
+            log_info = {'topic': topic, 'err_str': str(exc)}
+            LOG.exception(_("Failed to publish message to topic "
+                          "'%(topic)s': %(err_str)s") % log_info)
+
+        def _publish():
+            publisher = cls(self.conf, self.channel, topic, **kwargs)
+            publisher.send(msg, timeout)
+
+        self.ensure(_error_callback, _publish)
+
+    def declare_direct_consumer(self, topic, callback):
+        """Create a 'direct' queue.
+        In nova's use, this is generally a msg_id queue used for
+        responses for call/multicall
+        """
+        self.declare_consumer(DirectConsumer, topic, callback)
+
+    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
+                               exchange_name=None, ack_on_error=True):
+        """Create a 'topic' consumer."""
+        self.declare_consumer(functools.partial(TopicConsumer,
+                                                name=queue_name,
+                                                exchange_name=exchange_name,
+                                                ack_on_error=ack_on_error,
+                                                ),
+                              topic, callback)
+
+    def declare_fanout_consumer(self, topic, callback):
+        """Create a 'fanout' consumer."""
+        self.declare_consumer(FanoutConsumer, topic, callback)
+
+    def direct_send(self, msg_id, msg):
+        """Send a 'direct' message."""
+        self.publisher_send(DirectPublisher, msg_id, msg)
+
+    def topic_send(self, topic, msg, timeout=None):
+        """Send a 'topic' message."""
+        self.publisher_send(TopicPublisher, topic, msg, timeout)
+
+    def fanout_send(self, topic, msg):
+        """Send a 'fanout' message."""
+        self.publisher_send(FanoutPublisher, topic, msg)
+
+    def notify_send(self, topic, msg, **kwargs):
+        """Send a notify message on a topic."""
+        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
+
+    def consume(self, limit=None):
+        """Consume from all queues/consumers."""
+        it = self.iterconsume(limit=limit)
+        while True:
+            try:
+                it.next()
+            except StopIteration:
+                return
+
+    def consume_in_thread(self):
+        """Consumer from all queues/consumers in a greenthread."""
+        @excutils.forever_retry_uncaught_exceptions
+        def _consumer_thread():
+            try:
+                self.consume()
+            except greenlet.GreenletExit:
+                return
+        if self.consumer_thread is None:
+            self.consumer_thread = eventlet.spawn(_consumer_thread)
+        return self.consumer_thread
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer that calls a method in a proxy object."""
+        proxy_cb = rpc_amqp.ProxyCallback(
+            self.conf, proxy,
+            rpc_amqp.get_connection_pool(self.conf, Connection))
+        self.proxy_callbacks.append(proxy_cb)
+
+        if fanout:
+            self.declare_fanout_consumer(topic, proxy_cb)
+        else:
+            self.declare_topic_consumer(topic, proxy_cb)
+
+    def create_worker(self, topic, proxy, pool_name):
+        """Create a worker that calls a method in a proxy object."""
+        proxy_cb = rpc_amqp.ProxyCallback(
+            self.conf, proxy,
+            rpc_amqp.get_connection_pool(self.conf, Connection))
+        self.proxy_callbacks.append(proxy_cb)
+        self.declare_topic_consumer(topic, proxy_cb, pool_name)
+
+    def join_consumer_pool(self, callback, pool_name, topic,
+                           exchange_name=None, ack_on_error=True):
+        """Register as a member of a group of consumers for a given topic from
+        the specified exchange.
+
+        Exactly one member of a given pool will receive each message.
+
+        A message will be delivered to multiple pools, if more than
+        one is created.
+        """
+        callback_wrapper = rpc_amqp.CallbackWrapper(
+            conf=self.conf,
+            callback=callback,
+            connection_pool=rpc_amqp.get_connection_pool(self.conf,
+                                                         Connection),
+        )
+        self.proxy_callbacks.append(callback_wrapper)
+        self.declare_topic_consumer(
+            queue_name=pool_name,
+            topic=topic,
+            exchange_name=exchange_name,
+            callback=callback_wrapper,
+            ack_on_error=ack_on_error,
+        )
+
+
+def create_connection(conf, new=True):
+    """Create a connection."""
+    return rpc_amqp.create_connection(
+        conf, new,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+    """Make a call that returns multiple times."""
+    return rpc_amqp.multicall(
+        conf, context, topic, msg, timeout,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def call(conf, context, topic, msg, timeout=None):
+    """Sends a message on a topic and wait for a response."""
+    return rpc_amqp.call(
+        conf, context, topic, msg, timeout,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast(conf, context, topic, msg):
+    """Sends a message on a topic without waiting for a response."""
+    return rpc_amqp.cast(
+        conf, context, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast(conf, context, topic, msg):
+    """Sends a message on a fanout exchange without waiting for a response."""
+    return rpc_amqp.fanout_cast(
+        conf, context, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a topic to a specific server."""
+    return rpc_amqp.cast_to_server(
+        conf, context, server_params, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a fanout exchange to a specific server."""
+    return rpc_amqp.fanout_cast_to_server(
+        conf, context, server_params, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def notify(conf, context, topic, msg, envelope):
+    """Sends a notification event on a topic."""
+    return rpc_amqp.notify(
+        conf, context, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection),
+        envelope)
+
+
+def cleanup():
+    return rpc_amqp.cleanup(Connection.pool)
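+
+
+# Usage sketch (hypothetical values): with this kombu driver selected as the
+# RPC backend, a blocking request/response looks like
+#     call(cfg.CONF, ctxt, 'conductor', {'method': 'ping', 'args': {}},
+#          timeout=10)
+# while cast() sends the same message without waiting for a reply.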
diff --git a/conductor/openstack/common/rpc/impl_qpid.py b/conductor/openstack/common/rpc/impl_qpid.py
new file mode 100644
index 0000000..0067ca4
--- /dev/null
+++ b/conductor/openstack/common/rpc/impl_qpid.py
@@ -0,0 +1,739 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011 OpenStack Foundation
+#    Copyright 2011 - 2012, Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import functools
+import itertools
+import time
+import uuid
+
+import eventlet
+import greenlet
+from oslo.config import cfg
+
+from conductor.openstack.common import excutils
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import importutils
+from conductor.openstack.common import jsonutils
+from conductor.openstack.common import log as logging
+from conductor.openstack.common.rpc import amqp as rpc_amqp
+from conductor.openstack.common.rpc import common as rpc_common
+
+qpid_codec = importutils.try_import("qpid.codec010")
+qpid_messaging = importutils.try_import("qpid.messaging")
+qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
+
+LOG = logging.getLogger(__name__)
+
+qpid_opts = [
+    cfg.StrOpt('qpid_hostname',
+               default='localhost',
+               help='Qpid broker hostname'),
+    cfg.IntOpt('qpid_port',
+               default=5672,
+               help='Qpid broker port'),
+    cfg.ListOpt('qpid_hosts',
+                default=['$qpid_hostname:$qpid_port'],
+                help='Qpid HA cluster host:port pairs'),
+    cfg.StrOpt('qpid_username',
+               default='',
+               help='Username for qpid connection'),
+    cfg.StrOpt('qpid_password',
+               default='',
+               help='Password for qpid connection',
+               secret=True),
+    cfg.StrOpt('qpid_sasl_mechanisms',
+               default='',
+               help='Space separated list of SASL mechanisms to use for auth'),
+    cfg.IntOpt('qpid_heartbeat',
+               default=60,
+               help='Seconds between connection keepalive heartbeats'),
+    cfg.StrOpt('qpid_protocol',
+               default='tcp',
+               help="Transport to use, either 'tcp' or 'ssl'"),
+    cfg.BoolOpt('qpid_tcp_nodelay',
+                default=True,
+                help='Disable Nagle algorithm'),
+]
+
+cfg.CONF.register_opts(qpid_opts)
+
+JSON_CONTENT_TYPE = 'application/json; charset=utf8'
+
+
+class ConsumerBase(object):
+    """Consumer base class."""
+
+    def __init__(self, session, callback, node_name, node_opts,
+                 link_name, link_opts):
+        """Declare a queue on an amqp session.
+
+        'session' is the amqp session to use
+        'callback' is the callback to call when messages are received
+        'node_name' is the first part of the Qpid address string, before ';'
+        'node_opts' will be applied to the "x-declare" section of "node"
+                    in the address string.
+        'link_name' goes into the "name" field of the "link" in the address
+                    string
+        'link_opts' will be applied to the "x-declare" section of "link"
+                    in the address string.
+        """
+        self.callback = callback
+        self.receiver = None
+        self.session = None
+
+        addr_opts = {
+            "create": "always",
+            "node": {
+                "type": "topic",
+                "x-declare": {
+                    "durable": True,
+                    "auto-delete": True,
+                },
+            },
+            "link": {
+                "name": link_name,
+                "durable": True,
+                "x-declare": {
+                    "durable": False,
+                    "auto-delete": True,
+                    "exclusive": False,
+                },
+            },
+        }
+        addr_opts["node"]["x-declare"].update(node_opts)
+        addr_opts["link"]["x-declare"].update(link_opts)
+
+        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+
+        self.connect(session)
+
+    def connect(self, session):
+        """Declare the reciever on connect."""
+        self._declare_receiver(session)
+
+    def reconnect(self, session):
+        """Re-declare the receiver after a qpid reconnect."""
+        self._declare_receiver(session)
+
+    def _declare_receiver(self, session):
+        self.session = session
+        self.receiver = session.receiver(self.address)
+        self.receiver.capacity = 1
+
+    def _unpack_json_msg(self, msg):
+        """Load the JSON data in msg if msg.content_type indicates that it
+           is necessary.  Put the loaded data back into msg.content and
+           update msg.content_type appropriately.
+
+        A Qpid Message containing a dict will have a content_type of
+        'amqp/map', whereas one containing a string that needs to be converted
+        back from JSON will have a content_type of JSON_CONTENT_TYPE.
+
+        :param msg: a Qpid Message object
+        :returns: None
+        """
+        if msg.content_type == JSON_CONTENT_TYPE:
+            msg.content = jsonutils.loads(msg.content)
+            msg.content_type = 'amqp/map'
+
+    def consume(self):
+        """Fetch the message and pass it to the callback object."""
+        message = self.receiver.fetch()
+        try:
+            self._unpack_json_msg(message)
+            msg = rpc_common.deserialize_msg(message.content)
+            self.callback(msg)
+        except Exception:
+            LOG.exception(_("Failed to process message... skipping it."))
+        finally:
+            # TODO(sandy): Need support for optional ack_on_error.
+            self.session.acknowledge(message)
+
+    def get_receiver(self):
+        return self.receiver
+
+    def get_node_name(self):
+        return self.address.split(';')[0]
+
+
+class DirectConsumer(ConsumerBase):
+    """Queue/consumer class for 'direct'."""
+
+    def __init__(self, conf, session, msg_id, callback):
+        """Init a 'direct' queue.
+
+        'session' is the amqp session to use
+        'msg_id' is the msg_id to listen on
+        'callback' is the callback to call when messages are received
+        """
+
+        super(DirectConsumer, self).__init__(
+            session, callback,
+            "%s/%s" % (msg_id, msg_id),
+            {"type": "direct"},
+            msg_id,
+            {
+                "auto-delete": conf.amqp_auto_delete,
+                "exclusive": True,
+                "durable": conf.amqp_durable_queues,
+            })
+
+
+class TopicConsumer(ConsumerBase):
+    """Consumer class for 'topic'."""
+
+    def __init__(self, conf, session, topic, callback, name=None,
+                 exchange_name=None):
+        """Init a 'topic' queue.
+
+        :param session: the amqp session to use
+        :param topic: is the topic to listen on
+        :paramtype topic: str
+        :param callback: the callback to call when messages are received
+        :param name: optional queue name, defaults to topic
+        """
+
+        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
+        super(TopicConsumer, self).__init__(
+            session, callback,
+            "%s/%s" % (exchange_name, topic),
+            {}, name or topic,
+            {
+                "auto-delete": conf.amqp_auto_delete,
+                "durable": conf.amqp_durable_queues,
+            })
+
+
+class FanoutConsumer(ConsumerBase):
+    """Consumer class for 'fanout'."""
+
+    def __init__(self, conf, session, topic, callback):
+        """Init a 'fanout' queue.
+
+        'session' is the amqp session to use
+        'topic' is the topic to listen on
+        'callback' is the callback to call when messages are received
+        """
+        self.conf = conf
+
+        super(FanoutConsumer, self).__init__(
+            session, callback,
+            "%s_fanout" % topic,
+            {"durable": False, "type": "fanout"},
+            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
+            {"exclusive": True})
+
+    def reconnect(self, session):
+        topic = self.get_node_name().rpartition('_fanout')[0]
+        params = {
+            'session': session,
+            'topic': topic,
+            'callback': self.callback,
+        }
+
+        self.__init__(conf=self.conf, **params)
+
+        super(FanoutConsumer, self).reconnect(session)
+
+
+class Publisher(object):
+    """Base Publisher class."""
+
+    def __init__(self, session, node_name, node_opts=None):
+        """Init the Publisher class with the exchange_name, routing_key,
+        and other options
+        """
+        self.sender = None
+        self.session = session
+
+        addr_opts = {
+            "create": "always",
+            "node": {
+                "type": "topic",
+                "x-declare": {
+                    "durable": False,
+                    # auto-delete isn't implemented for exchanges in qpid,
+                    # but put in here anyway
+                    "auto-delete": True,
+                },
+            },
+        }
+        if node_opts:
+            addr_opts["node"]["x-declare"].update(node_opts)
+
+        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+
+        self.reconnect(session)
+
+    def reconnect(self, session):
+        """Re-establish the Sender after a reconnection."""
+        self.sender = session.sender(self.address)
+
+    def _pack_json_msg(self, msg):
+        """Qpid cannot serialize dicts containing strings longer than 65535
+           characters.  This function dumps the message content to a JSON
+           string, which Qpid is able to handle.
+
+        :param msg: May be either a Qpid Message object or a bare dict.
+        :returns: A Qpid Message with its content field JSON encoded.
+        """
+        try:
+            msg.content = jsonutils.dumps(msg.content)
+        except AttributeError:
+            # Need to have a Qpid message so we can set the content_type.
+            msg = qpid_messaging.Message(jsonutils.dumps(msg))
+        msg.content_type = JSON_CONTENT_TYPE
+        return msg
+
+    def send(self, msg):
+        """Send a message."""
+        try:
+            # Check if Qpid can encode the message
+            check_msg = msg
+            if not hasattr(check_msg, 'content_type'):
+                check_msg = qpid_messaging.Message(msg)
+            content_type = check_msg.content_type
+            enc, dec = qpid_messaging.message.get_codec(content_type)
+            enc(check_msg.content)
+        except qpid_codec.CodecException:
+            # This means the message couldn't be serialized as a dict.
+            msg = self._pack_json_msg(msg)
+        self.sender.send(msg)
+
+
+class DirectPublisher(Publisher):
+    """Publisher class for 'direct'."""
+    def __init__(self, conf, session, msg_id):
+        """Init a 'direct' publisher."""
+        super(DirectPublisher, self).__init__(session, msg_id,
+                                              {"type": "Direct"})
+
+
+class TopicPublisher(Publisher):
+    """Publisher class for 'topic'."""
+    def __init__(self, conf, session, topic):
+        """init a 'topic' publisher.
+        """
+        exchange_name = rpc_amqp.get_control_exchange(conf)
+        super(TopicPublisher, self).__init__(session,
+                                             "%s/%s" % (exchange_name, topic))
+
+
+class FanoutPublisher(Publisher):
+    """Publisher class for 'fanout'."""
+    def __init__(self, conf, session, topic):
+        """init a 'fanout' publisher.
+        """
+        super(FanoutPublisher, self).__init__(
+            session,
+            "%s_fanout" % topic, {"type": "fanout"})
+
+
+class NotifyPublisher(Publisher):
+    """Publisher class for notifications."""
+    def __init__(self, conf, session, topic):
+        """init a 'topic' publisher.
+        """
+        exchange_name = rpc_amqp.get_control_exchange(conf)
+        super(NotifyPublisher, self).__init__(session,
+                                              "%s/%s" % (exchange_name, topic),
+                                              {"durable": True})
+
+
+class Connection(object):
+    """Connection object."""
+
+    pool = None
+
+    def __init__(self, conf, server_params=None):
+        if not qpid_messaging:
+            raise ImportError("Failed to import qpid.messaging")
+
+        self.session = None
+        self.consumers = {}
+        self.consumer_thread = None
+        self.proxy_callbacks = []
+        self.conf = conf
+
+        if server_params and 'hostname' in server_params:
+            # NOTE(russellb) This enables support for cast_to_server.
+            server_params['qpid_hosts'] = [
+                '%s:%d' % (server_params['hostname'],
+                           server_params.get('port', 5672))
+            ]
+
+        params = {
+            'qpid_hosts': self.conf.qpid_hosts,
+            'username': self.conf.qpid_username,
+            'password': self.conf.qpid_password,
+        }
+        params.update(server_params or {})
+
+        self.brokers = params['qpid_hosts']
+        self.username = params['username']
+        self.password = params['password']
+        self.connection_create(self.brokers[0])
+        self.reconnect()
+
+    def connection_create(self, broker):
+        # Create the connection - this does not open the connection
+        self.connection = qpid_messaging.Connection(broker)
+
+        # Check if flags are set and if so set them for the connection
+        # before we call open
+        self.connection.username = self.username
+        self.connection.password = self.password
+
+        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
+        # Reconnection is done by self.reconnect()
+        self.connection.reconnect = False
+        self.connection.heartbeat = self.conf.qpid_heartbeat
+        self.connection.transport = self.conf.qpid_protocol
+        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
+
+    def _register_consumer(self, consumer):
+        self.consumers[str(consumer.get_receiver())] = consumer
+
+    def _lookup_consumer(self, receiver):
+        return self.consumers[str(receiver)]
+
+    def reconnect(self):
+        """Handles reconnecting and re-establishing sessions and queues."""
+        attempt = 0
+        delay = 1
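+        # Cycle through the configured brokers, doubling the sleep between
+        # failed attempts (1, 2, 4, ... seconds) up to a 60 second cap.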
+        while True:
+            # Close the session if necessary
+            if self.connection.opened():
+                try:
+                    self.connection.close()
+                except qpid_exceptions.ConnectionError:
+                    pass
+
+            broker = self.brokers[attempt % len(self.brokers)]
+            attempt += 1
+
+            try:
+                self.connection_create(broker)
+                self.connection.open()
+            except qpid_exceptions.ConnectionError as e:
+                msg_dict = dict(e=e, delay=delay)
+                msg = _("Unable to connect to AMQP server: %(e)s. "
+                        "Sleeping %(delay)s seconds") % msg_dict
+                LOG.error(msg)
+                time.sleep(delay)
+                delay = min(2 * delay, 60)
+            else:
+                LOG.info(_('Connected to AMQP server on %s'), broker)
+                break
+
+        self.session = self.connection.session()
+
+        if self.consumers:
+            consumers = self.consumers
+            self.consumers = {}
+
+            for consumer in consumers.itervalues():
+                consumer.reconnect(self.session)
+                self._register_consumer(consumer)
+
+            LOG.debug(_("Re-established AMQP queues"))
+
+    def ensure(self, error_callback, method, *args, **kwargs):
+        while True:
+            try:
+                return method(*args, **kwargs)
+            except (qpid_exceptions.Empty,
+                    qpid_exceptions.ConnectionError) as e:
+                if error_callback:
+                    error_callback(e)
+                self.reconnect()
+
+    def close(self):
+        """Close/release this connection."""
+        self.cancel_consumer_thread()
+        self.wait_on_proxy_callbacks()
+        try:
+            self.connection.close()
+        except Exception:
+            # NOTE(dripton) Logging exceptions that happen during cleanup just
+            # causes confusion; there's really nothing useful we can do with
+            # them.
+            pass
+        self.connection = None
+
+    def reset(self):
+        """Reset a connection so it can be used again."""
+        self.cancel_consumer_thread()
+        self.wait_on_proxy_callbacks()
+        self.session.close()
+        self.session = self.connection.session()
+        self.consumers = {}
+
+    def declare_consumer(self, consumer_cls, topic, callback):
+        """Create a Consumer using the class that was passed in and
+        add it to our list of consumers
+        """
+        def _connect_error(exc):
+            log_info = {'topic': topic, 'err_str': str(exc)}
+            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+                      "%(err_str)s") % log_info)
+
+        def _declare_consumer():
+            consumer = consumer_cls(self.conf, self.session, topic, callback)
+            self._register_consumer(consumer)
+            return consumer
+
+        return self.ensure(_connect_error, _declare_consumer)
+
+    def iterconsume(self, limit=None, timeout=None):
+        """Return an iterator that will consume from all queues/consumers."""
+
+        def _error_callback(exc):
+            if isinstance(exc, qpid_exceptions.Empty):
+                LOG.debug(_('Timed out waiting for RPC response: %s') %
+                          str(exc))
+                raise rpc_common.Timeout()
+            else:
+                LOG.exception(_('Failed to consume message from queue: %s') %
+                              str(exc))
+
+        def _consume():
+            nxt_receiver = self.session.next_receiver(timeout=timeout)
+            try:
+                self._lookup_consumer(nxt_receiver).consume()
+            except Exception:
+                LOG.exception(_("Error processing message.  Skipping it."))
+
+        for iteration in itertools.count(0):
+            if limit and iteration >= limit:
+                raise StopIteration
+            yield self.ensure(_error_callback, _consume)
+
+    def cancel_consumer_thread(self):
+        """Cancel a consumer thread."""
+        if self.consumer_thread is not None:
+            self.consumer_thread.kill()
+            try:
+                self.consumer_thread.wait()
+            except greenlet.GreenletExit:
+                pass
+            self.consumer_thread = None
+
+    def wait_on_proxy_callbacks(self):
+        """Wait for all proxy callback threads to exit."""
+        for proxy_cb in self.proxy_callbacks:
+            proxy_cb.wait()
+
+    def publisher_send(self, cls, topic, msg):
+        """Send to a publisher based on the publisher class."""
+
+        def _connect_error(exc):
+            log_info = {'topic': topic, 'err_str': str(exc)}
+            LOG.exception(_("Failed to publish message to topic "
+                          "'%(topic)s': %(err_str)s") % log_info)
+
+        def _publisher_send():
+            publisher = cls(self.conf, self.session, topic)
+            publisher.send(msg)
+
+        return self.ensure(_connect_error, _publisher_send)
+
+    def declare_direct_consumer(self, topic, callback):
+        """Create a 'direct' queue.
+        In nova's use, this is generally a msg_id queue used for
+        responses for call/multicall
+        """
+        self.declare_consumer(DirectConsumer, topic, callback)
+
+    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
+                               exchange_name=None):
+        """Create a 'topic' consumer."""
+        self.declare_consumer(functools.partial(TopicConsumer,
+                                                name=queue_name,
+                                                exchange_name=exchange_name,
+                                                ),
+                              topic, callback)
+
+    def declare_fanout_consumer(self, topic, callback):
+        """Create a 'fanout' consumer."""
+        self.declare_consumer(FanoutConsumer, topic, callback)
+
+    def direct_send(self, msg_id, msg):
+        """Send a 'direct' message."""
+        self.publisher_send(DirectPublisher, msg_id, msg)
+
+    def topic_send(self, topic, msg, timeout=None):
+        """Send a 'topic' message."""
+        #
+        # We want to create a message with attributes, e.g. a TTL. We
+        # don't really need to keep 'msg' in its JSON format any longer
+        # so let's create an actual qpid message here and get some
+        # value-add on the go.
+        #
+        # WARNING: Request timeout happens to be in the same units as
+        # qpid's TTL (seconds). If this changes in the future, then this
+        # will need to be altered accordingly.
+        #
+        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
+        self.publisher_send(TopicPublisher, topic, qpid_message)
+
+    def fanout_send(self, topic, msg):
+        """Send a 'fanout' message."""
+        self.publisher_send(FanoutPublisher, topic, msg)
+
+    def notify_send(self, topic, msg, **kwargs):
+        """Send a notify message on a topic."""
+        self.publisher_send(NotifyPublisher, topic, msg)
+
+    def consume(self, limit=None):
+        """Consume from all queues/consumers."""
+        it = self.iterconsume(limit=limit)
+        while True:
+            try:
+                it.next()
+            except StopIteration:
+                return
+
+    def consume_in_thread(self):
+        """Consumer from all queues/consumers in a greenthread."""
+        @excutils.forever_retry_uncaught_exceptions
+        def _consumer_thread():
+            try:
+                self.consume()
+            except greenlet.GreenletExit:
+                return
+        if self.consumer_thread is None:
+            self.consumer_thread = eventlet.spawn(_consumer_thread)
+        return self.consumer_thread
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer that calls a method in a proxy object."""
+        proxy_cb = rpc_amqp.ProxyCallback(
+            self.conf, proxy,
+            rpc_amqp.get_connection_pool(self.conf, Connection))
+        self.proxy_callbacks.append(proxy_cb)
+
+        if fanout:
+            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
+        else:
+            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
+
+        self._register_consumer(consumer)
+
+        return consumer
+
+    def create_worker(self, topic, proxy, pool_name):
+        """Create a worker that calls a method in a proxy object."""
+        proxy_cb = rpc_amqp.ProxyCallback(
+            self.conf, proxy,
+            rpc_amqp.get_connection_pool(self.conf, Connection))
+        self.proxy_callbacks.append(proxy_cb)
+
+        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
+                                 name=pool_name)
+
+        self._register_consumer(consumer)
+
+        return consumer
+
+    def join_consumer_pool(self, callback, pool_name, topic,
+                           exchange_name=None, ack_on_error=True):
+        """Register as a member of a group of consumers for a given topic from
+        the specified exchange.
+
+        Exactly one member of a given pool will receive each message.
+
+        A message will be delivered to multiple pools, if more than
+        one is created.
+        """
+        callback_wrapper = rpc_amqp.CallbackWrapper(
+            conf=self.conf,
+            callback=callback,
+            connection_pool=rpc_amqp.get_connection_pool(self.conf,
+                                                         Connection),
+        )
+        self.proxy_callbacks.append(callback_wrapper)
+
+        consumer = TopicConsumer(conf=self.conf,
+                                 session=self.session,
+                                 topic=topic,
+                                 callback=callback_wrapper,
+                                 name=pool_name,
+                                 exchange_name=exchange_name)
+
+        self._register_consumer(consumer)
+        return consumer
+
+
+def create_connection(conf, new=True):
+    """Create a connection."""
+    return rpc_amqp.create_connection(
+        conf, new,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+    """Make a call that returns multiple times."""
+    return rpc_amqp.multicall(
+        conf, context, topic, msg, timeout,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def call(conf, context, topic, msg, timeout=None):
+    """Sends a message on a topic and wait for a response."""
+    return rpc_amqp.call(
+        conf, context, topic, msg, timeout,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast(conf, context, topic, msg):
+    """Sends a message on a topic without waiting for a response."""
+    return rpc_amqp.cast(
+        conf, context, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast(conf, context, topic, msg):
+    """Sends a message on a fanout exchange without waiting for a response."""
+    return rpc_amqp.fanout_cast(
+        conf, context, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a topic to a specific server."""
+    return rpc_amqp.cast_to_server(
+        conf, context, server_params, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a fanout exchange to a specific server."""
+    return rpc_amqp.fanout_cast_to_server(
+        conf, context, server_params, topic, msg,
+        rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def notify(conf, context, topic, msg, envelope):
+    """Sends a notification event on a topic."""
+    return rpc_amqp.notify(conf, context, topic, msg,
+                           rpc_amqp.get_connection_pool(conf, Connection),
+                           envelope)
+
+
+def cleanup():
+    return rpc_amqp.cleanup(Connection.pool)
diff --git a/conductor/openstack/common/rpc/impl_zmq.py b/conductor/openstack/common/rpc/impl_zmq.py
new file mode 100644
index 0000000..38670be
--- /dev/null
+++ b/conductor/openstack/common/rpc/impl_zmq.py
@@ -0,0 +1,817 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011 Cloudscaling Group, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import pprint
+import re
+import socket
+import sys
+import types
+import uuid
+
+import eventlet
+import greenlet
+from oslo.config import cfg
+
+from conductor.openstack.common import excutils
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import importutils
+from conductor.openstack.common import jsonutils
+from conductor.openstack.common.rpc import common as rpc_common
+
+zmq = importutils.try_import('eventlet.green.zmq')
+
+# Aliases defined for convenience; they are not modified.
+pformat = pprint.pformat
+Timeout = eventlet.timeout.Timeout
+LOG = rpc_common.LOG
+RemoteError = rpc_common.RemoteError
+RPCException = rpc_common.RPCException
+
+zmq_opts = [
+    cfg.StrOpt('rpc_zmq_bind_address', default='*',
+               help='ZeroMQ bind address. Should be a wildcard (*), '
+                    'an ethernet interface, or IP. '
+                    'The "host" option should point or resolve to this '
+                    'address.'),
+
+    # The module.Class to use for matchmaking.
+    cfg.StrOpt(
+        'rpc_zmq_matchmaker',
+        default=('conductor.openstack.common.rpc.'
+                 'matchmaker.MatchMakerLocalhost'),
+        help='MatchMaker driver',
+    ),
+
+    # The following port is unassigned by IANA as of 2012-05-21
+    cfg.IntOpt('rpc_zmq_port', default=9501,
+               help='ZeroMQ receiver listening port'),
+
+    cfg.IntOpt('rpc_zmq_contexts', default=1,
+               help='Number of ZeroMQ contexts, defaults to 1'),
+
+    cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
+               help='Maximum number of ingress messages to locally buffer '
+                    'per topic. Default is unlimited.'),
+
+    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
+               help='Directory for holding IPC sockets'),
+
+    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
+               help='Name of this node. Must be a valid hostname, FQDN, or '
+                    'IP address. Must match "host" option, if running Nova.')
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(zmq_opts)
+
+ZMQ_CTX = None  # ZeroMQ Context, must be global.
+matchmaker = None  # memoized matchmaker object
+
+
+def _serialize(data):
+    """Serialization wrapper.
+
+    We prefer using JSON, but it cannot encode all types.
+    Error if a developer passes us bad data.
+    """
+    try:
+        return jsonutils.dumps(data, ensure_ascii=True)
+    except TypeError:
+        with excutils.save_and_reraise_exception():
+            LOG.error(_("JSON serialization failed."))
+
+
+def _deserialize(data):
+    """Deserialization wrapper."""
+    LOG.debug(_("Deserializing: %s"), data)
+    return jsonutils.loads(data)
+
+
+class ZmqSocket(object):
+    """A tiny wrapper around ZeroMQ.
+
+    Simplifies the send/recv protocol and connection management.
+    Can be used as a Context (supports the 'with' statement).
+    """
+
+    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
+        self.sock = _get_ctxt().socket(zmq_type)
+        self.addr = addr
+        self.type = zmq_type
+        self.subscriptions = []
+
+        # Support failures on sending/receiving on wrong socket type.
+        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
+        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
+        self.can_sub = zmq_type in (zmq.SUB, )
+
+        # Support list, str, & None for subscribe arg (cast to list)
+        do_sub = {
+            list: subscribe,
+            str: [subscribe],
+            type(None): []
+        }[type(subscribe)]
+
+        for f in do_sub:
+            self.subscribe(f)
+
+        str_data = {'addr': addr, 'type': self.socket_s(),
+                    'subscribe': subscribe, 'bind': bind}
+
+        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
+        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
+        LOG.debug(_("-> bind: %(bind)s"), str_data)
+
+        try:
+            if bind:
+                self.sock.bind(addr)
+            else:
+                self.sock.connect(addr)
+        except Exception:
+            raise RPCException(_("Could not open socket."))
+
+    def socket_s(self):
+        """Get socket type as string."""
+        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
+                  'DEALER')
+        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
+
+    def subscribe(self, msg_filter):
+        """Subscribe."""
+        if not self.can_sub:
+            raise RPCException("Cannot subscribe on this socket.")
+        LOG.debug(_("Subscribing to %s"), msg_filter)
+
+        try:
+            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
+        except Exception:
+            return
+
+        self.subscriptions.append(msg_filter)
+
+    def unsubscribe(self, msg_filter):
+        """Unsubscribe."""
+        if msg_filter not in self.subscriptions:
+            return
+        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
+        self.subscriptions.remove(msg_filter)
+
+    def close(self):
+        if self.sock is None or self.sock.closed:
+            return
+
+        # We must unsubscribe, or we'll leak descriptors.
+        if self.subscriptions:
+            for f in self.subscriptions:
+                try:
+                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
+                except Exception:
+                    pass
+            self.subscriptions = []
+
+        try:
+            # Default is to linger
+            self.sock.close()
+        except Exception:
+            # While this is a bad thing to happen,
+            # it would be much worse if some of the code calling this
+            # were to fail. For now, let's log, and later evaluate
+            # if we can safely raise here.
+            LOG.error("ZeroMQ socket could not be closed.")
+        self.sock = None
+
+    def recv(self, **kwargs):
+        if not self.can_recv:
+            raise RPCException(_("You cannot recv on this socket."))
+        return self.sock.recv_multipart(**kwargs)
+
+    def send(self, data, **kwargs):
+        if not self.can_send:
+            raise RPCException(_("You cannot send on this socket."))
+        self.sock.send_multipart(data, **kwargs)
+
+
+class ZmqClient(object):
+    """Client for ZMQ sockets."""
+
+    def __init__(self, addr):
+        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
+
+    def cast(self, msg_id, topic, data, envelope):
+        msg_id = msg_id or 0
+
+        if not envelope:
+            self.outq.send(map(bytes,
+                           (msg_id, topic, 'cast', _serialize(data))))
+            return
+
+        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
+        zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
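+        # Flatten the envelope dict into (k1, v1, k2, v2, ...); the receiver
+        # rebuilds it with unflatten_envelope().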
+        self.outq.send(map(bytes,
+                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
+
+    def close(self):
+        self.outq.close()
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+    """Context that supports replying to a rpc.call."""
+    def __init__(self, **kwargs):
+        self.replies = []
+        super(RpcContext, self).__init__(**kwargs)
+
+    def deepcopy(self):
+        values = self.to_dict()
+        values['replies'] = self.replies
+        return self.__class__(**values)
+
+    def reply(self, reply=None, failure=None, ending=False):
+        if ending:
+            return
+        self.replies.append(reply)
+
+    @classmethod
+    def marshal(self, ctx):
+        ctx_data = ctx.to_dict()
+        return _serialize(ctx_data)
+
+    @classmethod
+    def unmarshal(self, data):
+        return RpcContext.from_dict(_deserialize(data))
+
+
+class InternalContext(object):
+    """Used by ConsumerBase as a private context for - methods."""
+
+    def __init__(self, proxy):
+        self.proxy = proxy
+        self.msg_waiter = None
+
+    def _get_response(self, ctx, proxy, topic, data):
+        """Process a curried message and cast the result to topic."""
+        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
+        data.setdefault('version', None)
+        data.setdefault('args', {})
+
+        try:
+            result = proxy.dispatch(
+                ctx, data['version'], data['method'],
+                data.get('namespace'), **data['args'])
+            return ConsumerBase.normalize_reply(result, ctx.replies)
+        except greenlet.GreenletExit:
+            # ignore these since they are just from shutdowns
+            pass
+        except rpc_common.ClientException as e:
+            LOG.debug(_("Expected exception during message handling (%s)") %
+                      e._exc_info[1])
+            return {'exc':
+                    rpc_common.serialize_remote_exception(e._exc_info,
+                                                          log_failure=False)}
+        except Exception:
+            LOG.error(_("Exception during message handling"))
+            return {'exc':
+                    rpc_common.serialize_remote_exception(sys.exc_info())}
+
+    def reply(self, ctx, proxy,
+              msg_id=None, context=None, topic=None, msg=None):
+        """Reply to a casted call."""
+        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
+        #                  this may be able to be removed earlier than
+        #                  'I' if ConsumerBase.process were refactored.
+        if type(msg) is list:
+            payload = msg[-1]
+        else:
+            payload = msg
+
+        response = ConsumerBase.normalize_reply(
+            self._get_response(ctx, proxy, topic, payload),
+            ctx.replies)
+
+        LOG.debug(_("Sending reply"))
+        _multi_send(_cast, ctx, topic, {
+            'method': '-process_reply',
+            'args': {
+                'msg_id': msg_id,  # Include for Folsom compat.
+                'response': response
+            }
+        }, _msg_id=msg_id)
+
+
+class ConsumerBase(object):
+    """Base Consumer."""
+
+    def __init__(self):
+        self.private_ctx = InternalContext(None)
+
+    @classmethod
+    def normalize_reply(self, result, replies):
+        #TODO(ewindisch): re-evaluate and document this method.
+        if isinstance(result, types.GeneratorType):
+            return list(result)
+        elif replies:
+            return replies
+        else:
+            return [result]
+
+    def process(self, proxy, ctx, data):
+        data.setdefault('version', None)
+        data.setdefault('args', {})
+
+        # Methods whose names start with '-' are handled internally
+        # (not valid public method names).
+        method = data.get('method')
+        if not method:
+            LOG.error(_("RPC message did not include method."))
+            return
+
+        # Internal method
+        # uses internal context for safety.
+        if method == '-reply':
+            self.private_ctx.reply(ctx, proxy, **data['args'])
+            return
+
+        proxy.dispatch(ctx, data['version'],
+                       data['method'], data.get('namespace'), **data['args'])
+
+
+class ZmqBaseReactor(ConsumerBase):
+    """A consumer class implementing a centralized casting broker (PULL-PUSH).
+
+    Used for RoundRobin requests.
+    """
+
+    def __init__(self, conf):
+        super(ZmqBaseReactor, self).__init__()
+
+        self.proxies = {}
+        self.threads = []
+        self.sockets = []
+        self.subscribe = {}
+
+        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
+
+    def register(self, proxy, in_addr, zmq_type_in,
+                 in_bind=True, subscribe=None):
+
+        LOG.info(_("Registering reactor"))
+
+        if zmq_type_in not in (zmq.PULL, zmq.SUB):
+            raise RPCException("Bad input socktype")
+
+        # Items push in.
+        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
+                        subscribe=subscribe)
+
+        self.proxies[inq] = proxy
+        self.sockets.append(inq)
+
+        LOG.info(_("In reactor registered"))
+
+    def consume_in_thread(self):
+        def _consume(sock):
+            LOG.info(_("Consuming socket"))
+            while True:
+                self.consume(sock)
+
+        for k in self.proxies.keys():
+            self.threads.append(
+                self.pool.spawn(_consume, k)
+            )
+
+    def wait(self):
+        for t in self.threads:
+            t.wait()
+
+    def close(self):
+        for s in self.sockets:
+            s.close()
+
+        for t in self.threads:
+            t.kill()
+
+
+class ZmqProxy(ZmqBaseReactor):
+    """A consumer class implementing a topic-based proxy.
+
+    Forwards to IPC sockets.
+    """
+
+    def __init__(self, conf):
+        super(ZmqProxy, self).__init__(conf)
+        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
+        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
+
+        self.topic_proxy = {}
+
+    def consume(self, sock):
+        ipc_dir = CONF.rpc_zmq_ipc_dir
+
+        data = sock.recv(copy=False)
+        topic = data[1].bytes
+
+        if topic.startswith('fanout~'):
+            sock_type = zmq.PUB
+            topic = topic.split('.', 1)[0]
+        elif topic.startswith('zmq_replies'):
+            sock_type = zmq.PUB
+        else:
+            sock_type = zmq.PUSH
+
+        if topic not in self.topic_proxy:
+            def publisher(waiter):
+                LOG.info(_("Creating proxy for topic: %s"), topic)
+
+                try:
+                    # The topic is received over the network,
+                    # don't trust this input.
+                    if self.badchars.search(topic) is not None:
+                        emsg = _("Topic contained dangerous characters.")
+                        LOG.warn(emsg)
+                        raise RPCException(emsg)
+
+                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
+                                         (ipc_dir, topic),
+                                         sock_type, bind=True)
+                except RPCException:
+                    waiter.send_exception(*sys.exc_info())
+                    return
+
+                self.topic_proxy[topic] = eventlet.queue.LightQueue(
+                    CONF.rpc_zmq_topic_backlog)
+                self.sockets.append(out_sock)
+
+                # It takes some time for a pub socket to open,
+                # before we can have any faith in doing a send() to it.
+                if sock_type == zmq.PUB:
+                    eventlet.sleep(.5)
+
+                waiter.send(True)
+
+                while True:
+                    data = self.topic_proxy[topic].get()
+                    out_sock.send(data, copy=False)
+
+            wait_sock_creation = eventlet.event.Event()
+            eventlet.spawn(publisher, wait_sock_creation)
+
+            try:
+                wait_sock_creation.wait()
+            except RPCException:
+                LOG.error(_("Topic socket file creation failed."))
+                return
+
+        try:
+            self.topic_proxy[topic].put_nowait(data)
+        except eventlet.queue.Full:
+            LOG.error(_("Local per-topic backlog buffer full for topic "
+                        "%(topic)s. Dropping message.") % {'topic': topic})
+
+    def consume_in_thread(self):
+        """Runs the ZmqProxy service."""
+        ipc_dir = CONF.rpc_zmq_ipc_dir
+        consume_in = "tcp://%s:%s" % \
+            (CONF.rpc_zmq_bind_address,
+             CONF.rpc_zmq_port)
+        consumption_proxy = InternalContext(None)
+
+        try:
+            os.makedirs(ipc_dir)
+        except os.error:
+            if not os.path.isdir(ipc_dir):
+                with excutils.save_and_reraise_exception():
+                    LOG.error(_("Required IPC directory does not exist at"
+                                " %s") % (ipc_dir, ))
+        try:
+            self.register(consumption_proxy,
+                          consume_in,
+                          zmq.PULL)
+        except zmq.ZMQError:
+            if os.access(ipc_dir, os.X_OK):
+                with excutils.save_and_reraise_exception():
+                    LOG.error(_("Permission denied to IPC directory at"
+                                " %s") % (ipc_dir, ))
+            with excutils.save_and_reraise_exception():
+                LOG.error(_("Could not create ZeroMQ receiver daemon. "
+                            "Socket may already be in use."))
+
+        super(ZmqProxy, self).consume_in_thread()
+
+
+def unflatten_envelope(packenv):
+    """Unflattens the RPC envelope.
+
+    Takes a list and returns a dictionary.
+    i.e. [1,2,3,4] => {1: 2, 3: 4}
+    """
+    i = iter(packenv)
+    h = {}
+    try:
+        while True:
+            k = i.next()
+            h[k] = i.next()
+    except StopIteration:
+        return h
+
+
+class ZmqReactor(ZmqBaseReactor):
+    """A consumer class implementing a consumer for messages.
+
+    Can also be used as a 1:1 proxy
+    """
+
+    def __init__(self, conf):
+        super(ZmqReactor, self).__init__(conf)
+
+    def consume(self, sock):
+        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
+        data = sock.recv()
+        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
+
+        proxy = self.proxies[sock]
+
+        if data[2] == 'cast':  # Legacy protocol
+            packenv = data[3]
+
+            ctx, msg = _deserialize(packenv)
+            request = rpc_common.deserialize_msg(msg)
+            ctx = RpcContext.unmarshal(ctx)
+        elif data[2] == 'impl_zmq_v2':
+            packenv = data[4:]
+
+            msg = unflatten_envelope(packenv)
+            request = rpc_common.deserialize_msg(msg)
+
+            # Unmarshal only after verifying the message.
+            ctx = RpcContext.unmarshal(data[3])
+        else:
+            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
+            return
+
+        self.pool.spawn_n(self.process, proxy, ctx, request)
+
+
+class Connection(rpc_common.Connection):
+    """Manages connections and threads."""
+
+    def __init__(self, conf):
+        self.topics = []
+        self.reactor = ZmqReactor(conf)
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        # Register with matchmaker.
+        _get_matchmaker().register(topic, CONF.rpc_zmq_host)
+
+        # Subscription scenarios
+        if fanout:
+            sock_type = zmq.SUB
+            subscribe = ('', fanout)[type(fanout) == str]
+            topic = 'fanout~' + topic.split('.', 1)[0]
+        else:
+            sock_type = zmq.PULL
+            subscribe = None
+            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
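+        # Fanout consumers subscribe to 'fanout~<base_topic>' over PUB/SUB,
+        # while plain consumers PULL from '<base_topic>.<host>'.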
+
+        if topic in self.topics:
+            LOG.info(_("Skipping topic registration. Already registered."))
+            return
+
+        # Receive messages from (local) proxy
+        inaddr = "ipc://%s/zmq_topic_%s" % \
+            (CONF.rpc_zmq_ipc_dir, topic)
+
+        LOG.debug(_("Consumer is a zmq.%s"),
+                  ['PULL', 'SUB'][sock_type == zmq.SUB])
+
+        self.reactor.register(proxy, inaddr, sock_type,
+                              subscribe=subscribe, in_bind=False)
+        self.topics.append(topic)
+
+    def close(self):
+        _get_matchmaker().stop_heartbeat()
+        for topic in self.topics:
+            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
+
+        self.reactor.close()
+        self.topics = []
+
+    def wait(self):
+        self.reactor.wait()
+
+    def consume_in_thread(self):
+        _get_matchmaker().start_heartbeat()
+        self.reactor.consume_in_thread()
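
As a rough consumer-side sketch (illustrative, not part of the patch; assumes the rpc_zmq_* options are configured and a matchmaker backend is reachable, and EchoManager and the 'conductor' topic are made-up names):

    from conductor.openstack.common import rpc
    from conductor.openstack.common.rpc import dispatcher as rpc_dispatcher

    class EchoManager(object):
        """Hypothetical manager whose public methods become RPC endpoints."""
        def echo(self, context, message):
            return message

    conn = rpc.create_connection(new=True)
    dispatcher = rpc_dispatcher.RpcDispatcher([EchoManager()])
    conn.create_consumer('conductor', dispatcher, fanout=False)
    conn.consume_in_thread()  # starts the matchmaker heartbeat and the reactor
    conn.wait()
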
+
+
+def _cast(addr, context, topic, msg, timeout=None, envelope=False,
+          _msg_id=None):
+    timeout_cast = timeout or CONF.rpc_cast_timeout
+    payload = [RpcContext.marshal(context), msg]
+
+    with Timeout(timeout_cast, exception=rpc_common.Timeout):
+        try:
+            conn = ZmqClient(addr)
+
+            # assumes cast can't return an exception
+            conn.cast(_msg_id, topic, payload, envelope)
+        except zmq.ZMQError:
+            raise RPCException("Cast failed. ZMQ Socket Exception")
+        finally:
+            if 'conn' in vars():
+                conn.close()
+
+
+def _call(addr, context, topic, msg, timeout=None,
+          envelope=False):
+    # 'timeout' is how long we wait for a response
+    timeout = timeout or CONF.rpc_response_timeout
+
+    # The msg_id is used to track replies.
+    msg_id = uuid.uuid4().hex
+
+    # Replies always come into the reply service.
+    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
+
+    LOG.debug(_("Creating payload"))
+    # Curry the original request into a reply method.
+    mcontext = RpcContext.marshal(context)
+    payload = {
+        'method': '-reply',
+        'args': {
+            'msg_id': msg_id,
+            'topic': reply_topic,
+            # TODO(ewindisch): safe to remove mcontext in I.
+            'msg': [mcontext, msg]
+        }
+    }
+
+    LOG.debug(_("Creating queue socket for reply waiter"))
+
+    # Messages arriving async.
+    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
+    with Timeout(timeout, exception=rpc_common.Timeout):
+        try:
+            msg_waiter = ZmqSocket(
+                "ipc://%s/zmq_topic_zmq_replies.%s" %
+                (CONF.rpc_zmq_ipc_dir,
+                 CONF.rpc_zmq_host),
+                zmq.SUB, subscribe=msg_id, bind=False
+            )
+
+            LOG.debug(_("Sending cast"))
+            _cast(addr, context, topic, payload, envelope)
+
+            LOG.debug(_("Cast sent; Waiting reply"))
+            # Blocks until receives reply
+            msg = msg_waiter.recv()
+            LOG.debug(_("Received message: %s"), msg)
+            LOG.debug(_("Unpacking response"))
+
+            if msg[2] == 'cast':  # Legacy version
+                raw_msg = _deserialize(msg[-1])[-1]
+            elif msg[2] == 'impl_zmq_v2':
+                rpc_envelope = unflatten_envelope(msg[4:])
+                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
+            else:
+                raise rpc_common.UnsupportedRpcEnvelopeVersion(
+                    _("Unsupported or unknown ZMQ envelope returned."))
+
+            responses = raw_msg['args']['response']
+        # ZMQError trumps the Timeout error.
+        except zmq.ZMQError:
+            raise RPCException("ZMQ Socket Error")
+        except (IndexError, KeyError):
+            raise RPCException(_("RPC Message Invalid."))
+        finally:
+            if 'msg_waiter' in vars():
+                msg_waiter.close()
+
+    # It seems we don't need to do all of the following,
+    # but perhaps it would be useful for multicall?
+    # One effect of this is that we're checking all
+    # responses for Exceptions.
+    for resp in responses:
+        if isinstance(resp, types.DictType) and 'exc' in resp:
+            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
+
+    return responses[-1]
+
+
+def _multi_send(method, context, topic, msg, timeout=None,
+                envelope=False, _msg_id=None):
+    """Wraps the sending of messages.
+
+    Dispatches to the matchmaker and sends message to all relevant hosts.
+    """
+    conf = CONF
+    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
+
+    queues = _get_matchmaker().queues(topic)
+    LOG.debug(_("Sending message(s) to: %s"), queues)
+
+    # Don't stack if we have no matchmaker results
+    if not queues:
+        LOG.warn(_("No matchmaker results. Not casting."))
+        # While not strictly a timeout, callers know how to handle
+        # this exception and a timeout isn't too big a lie.
+        raise rpc_common.Timeout(_("No match from matchmaker."))
+
+    # This supports brokerless fanout (addresses > 1)
+    for queue in queues:
+        (_topic, ip_addr) = queue
+        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
+
+        if method.__name__ == '_cast':
+            eventlet.spawn_n(method, _addr, context,
+                             _topic, msg, timeout, envelope,
+                             _msg_id)
+            return
+        return method(_addr, context, _topic, msg, timeout,
+                      envelope)
+
+
+def create_connection(conf, new=True):
+    return Connection(conf)
+
+
+def multicall(conf, *args, **kwargs):
+    """Multiple calls."""
+    return _multi_send(_call, *args, **kwargs)
+
+
+def call(conf, *args, **kwargs):
+    """Send a message, expect a response."""
+    data = _multi_send(_call, *args, **kwargs)
+    return data[-1]
+
+
+def cast(conf, *args, **kwargs):
+    """Send a message expecting no reply."""
+    _multi_send(_cast, *args, **kwargs)
+
+
+def fanout_cast(conf, context, topic, msg, **kwargs):
+    """Send a message to all listening and expect no reply."""
+    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
+    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
+    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
+
+
+def notify(conf, context, topic, msg, envelope):
+    """Send notification event.
+
+    Notifications are sent to topic-priority.
+    This differs from the AMQP drivers which send to topic.priority.
+    """
+    # NOTE(ewindisch): dot-priority in rpc notifier does not
+    # work with our assumptions.
+    topic = topic.replace('.', '-')
+    cast(conf, context, topic, msg, envelope=envelope)
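
For instance, a notification sent with priority 'info' ends up on a dash-separated topic (a small sketch, not part of the patch):

    topic = 'notifications.info'
    topic = topic.replace('.', '-')
    assert topic == 'notifications-info'
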
+
+
+def cleanup():
+    """Clean up resources in use by implementation."""
+    global ZMQ_CTX
+    if ZMQ_CTX:
+        ZMQ_CTX.term()
+    ZMQ_CTX = None
+
+    global matchmaker
+    matchmaker = None
+
+
+def _get_ctxt():
+    if not zmq:
+        raise ImportError("Failed to import eventlet.green.zmq")
+
+    global ZMQ_CTX
+    if not ZMQ_CTX:
+        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
+    return ZMQ_CTX
+
+
+def _get_matchmaker(*args, **kwargs):
+    global matchmaker
+    if not matchmaker:
+        mm = CONF.rpc_zmq_matchmaker
+        if mm.endswith('matchmaker.MatchMakerRing'):
+            mm = mm.replace('matchmaker', 'matchmaker_ring')
+            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
+                       ' %(new)s instead') % dict(
+                     orig=CONF.rpc_zmq_matchmaker, new=mm))
+        matchmaker = importutils.import_object(mm, *args, **kwargs)
+    return matchmaker
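
The deprecation rewrite above can be illustrated as follows (a sketch; the old-style option value is only an example):

    mm = 'conductor.openstack.common.rpc.matchmaker.MatchMakerRing'
    if mm.endswith('matchmaker.MatchMakerRing'):
        mm = mm.replace('matchmaker', 'matchmaker_ring')
    # mm now points at the matchmaker_ring module:
    # 'conductor.openstack.common.rpc.matchmaker_ring.MatchMakerRing'
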
diff --git a/conductor/openstack/common/rpc/matchmaker.py b/conductor/openstack/common/rpc/matchmaker.py
new file mode 100644
index 0000000..d382753
--- /dev/null
+++ b/conductor/openstack/common/rpc/matchmaker.py
@@ -0,0 +1,330 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011 Cloudscaling Group, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import contextlib
+
+import eventlet
+from oslo.config import cfg
+
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import log as logging
+
+
+matchmaker_opts = [
+    cfg.IntOpt('matchmaker_heartbeat_freq',
+               default=300,
+               help='Heartbeat frequency'),
+    cfg.IntOpt('matchmaker_heartbeat_ttl',
+               default=600,
+               help='Heartbeat time-to-live.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts)
+LOG = logging.getLogger(__name__)
+contextmanager = contextlib.contextmanager
+
+
+class MatchMakerException(Exception):
+    """Signified a match could not be found."""
+    message = _("Match not found by MatchMaker.")
+
+
+class Exchange(object):
+    """Implements lookups.
+
+    Subclass this to support hashtables, dns, etc.
+    """
+    def __init__(self):
+        pass
+
+    def run(self, key):
+        raise NotImplementedError()
+
+
+class Binding(object):
+    """A binding on which to perform a lookup."""
+    def __init__(self):
+        pass
+
+    def test(self, key):
+        raise NotImplementedError()
+
+
+class MatchMakerBase(object):
+    """Match Maker Base Class.
+
+    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
+    MatchMaker.
+    """
+    def __init__(self):
+        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
+        self.bindings = []
+
+        self.no_heartbeat_msg = _('Matchmaker does not implement '
+                                  'registration or heartbeat.')
+
+    def register(self, key, host):
+        """Register a host on a backend.
+
+        Heartbeats, if applicable, may keepalive registration.
+        """
+        pass
+
+    def ack_alive(self, key, host):
+        """Acknowledge that a key.host is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publicly to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
+        """
+        pass
+
+    def is_alive(self, topic, host):
+        """Checks if a host is alive."""
+        pass
+
+    def expire(self, topic, host):
+        """Explicitly expire a host's registration."""
+        pass
+
+    def send_heartbeats(self):
+        """Send all heartbeats.
+
+        Use start_heartbeat to spawn a heartbeat greenthread,
+        which loops this method.
+        """
+        pass
+
+    def unregister(self, key, host):
+        """Unregister a topic."""
+        pass
+
+    def start_heartbeat(self):
+        """Spawn heartbeat greenthread."""
+        pass
+
+    def stop_heartbeat(self):
+        """Destroys the heartbeat greenthread."""
+        pass
+
+    def add_binding(self, binding, rule, last=True):
+        self.bindings.append((binding, rule, False, last))
+
+    #NOTE(ewindisch): kept the following method in case we implement the
+    #                 underlying support.
+    #def add_negate_binding(self, binding, rule, last=True):
+    #    self.bindings.append((binding, rule, True, last))
+
+    def queues(self, key):
+        workers = []
+
+        # bit is for negate bindings - if we choose to implement it.
+        # last stops processing rules if this matches.
+        for (binding, exchange, bit, last) in self.bindings:
+            if binding.test(key):
+                workers.extend(exchange.run(key))
+
+                # Support last.
+                if last:
+                    return workers
+        return workers
+
+
+class HeartbeatMatchMakerBase(MatchMakerBase):
+    """Base for a heart-beat capable MatchMaker.
+
+    Provides common methods for registering, unregistering, and maintaining
+    heartbeats.
+    """
+    def __init__(self):
+        self.hosts = set()
+        self._heart = None
+        self.host_topic = {}
+
+        super(HeartbeatMatchMakerBase, self).__init__()
+
+    def send_heartbeats(self):
+        """Send all heartbeats.
+
+        Use start_heartbeat to spawn a heartbeat greenthread,
+        which loops this method.
+        """
+        for key, host in self.host_topic:
+            self.ack_alive(key, host)
+
+    def ack_alive(self, key, host):
+        """Acknowledge that a host.topic is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publicly to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
+        """
+        raise NotImplementedError("Must implement ack_alive")
+
+    def backend_register(self, key, host):
+        """Implements registration logic.
+
+        Called by register(self,key,host)
+        """
+        raise NotImplementedError("Must implement backend_register")
+
+    def backend_unregister(self, key, key_host):
+        """Implements de-registration logic.
+
+        Called by unregister(self,key,host)
+        """
+        raise NotImplementedError("Must implement backend_unregister")
+
+    def register(self, key, host):
+        """Register a host on a backend.
+
+        Heartbeats, if applicable, may keepalive registration.
+        """
+        self.hosts.add(host)
+        self.host_topic[(key, host)] = host
+        key_host = '.'.join((key, host))
+
+        self.backend_register(key, key_host)
+
+        self.ack_alive(key, host)
+
+    def unregister(self, key, host):
+        """Unregister a topic."""
+        if (key, host) in self.host_topic:
+            del self.host_topic[(key, host)]
+
+        self.hosts.discard(host)
+        self.backend_unregister(key, '.'.join((key, host)))
+
+        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
+                 {'key': key, 'host': host})
+
+    def start_heartbeat(self):
+        """Implementation of MatchMakerBase.start_heartbeat.
+
+        Launches greenthread looping send_heartbeats(),
+        yielding for CONF.matchmaker_heartbeat_freq seconds
+        between iterations.
+        """
+        if not self.hosts:
+            raise MatchMakerException(
+                _("Register before starting heartbeat."))
+
+        def do_heartbeat():
+            while True:
+                self.send_heartbeats()
+                eventlet.sleep(CONF.matchmaker_heartbeat_freq)
+
+        self._heart = eventlet.spawn(do_heartbeat)
+
+    def stop_heartbeat(self):
+        """Destroys the heartbeat greenthread."""
+        if self._heart:
+            self._heart.kill()
+
+
+class DirectBinding(Binding):
+    """Specifies a host in the key via a '.' character.
+
+    Although dots are used in the key, the behavior here is
+    that it maps directly to a host, thus direct.
+    """
+    def test(self, key):
+        if '.' in key:
+            return True
+        return False
+
+
+class TopicBinding(Binding):
+    """Where a 'bare' key without dots.
+
+    AMQP generally considers topic exchanges to be those *with* dots,
+    but we deviate here in terminology as the behavior here matches
+    that of a topic exchange (whereas where there are dots, behavior
+    matches that of a direct exchange.
+    """
+    def test(self, key):
+        if '.' not in key:
+            return True
+        return False
+
+
+class FanoutBinding(Binding):
+    """Match on fanout keys, where key starts with 'fanout.' string."""
+    def test(self, key):
+        if key.startswith('fanout~'):
+            return True
+        return False
+
+
+class StubExchange(Exchange):
+    """Exchange that does nothing."""
+    def run(self, key):
+        return [(key, None)]
+
+
+class LocalhostExchange(Exchange):
+    """Exchange where all direct topics are local."""
+    def __init__(self, host='localhost'):
+        self.host = host
+        super(LocalhostExchange, self).__init__()
+
+    def run(self, key):
+        return [('.'.join((key.split('.')[0], self.host)), self.host)]
+
+
+class DirectExchange(Exchange):
+    """Exchange where all topic keys are split, sending to second half.
+
+    i.e. "compute.host" sends a message to "compute.host" running on "host"
+    """
+    def __init__(self):
+        super(DirectExchange, self).__init__()
+
+    def run(self, key):
+        e = key.split('.', 1)[1]
+        return [(key, e)]
+
+
+class MatchMakerLocalhost(MatchMakerBase):
+    """Match Maker where all bare topics resolve to localhost.
+
+    Useful for testing.
+    """
+    def __init__(self, host='localhost'):
+        super(MatchMakerLocalhost, self).__init__()
+        self.add_binding(FanoutBinding(), LocalhostExchange(host))
+        self.add_binding(DirectBinding(), DirectExchange())
+        self.add_binding(TopicBinding(), LocalhostExchange(host))
+
+
+class MatchMakerStub(MatchMakerBase):
+    """Match Maker where topics are untouched.
+
+    Useful for testing, or for AMQP/brokered queues.
+    Will not work where knowledge of hosts is known (i.e. zeromq)
+    """
+    def __init__(self):
+        super(MatchMakerStub, self).__init__()
+
+        self.add_binding(FanoutBinding(), StubExchange())
+        self.add_binding(DirectBinding(), StubExchange())
+        self.add_binding(TopicBinding(), StubExchange())
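
As a quick illustration of how the bindings and exchanges above combine (a sketch, not part of the patch):

    from conductor.openstack.common.rpc import matchmaker

    mm = matchmaker.MatchMakerLocalhost()
    # Bare topics resolve to the local host via LocalhostExchange.
    mm.queues('conductor')         # [('conductor.localhost', 'localhost')]
    # Dotted topics match DirectBinding and are split on the first dot.
    mm.queues('conductor.host1')   # [('conductor.host1', 'host1')]
    # Fanout keys are recognised by their 'fanout~' prefix.
    mm.queues('fanout~conductor')  # [('fanout~conductor.localhost', 'localhost')]
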
diff --git a/conductor/openstack/common/rpc/matchmaker_redis.py b/conductor/openstack/common/rpc/matchmaker_redis.py
new file mode 100644
index 0000000..50caaae
--- /dev/null
+++ b/conductor/openstack/common/rpc/matchmaker_redis.py
@@ -0,0 +1,145 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2013 Cloudscaling Group, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+from oslo.config import cfg
+
+from conductor.openstack.common import importutils
+from conductor.openstack.common import log as logging
+from conductor.openstack.common.rpc import matchmaker as mm_common
+
+redis = importutils.try_import('redis')
+
+
+matchmaker_redis_opts = [
+    cfg.StrOpt('host',
+               default='127.0.0.1',
+               help='Host to locate redis'),
+    cfg.IntOpt('port',
+               default=6379,
+               help='Use this port to connect to redis host.'),
+    cfg.StrOpt('password',
+               default=None,
+               help='Password for Redis server. (optional)'),
+]
+
+CONF = cfg.CONF
+opt_group = cfg.OptGroup(name='matchmaker_redis',
+                         title='Options for Redis-based MatchMaker')
+CONF.register_group(opt_group)
+CONF.register_opts(matchmaker_redis_opts, opt_group)
+LOG = logging.getLogger(__name__)
+
+
+class RedisExchange(mm_common.Exchange):
+    def __init__(self, matchmaker):
+        self.matchmaker = matchmaker
+        self.redis = matchmaker.redis
+        super(RedisExchange, self).__init__()
+
+
+class RedisTopicExchange(RedisExchange):
+    """Exchange where all topic keys are split, sending to second half.
+
+    i.e. "compute.host" sends a message to "compute" running on "host"
+    """
+    def run(self, topic):
+        while True:
+            member_name = self.redis.srandmember(topic)
+
+            if not member_name:
+                # If this happens, there are no
+                # longer any members.
+                break
+
+            if not self.matchmaker.is_alive(topic, member_name):
+                continue
+
+            host = member_name.split('.', 1)[1]
+            return [(member_name, host)]
+        return []
+
+
+class RedisFanoutExchange(RedisExchange):
+    """Return a list of all hosts."""
+    def run(self, topic):
+        topic = topic.split('~', 1)[1]
+        hosts = self.redis.smembers(topic)
+        good_hosts = filter(
+            lambda host: self.matchmaker.is_alive(topic, host), hosts)
+
+        return [(x, x.split('.', 1)[1]) for x in good_hosts]
+
+
+class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
+    """MatchMaker registering and looking-up hosts with a Redis server."""
+    def __init__(self):
+        super(MatchMakerRedis, self).__init__()
+
+        if not redis:
+            raise ImportError("Failed to import module redis.")
+
+        self.redis = redis.StrictRedis(
+            host=CONF.matchmaker_redis.host,
+            port=CONF.matchmaker_redis.port,
+            password=CONF.matchmaker_redis.password)
+
+        self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
+        self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
+        self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))
+
+    def ack_alive(self, key, host):
+        topic = "%s.%s" % (key, host)
+        if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
+            # If we could not update the expiration, the key
+            # might have been pruned. Re-register, creating a new
+            # key in Redis.
+            self.register(key, host)
+
+    def is_alive(self, topic, host):
+        if self.redis.ttl(host) == -1:
+            self.expire(topic, host)
+            return False
+        return True
+
+    def expire(self, topic, host):
+        with self.redis.pipeline() as pipe:
+            pipe.multi()
+            pipe.delete(host)
+            pipe.srem(topic, host)
+            pipe.execute()
+
+    def backend_register(self, key, key_host):
+        with self.redis.pipeline() as pipe:
+            pipe.multi()
+            pipe.sadd(key, key_host)
+
+            # No value is needed, we just
+            # care if it exists. Sets aren't viable
+            # because only keys can expire.
+            pipe.set(key_host, '')
+
+            pipe.execute()
+
+    def backend_unregister(self, key, key_host):
+        with self.redis.pipeline() as pipe:
+            pipe.multi()
+            pipe.srem(key, key_host)
+            pipe.delete(key_host)
+            pipe.execute()
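
The Redis layout used by MatchMakerRedis can be sketched with the redis-py client directly (illustrative only; assumes a Redis server on the default host/port):

    import redis

    r = redis.StrictRedis(host='127.0.0.1', port=6379)

    # backend_register('conductor', 'conductor.host1') amounts to:
    r.sadd('conductor', 'conductor.host1')  # topic set, one member per host
    r.set('conductor.host1', '')            # per-host key, kept only for its TTL
    # ack_alive() then refreshes the TTL on every heartbeat:
    r.expire('conductor.host1', 600)        # matchmaker_heartbeat_ttl default
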
diff --git a/conductor/openstack/common/rpc/matchmaker_ring.py b/conductor/openstack/common/rpc/matchmaker_ring.py
new file mode 100644
index 0000000..b31b3da
--- /dev/null
+++ b/conductor/openstack/common/rpc/matchmaker_ring.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011-2013 Cloudscaling Group, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import itertools
+import json
+
+from oslo.config import cfg
+
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import log as logging
+from conductor.openstack.common.rpc import matchmaker as mm
+
+
+matchmaker_opts = [
+    # Matchmaker ring file
+    cfg.StrOpt('ringfile',
+               deprecated_name='matchmaker_ringfile',
+               deprecated_group='DEFAULT',
+               default='/etc/oslo/matchmaker_ring.json',
+               help='Matchmaker ring file (JSON)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
+LOG = logging.getLogger(__name__)
+
+
+class RingExchange(mm.Exchange):
+    """Match Maker where hosts are loaded from a static JSON formatted file.
+
+    __init__ takes optional ring dictionary argument, otherwise
+    loads the ringfile from CONF.mathcmaker_ringfile.
+    """
+    def __init__(self, ring=None):
+        super(RingExchange, self).__init__()
+
+        if ring:
+            self.ring = ring
+        else:
+            with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
+                self.ring = json.load(fh)
+
+        self.ring0 = {}
+        for k in self.ring.keys():
+            self.ring0[k] = itertools.cycle(self.ring[k])
+
+    def _ring_has(self, key):
+        if key in self.ring0:
+            return True
+        return False
+
+
+class RoundRobinRingExchange(RingExchange):
+    """A Topic Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(RoundRobinRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        if not self._ring_has(key):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (key, )
+            )
+            return []
+        host = next(self.ring0[key])
+        return [(key + '.' + host, host)]
+
+
+class FanoutRingExchange(RingExchange):
+    """Fanout Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(FanoutRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        # Assume starts with "fanout~", strip it for lookup.
+        nkey = key.split('fanout~')[1:][0]
+        if not self._ring_has(nkey):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (nkey, )
+            )
+            return []
+        return map(lambda x: (key + '.' + x, x), self.ring[nkey])
+
+
+class MatchMakerRing(mm.MatchMakerBase):
+    """Match Maker where hosts are loaded from a static hashmap."""
+    def __init__(self, ring=None):
+        super(MatchMakerRing, self).__init__()
+        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
+        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
+        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
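
A small ring sketch (illustrative; the in-memory ring dict stands in for /etc/oslo/matchmaker_ring.json):

    from conductor.openstack.common.rpc import matchmaker_ring

    ring = {'conductor': ['host1', 'host2']}
    mm = matchmaker_ring.MatchMakerRing(ring=ring)

    # Bare topics round-robin across the hosts listed for the topic.
    mm.queues('conductor')         # [('conductor.host1', 'host1')]
    mm.queues('conductor')         # [('conductor.host2', 'host2')]
    # Fanout keys expand to every host listed for the topic.
    mm.queues('fanout~conductor')  # [('fanout~conductor.host1', 'host1'),
                                   #  ('fanout~conductor.host2', 'host2')]
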
diff --git a/conductor/openstack/common/rpc/proxy.py b/conductor/openstack/common/rpc/proxy.py
new file mode 100644
index 0000000..2ef9c3f
--- /dev/null
+++ b/conductor/openstack/common/rpc/proxy.py
@@ -0,0 +1,226 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012-2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+A helper class for proxy objects to remote APIs.
+
+For more information about rpc API version numbers, see:
+    rpc/dispatcher.py
+"""
+
+
+from conductor.openstack.common import rpc
+from conductor.openstack.common.rpc import common as rpc_common
+from conductor.openstack.common.rpc import serializer as rpc_serializer
+
+
+class RpcProxy(object):
+    """A helper class for rpc clients.
+
+    This class is a wrapper around the RPC client API.  It allows you to
+    specify the topic and API version in a single place.  This is intended to
+    be used as a base class for a class that implements the client side of an
+    rpc API.
+    """
+
+    # The default namespace, which can be overridden in a subclass.
+    RPC_API_NAMESPACE = None
+
+    def __init__(self, topic, default_version, version_cap=None,
+                 serializer=None):
+        """Initialize an RpcProxy.
+
+        :param topic: The topic to use for all messages.
+        :param default_version: The default API version to request in all
+               outgoing messages.  This can be overridden on a per-message
+               basis.
+        :param version_cap: Optionally cap the maximum version used for sent
+               messages.
+        :param serializer: Optionally (de-)serialize entities with a
+               provided helper.
+        """
+        self.topic = topic
+        self.default_version = default_version
+        self.version_cap = version_cap
+        if serializer is None:
+            serializer = rpc_serializer.NoOpSerializer()
+        self.serializer = serializer
+        super(RpcProxy, self).__init__()
+
+    def _set_version(self, msg, vers):
+        """Helper method to set the version in a message.
+
+        :param msg: The message having a version added to it.
+        :param vers: The version number to add to the message.
+        """
+        v = vers if vers else self.default_version
+        if (self.version_cap and not
+                rpc_common.version_is_compatible(self.version_cap, v)):
+            raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
+        msg['version'] = v
+
+    def _get_topic(self, topic):
+        """Return the topic to use for a message."""
+        return topic if topic else self.topic
+
+    def can_send_version(self, version):
+        """Check to see if a version is compatible with the version cap."""
+        return (not self.version_cap or
+                rpc_common.version_is_compatible(self.version_cap, version))
+
+    @staticmethod
+    def make_namespaced_msg(method, namespace, **kwargs):
+        return {'method': method, 'namespace': namespace, 'args': kwargs}
+
+    def make_msg(self, method, **kwargs):
+        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
+                                        **kwargs)
+
+    def _serialize_msg_args(self, context, kwargs):
+        """Helper method called to serialize message arguments.
+
+        This calls our serializer on each argument, returning a new
+        set of args that have been serialized.
+
+        :param context: The request context
+        :param kwargs: The arguments to serialize
+        :returns: A new set of serialized arguments
+        """
+        new_kwargs = dict()
+        for argname, arg in kwargs.iteritems():
+            new_kwargs[argname] = self.serializer.serialize_entity(context,
+                                                                   arg)
+        return new_kwargs
+
+    def call(self, context, msg, topic=None, version=None, timeout=None):
+        """rpc.call() a remote method.
+
+        :param context: The request context
+        :param msg: The message to send, including the method and args.
+        :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
+        :param timeout: (Optional) A timeout to use when waiting for the
+               response.  If no timeout is specified, a default timeout will be
+               used that is usually sufficient.
+
+        :returns: The return value from the remote method.
+        """
+        self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        real_topic = self._get_topic(topic)
+        try:
+            result = rpc.call(context, real_topic, msg, timeout)
+            return self.serializer.deserialize_entity(context, result)
+        except rpc.common.Timeout as exc:
+            raise rpc.common.Timeout(
+                exc.info, real_topic, msg.get('method'))
+
+    def multicall(self, context, msg, topic=None, version=None, timeout=None):
+        """rpc.multicall() a remote method.
+
+        :param context: The request context
+        :param msg: The message to send, including the method and args.
+        :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
+        :param timeout: (Optional) A timeout to use when waiting for the
+               response.  If no timeout is specified, a default timeout will be
+               used that is usually sufficient.
+
+        :returns: An iterator that lets you process each of the returned values
+                  from the remote method as they arrive.
+        """
+        self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        real_topic = self._get_topic(topic)
+        try:
+            result = rpc.multicall(context, real_topic, msg, timeout)
+            return self.serializer.deserialize_entity(context, result)
+        except rpc.common.Timeout as exc:
+            raise rpc.common.Timeout(
+                exc.info, real_topic, msg.get('method'))
+
+    def cast(self, context, msg, topic=None, version=None):
+        """rpc.cast() a remote method.
+
+        :param context: The request context
+        :param msg: The message to send, including the method and args.
+        :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
+
+        :returns: None.  rpc.cast() does not wait on any return value from the
+                  remote method.
+        """
+        self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        rpc.cast(context, self._get_topic(topic), msg)
+
+    def fanout_cast(self, context, msg, topic=None, version=None):
+        """rpc.fanout_cast() a remote method.
+
+        :param context: The request context
+        :param msg: The message to send, including the method and args.
+        :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
+
+        :returns: None.  rpc.fanout_cast() does not wait on any return value
+                  from the remote method.
+        """
+        self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        rpc.fanout_cast(context, self._get_topic(topic), msg)
+
+    def cast_to_server(self, context, server_params, msg, topic=None,
+                       version=None):
+        """rpc.cast_to_server() a remote method.
+
+        :param context: The request context
+        :param server_params: Server parameters.  See rpc.cast_to_server() for
+               details.
+        :param msg: The message to send, including the method and args.
+        :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
+
+        :returns: None.  rpc.cast_to_server() does not wait on any
+                  return values.
+        """
+        self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
+
+    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
+                              version=None):
+        """rpc.fanout_cast_to_server() a remote method.
+
+        :param context: The request context
+        :param server_params: Server parameters.  See rpc.cast_to_server() for
+               details.
+        :param msg: The message to send, including the method and args.
+        :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
+
+        :returns: None.  rpc.fanout_cast_to_server() does not wait on any
+                  return values.
+        """
+        self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        rpc.fanout_cast_to_server(context, server_params,
+                                  self._get_topic(topic), msg)
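
A typical client built on RpcProxy might look like the following (a sketch; EngineClient, the 'conductor' topic, and the method names are assumptions, not part of the patch):

    from conductor.openstack.common.rpc import proxy

    class EngineClient(proxy.RpcProxy):
        """Hypothetical client-side API for a 'conductor' service."""
        BASE_RPC_API_VERSION = '1.0'

        def __init__(self):
            super(EngineClient, self).__init__(
                topic='conductor',
                default_version=self.BASE_RPC_API_VERSION)

        def handle_task(self, context, task):
            # Blocks until the remote side returns a result.
            return self.call(context, self.make_msg('handle_task', task=task))

        def report_status(self, context, status):
            # Fire-and-forget.
            self.cast(context, self.make_msg('report_status', status=status))
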
diff --git a/conductor/openstack/common/rpc/serializer.py b/conductor/openstack/common/rpc/serializer.py
new file mode 100644
index 0000000..76c6831
--- /dev/null
+++ b/conductor/openstack/common/rpc/serializer.py
@@ -0,0 +1,52 @@
+#    Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Provides the definition of an RPC serialization handler"""
+
+import abc
+
+
+class Serializer(object):
+    """Generic (de-)serialization definition base class."""
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def serialize_entity(self, context, entity):
+        """Serialize something to primitive form.
+
+        :param context: Security context
+        :param entity: Entity to be serialized
+        :returns: Serialized form of entity
+        """
+        pass
+
+    @abc.abstractmethod
+    def deserialize_entity(self, context, entity):
+        """Deserialize something from primitive form.
+
+        :param context: Security context
+        :param entity: Primitive to be deserialized
+        :returns: Deserialized form of entity
+        """
+        pass
+
+
+class NoOpSerializer(Serializer):
+    """A serializer that does nothing."""
+
+    def serialize_entity(self, context, entity):
+        return entity
+
+    def deserialize_entity(self, context, entity):
+        return entity
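
A hedged sketch of a concrete Serializer; the Task domain object is hypothetical and only illustrates where serialize_entity/deserialize_entity hook in:

    from conductor.openstack.common.rpc import serializer

    class Task(object):
        """Hypothetical domain object passed through the RPC layer."""
        def __init__(self, name):
            self.name = name

        def to_dict(self):
            return {'__type__': 'task', 'name': self.name}

        @classmethod
        def from_dict(cls, data):
            return cls(data['name'])

    class TaskSerializer(serializer.Serializer):
        """Converts Task objects to/from primitives at the RPC boundary."""
        def serialize_entity(self, context, entity):
            return entity.to_dict() if isinstance(entity, Task) else entity

        def deserialize_entity(self, context, entity):
            if isinstance(entity, dict) and entity.get('__type__') == 'task':
                return Task.from_dict(entity)
            return entity
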
diff --git a/conductor/openstack/common/rpc/service.py b/conductor/openstack/common/rpc/service.py
new file mode 100644
index 0000000..1834892
--- /dev/null
+++ b/conductor/openstack/common/rpc/service.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from conductor.openstack.common.gettextutils import _  # noqa
+from conductor.openstack.common import log as logging
+from conductor.openstack.common import rpc
+from conductor.openstack.common.rpc import dispatcher as rpc_dispatcher
+from conductor.openstack.common import service
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Service(service.Service):
+    """Service object for binaries running on hosts.
+
+    A service enables rpc by listening to queues based on topic and host.
+    """
+    def __init__(self, host, topic, manager=None, serializer=None):
+        super(Service, self).__init__()
+        self.host = host
+        self.topic = topic
+        self.serializer = serializer
+        if manager is None:
+            self.manager = self
+        else:
+            self.manager = manager
+
+    def start(self):
+        super(Service, self).start()
+
+        self.conn = rpc.create_connection(new=True)
+        LOG.debug(_("Creating Consumer connection for Service %s") %
+                  self.topic)
+
+        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
+                                                  self.serializer)
+
+        # Share this same connection for these Consumers
+        self.conn.create_consumer(self.topic, dispatcher, fanout=False)
+
+        node_topic = '%s.%s' % (self.topic, self.host)
+        self.conn.create_consumer(node_topic, dispatcher, fanout=False)
+
+        self.conn.create_consumer(self.topic, dispatcher, fanout=True)
+
+        # Hook to allow the manager to do other initializations after
+        # the rpc connection is created.
+        if callable(getattr(self.manager, 'initialize_service_hook', None)):
+            self.manager.initialize_service_hook(self)
+
+        # Consume from all consumers in a thread
+        self.conn.consume_in_thread()
+
+    def stop(self):
+        # Try to shut the connection down, but if we get any sort of
+        # errors, go ahead and ignore them.. as we're shutting down anyway
+        try:
+            self.conn.close()
+        except Exception:
+            pass
+        super(Service, self).stop()
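
Wiring a manager into the rpc Service above could look like this (illustrative; ConductorManager, the host name, and the topic are assumptions, and the module-level launch() helper from conductor.openstack.common.service is assumed):

    from conductor.openstack.common import service
    from conductor.openstack.common.rpc import service as rpc_service

    class ConductorManager(object):
        """Hypothetical manager; its public methods become RPC endpoints."""
        def handle_task(self, context, task):
            return {'status': 'queued', 'task': task}

    server = rpc_service.Service('host1', 'conductor',
                                 manager=ConductorManager())
    launcher = service.launch(server)  # ServiceLauncher when workers is None
    launcher.wait()
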
diff --git a/conductor/openstack/common/rpc/zmq_receiver.py b/conductor/openstack/common/rpc/zmq_receiver.py
new file mode 100755
index 0000000..3cdd4d0
--- /dev/null
+++ b/conductor/openstack/common/rpc/zmq_receiver.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Copyright 2011 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import eventlet
+eventlet.monkey_patch()
+
+import contextlib
+import sys
+
+from oslo.config import cfg
+
+from conductor.openstack.common import log as logging
+from conductor.openstack.common import rpc
+from conductor.openstack.common.rpc import impl_zmq
+
+CONF = cfg.CONF
+CONF.register_opts(rpc.rpc_opts)
+CONF.register_opts(impl_zmq.zmq_opts)
+
+
+def main():
+    CONF(sys.argv[1:], project='oslo')
+    logging.setup("oslo")
+
+    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
+        reactor.consume_in_thread()
+        reactor.wait()
diff --git a/conductor/openstack/common/service.py b/conductor/openstack/common/service.py
index a31b41a..e54426a 100644
--- a/conductor/openstack/common/service.py
+++ b/conductor/openstack/common/service.py
@@ -27,11 +27,12 @@ import sys
 import time
 
 import eventlet
+from eventlet import event
 import logging as std_logging
 from oslo.config import cfg
 
 from conductor.openstack.common import eventlet_backdoor
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 from conductor.openstack.common import importutils
 from conductor.openstack.common import log as logging
 from conductor.openstack.common import threadgroup
@@ -51,19 +52,8 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services = threadgroup.ThreadGroup()
-        eventlet_backdoor.initialize_if_enabled()
-
-    @staticmethod
-    def run_service(service):
-        """Start and wait for a service to finish.
-
-        :param service: service to run and wait for.
-        :returns: None
-
-        """
-        service.start()
-        service.wait()
+        self.services = Services()
+        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
 
     def launch_service(self, service):
         """Load and start the given service.
@@ -72,7 +62,8 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services.add_thread(self.run_service, service)
+        service.backdoor_port = self.backdoor_port
+        self.services.add(service)
 
     def stop(self):
         """Stop all services which are currently running.
@@ -80,7 +71,7 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services.stop()
+        self.services.stop()
 
     def wait(self):
         """Waits until all services have been stopped, and then returns.
@@ -88,7 +79,7 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services.wait()
+        self.services.wait()
 
 
 class SignalExit(SystemExit):
@@ -123,9 +114,13 @@ class ServiceLauncher(Launcher):
         except SystemExit as exc:
             status = exc.code
         finally:
-            if rpc:
-                rpc.cleanup()
             self.stop()
+            if rpc:
+                try:
+                    rpc.cleanup()
+                except Exception:
+                    # We're shutting down, so it doesn't matter at this point.
+                    LOG.exception(_('Exception during rpc cleanup.'))
         return status
 
 
@@ -188,7 +183,8 @@ class ProcessLauncher(object):
         random.seed()
 
         launcher = Launcher()
-        launcher.run_service(service)
+        launcher.launch_service(service)
+        launcher.wait()
 
     def _start_child(self, wrap):
         if len(wrap.forktimes) > wrap.workers:
@@ -270,7 +266,7 @@ class ProcessLauncher(object):
         return wrap
 
     def wait(self):
-        """Loop waiting on children to die and respawning as necessary"""
+        """Loop waiting on children to die and respawning as necessary."""
 
         LOG.debug(_('Full set of CONF:'))
         CONF.log_opt_values(LOG, std_logging.DEBUG)
@@ -312,15 +308,63 @@ class Service(object):
     def __init__(self, threads=1000):
         self.tg = threadgroup.ThreadGroup(threads)
 
+        # signal that the service is done shutting itself down:
+        self._done = event.Event()
+
     def start(self):
         pass
 
     def stop(self):
         self.tg.stop()
+        self.tg.wait()
+        # Signal that service cleanup is done:
+        if not self._done.ready():
+            self._done.send()
+
+    def wait(self):
+        self._done.wait()
+
+
+class Services(object):
+
+    def __init__(self):
+        self.services = []
+        self.tg = threadgroup.ThreadGroup()
+        self.done = event.Event()
+
+    def add(self, service):
+        self.services.append(service)
+        self.tg.add_thread(self.run_service, service, self.done)
+
+    def stop(self):
+        # wait for graceful shutdown of services:
+        for service in self.services:
+            service.stop()
+            service.wait()
+
+        # Each service has performed cleanup, now signal that the run_service
+        # wrapper threads can now die:
+        if not self.done.ready():
+            self.done.send()
+
+        # reap threads:
+        self.tg.stop()
 
     def wait(self):
         self.tg.wait()
 
+    @staticmethod
+    def run_service(service, done):
+        """Service start wrapper.
+
+        :param service: service to run
+        :param done: event to wait on until a shutdown is triggered
+        :returns: None
+
+        """
+        service.start()
+        done.wait()
+
 
 def launch(service, workers=None):
     if workers:
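
Under the reworked shutdown protocol above, a minimal Service subclass might look like this (names are illustrative): stop() now joins the thread group and signals the internal event, so launcher.wait() returns only once cleanup has finished.

    import eventlet

    from conductor.openstack.common import service

    class HeartbeatService(service.Service):
        """Hypothetical service running a single looping greenthread."""
        def start(self):
            super(HeartbeatService, self).start()
            self.tg.add_thread(self._beat)

        def _beat(self):
            while True:
                eventlet.sleep(1)

    launcher = service.launch(HeartbeatService())
    launcher.wait()
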
diff --git a/conductor/openstack/common/setup.py b/conductor/openstack/common/setup.py
deleted file mode 100644
index dec74fd..0000000
--- a/conductor/openstack/common/setup.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack Foundation.
-# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Utilities with minimum-depends for use in setup.py
-"""
-
-import email
-import os
-import re
-import subprocess
-import sys
-
-from setuptools.command import sdist
-
-
-def parse_mailmap(mailmap='.mailmap'):
-    mapping = {}
-    if os.path.exists(mailmap):
-        with open(mailmap, 'r') as fp:
-            for l in fp:
-                try:
-                    canonical_email, alias = re.match(
-                        r'[^#]*?(<.+>).*(<.+>).*', l).groups()
-                except AttributeError:
-                    continue
-                mapping[alias] = canonical_email
-    return mapping
-
-
-def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
-    mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
-    return parse_mailmap(mailmap)
-
-
-def canonicalize_emails(changelog, mapping):
-    """Takes in a string and an email alias mapping and replaces all
-       instances of the aliases in the string with their real email.
-    """
-    for alias, email_address in mapping.iteritems():
-        changelog = changelog.replace(alias, email_address)
-    return changelog
-
-
-# Get requirements from the first file that exists
-def get_reqs_from_files(requirements_files):
-    for requirements_file in requirements_files:
-        if os.path.exists(requirements_file):
-            with open(requirements_file, 'r') as fil:
-                return fil.read().split('\n')
-    return []
-
-
-def parse_requirements(requirements_files=['requirements.txt',
-                                           'tools/pip-requires']):
-    requirements = []
-    for line in get_reqs_from_files(requirements_files):
-        # For the requirements list, we need to inject only the portion
-        # after egg= so that distutils knows the package it's looking for
-        # such as:
-        # -e git://github.com/openstack/nova/master#egg=nova
-        if re.match(r'\s*-e\s+', line):
-            requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
-                                line))
-        # such as:
-        # http://github.com/openstack/nova/zipball/master#egg=nova
-        elif re.match(r'\s*https?:', line):
-            requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
-                                line))
-        # -f lines are for index locations, and don't get used here
-        elif re.match(r'\s*-f\s+', line):
-            pass
-        # argparse is part of the standard library starting with 2.7
-        # adding it to the requirements list screws distro installs
-        elif line == 'argparse' and sys.version_info >= (2, 7):
-            pass
-        else:
-            requirements.append(line)
-
-    return requirements
-
-
-def parse_dependency_links(requirements_files=['requirements.txt',
-                                               'tools/pip-requires']):
-    dependency_links = []
-    # dependency_links inject alternate locations to find packages listed
-    # in requirements
-    for line in get_reqs_from_files(requirements_files):
-        # skip comments and blank lines
-        if re.match(r'(\s*#)|(\s*$)', line):
-            continue
-        # lines with -e or -f need the whole line, minus the flag
-        if re.match(r'\s*-[ef]\s+', line):
-            dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
-        # lines that are only urls can go in unmolested
-        elif re.match(r'\s*https?:', line):
-            dependency_links.append(line)
-    return dependency_links
-
-
-def _run_shell_command(cmd, throw_on_error=False):
-    if os.name == 'nt':
-        output = subprocess.Popen(["cmd.exe", "/C", cmd],
-                                  stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-    else:
-        output = subprocess.Popen(["/bin/sh", "-c", cmd],
-                                  stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-    out = output.communicate()
-    if output.returncode and throw_on_error:
-        raise Exception("%s returned %d" % cmd, output.returncode)
-    if len(out) == 0:
-        return None
-    if len(out[0].strip()) == 0:
-        return None
-    return out[0].strip()
-
-
-def _get_git_directory():
-    parent_dir = os.path.dirname(__file__)
-    while True:
-        git_dir = os.path.join(parent_dir, '.git')
-        if os.path.exists(git_dir):
-            return git_dir
-        parent_dir, child = os.path.split(parent_dir)
-        if not child:   # reached to root dir
-            return None
-
-
-def write_git_changelog():
-    """Write a changelog based on the git changelog."""
-    new_changelog = 'ChangeLog'
-    git_dir = _get_git_directory()
-    if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
-        if git_dir:
-            git_log_cmd = 'git --git-dir=%s log' % git_dir
-            changelog = _run_shell_command(git_log_cmd)
-            mailmap = _parse_git_mailmap(git_dir)
-            with open(new_changelog, "w") as changelog_file:
-                changelog_file.write(canonicalize_emails(changelog, mailmap))
-    else:
-        open(new_changelog, 'w').close()
-
-
-def generate_authors():
-    """Create AUTHORS file using git commits."""
-    jenkins_email = 'jenkins@review.(openstack|stackforge).org'
-    old_authors = 'AUTHORS.in'
-    new_authors = 'AUTHORS'
-    git_dir = _get_git_directory()
-    if not os.getenv('SKIP_GENERATE_AUTHORS'):
-        if git_dir:
-            # don't include jenkins email address in AUTHORS file
-            git_log_cmd = ("git --git-dir=" + git_dir +
-                           " log --format='%aN <%aE>' | sort -u | "
-                           "egrep -v '" + jenkins_email + "'")
-            changelog = _run_shell_command(git_log_cmd)
-            signed_cmd = ("git log --git-dir=" + git_dir +
-                          " | grep -i Co-authored-by: | sort -u")
-            signed_entries = _run_shell_command(signed_cmd)
-            if signed_entries:
-                new_entries = "\n".join(
-                    [signed.split(":", 1)[1].strip()
-                     for signed in signed_entries.split("\n") if signed])
-                changelog = "\n".join((changelog, new_entries))
-            mailmap = _parse_git_mailmap(git_dir)
-            with open(new_authors, 'w') as new_authors_fh:
-                new_authors_fh.write(canonicalize_emails(changelog, mailmap))
-                if os.path.exists(old_authors):
-                    with open(old_authors, "r") as old_authors_fh:
-                        new_authors_fh.write('\n' + old_authors_fh.read())
-    else:
-        open(new_authors, 'w').close()
-
-
-_rst_template = """%(heading)s
-%(underline)s
-
-.. automodule:: %(module)s
-  :members:
-  :undoc-members:
-  :show-inheritance:
-"""
-
-
-def get_cmdclass():
-    """Return dict of commands to run from setup.py."""
-
-    cmdclass = dict()
-
-    def _find_modules(arg, dirname, files):
-        for filename in files:
-            if filename.endswith('.py') and filename != '__init__.py':
-                arg["%s.%s" % (dirname.replace('/', '.'),
-                               filename[:-3])] = True
-
-    class LocalSDist(sdist.sdist):
-        """Builds the ChangeLog and Authors files from VC first."""
-
-        def run(self):
-            write_git_changelog()
-            generate_authors()
-            # sdist.sdist is an old style class, can't use super()
-            sdist.sdist.run(self)
-
-    cmdclass['sdist'] = LocalSDist
-
-    # If Sphinx is installed on the box running setup.py,
-    # enable setup.py to build the documentation, otherwise,
-    # just ignore it
-    try:
-        from sphinx.setup_command import BuildDoc
-
-        class LocalBuildDoc(BuildDoc):
-
-            builders = ['html', 'man']
-
-            def generate_autoindex(self):
-                print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
-                modules = {}
-                option_dict = self.distribution.get_option_dict('build_sphinx')
-                source_dir = os.path.join(option_dict['source_dir'][1], 'api')
-                if not os.path.exists(source_dir):
-                    os.makedirs(source_dir)
-                for pkg in self.distribution.packages:
-                    if '.' not in pkg:
-                        os.path.walk(pkg, _find_modules, modules)
-                module_list = modules.keys()
-                module_list.sort()
-                autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
-                with open(autoindex_filename, 'w') as autoindex:
-                    autoindex.write(""".. toctree::
-   :maxdepth: 1
-
-""")
-                    for module in module_list:
-                        output_filename = os.path.join(source_dir,
-                                                       "%s.rst" % module)
-                        heading = "The :mod:`%s` Module" % module
-                        underline = "=" * len(heading)
-                        values = dict(module=module, heading=heading,
-                                      underline=underline)
-
-                        print "Generating %s" % output_filename
-                        with open(output_filename, 'w') as output_file:
-                            output_file.write(_rst_template % values)
-                        autoindex.write("   %s.rst\n" % module)
-
-            def run(self):
-                if not os.getenv('SPHINX_DEBUG'):
-                    self.generate_autoindex()
-
-                for builder in self.builders:
-                    self.builder = builder
-                    self.finalize_options()
-                    self.project = self.distribution.get_name()
-                    self.version = self.distribution.get_version()
-                    self.release = self.distribution.get_version()
-                    BuildDoc.run(self)
-
-        class LocalBuildLatex(LocalBuildDoc):
-            builders = ['latex']
-
-        cmdclass['build_sphinx'] = LocalBuildDoc
-        cmdclass['build_sphinx_latex'] = LocalBuildLatex
-    except ImportError:
-        pass
-
-    return cmdclass
-
-
-def _get_revno(git_dir):
-    """Return the number of commits since the most recent tag.
-
-    We use git-describe to find this out, but if there are no
-    tags then we fall back to counting commits since the beginning
-    of time.
-    """
-    describe = _run_shell_command(
-        "git --git-dir=%s describe --always" % git_dir)
-    if "-" in describe:
-        return describe.rsplit("-", 2)[-2]
-
-    # no tags found
-    revlist = _run_shell_command(
-        "git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
-    return len(revlist.splitlines())
-
-
-def _get_version_from_git(pre_version):
-    """Return a version which is equal to the tag that's on the current
-    revision if there is one, or tag plus number of additional revisions
-    if the current revision has no tag."""
-
-    git_dir = _get_git_directory()
-    if git_dir:
-        if pre_version:
-            try:
-                return _run_shell_command(
-                    "git --git-dir=" + git_dir + " describe --exact-match",
-                    throw_on_error=True).replace('-', '.')
-            except Exception:
-                sha = _run_shell_command(
-                    "git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
-                return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
-        else:
-            return _run_shell_command(
-                "git --git-dir=" + git_dir + " describe --always").replace(
-                    '-', '.')
-    return None
-
-
-def _get_version_from_pkg_info(package_name):
-    """Get the version from PKG-INFO file if we can."""
-    try:
-        pkg_info_file = open('PKG-INFO', 'r')
-    except (IOError, OSError):
-        return None
-    try:
-        pkg_info = email.message_from_file(pkg_info_file)
-    except email.MessageError:
-        return None
-    # Check to make sure we're in our own dir
-    if pkg_info.get('Name', None) != package_name:
-        return None
-    return pkg_info.get('Version', None)
-
-
-def get_version(package_name, pre_version=None):
-    """Get the version of the project. First, try getting it from PKG-INFO, if
-    it exists. If it does, that means we're in a distribution tarball or that
-    install has happened. Otherwise, if there is no PKG-INFO file, pull the
-    version from git.
-
-    We do not support setup.py version sanity in git archive tarballs, nor do
-    we support packagers directly sucking our git repo into theirs. We expect
-    that a source tarball be made from our git repo - or that if someone wants
-    to make a source tarball from a fork of our repo with additional tags in it
-    that they understand and desire the results of doing that.
-    """
-    version = os.environ.get("OSLO_PACKAGE_VERSION", None)
-    if version:
-        return version
-    version = _get_version_from_pkg_info(package_name)
-    if version:
-        return version
-    version = _get_version_from_git(pre_version)
-    if version:
-        return version
-    raise Exception("Versioning for this project requires either an sdist"
-                    " tarball, or access to an upstream git repository.")
diff --git a/conductor/openstack/common/sslutils.py b/conductor/openstack/common/sslutils.py
index 6ccbac8..1141ee1 100644
--- a/conductor/openstack/common/sslutils.py
+++ b/conductor/openstack/common/sslutils.py
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2013 IBM
+# Copyright 2013 IBM Corp.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -19,7 +19,7 @@ import ssl
 
 from oslo.config import cfg
 
-from conductor.openstack.common.gettextutils import _
+from conductor.openstack.common.gettextutils import _  # noqa
 
 
 ssl_opts = [
@@ -78,3 +78,23 @@ def wrap(sock):
         ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
 
     return ssl.wrap_socket(sock, **ssl_kwargs)
+
+
+_SSL_PROTOCOLS = {
+    "tlsv1": ssl.PROTOCOL_TLSv1,
+    "sslv23": ssl.PROTOCOL_SSLv23,
+    "sslv3": ssl.PROTOCOL_SSLv3
+}
+
+try:
+    _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
+except AttributeError:
+    pass
+
+
+def validate_ssl_version(version):
+    key = version.lower()
+    try:
+        return _SSL_PROTOCOLS[key]
+    except KeyError:
+        raise RuntimeError(_("Invalid SSL version : %s") % version)
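The hunk above introduces validate_ssl_version(), which resolves a configured protocol name (case-insensitively) to the matching constant from Python's ssl module and raises RuntimeError for anything it does not recognize. A minimal usage sketch, assuming the synced module is importable as conductor.openstack.common.sslutils:

    import ssl

    from conductor.openstack.common import sslutils

    protocol = sslutils.validate_ssl_version('TLSv1')   # lookup is case-insensitive
    assert protocol == ssl.PROTOCOL_TLSv1
    # An unknown value such as 'sslv4' raises RuntimeError("Invalid SSL version : sslv4").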
diff --git a/conductor/openstack/common/threadgroup.py b/conductor/openstack/common/threadgroup.py
index 5c986aa..eb3857e 100644
--- a/conductor/openstack/common/threadgroup.py
+++ b/conductor/openstack/common/threadgroup.py
@@ -14,7 +14,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from eventlet import greenlet
+import eventlet
 from eventlet import greenpool
 from eventlet import greenthread
 
@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
 
 
 def _thread_done(gt, *args, **kwargs):
-    """ Callback function to be passed to GreenThread.link() when we spawn()
+    """Callback function to be passed to GreenThread.link() when we spawn()
     Calls the :class:`ThreadGroup` to notify if.
 
     """
@@ -34,7 +34,7 @@ def _thread_done(gt, *args, **kwargs):
 
 
 class Thread(object):
-    """ Wrapper around a greenthread, that holds a reference to the
+    """Wrapper around a greenthread, that holds a reference to the
     :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
     it has done so it can be removed from the threads list.
     """
@@ -50,7 +50,7 @@ class Thread(object):
 
 
 class ThreadGroup(object):
-    """ The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup classis to:
 
     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -61,9 +61,16 @@ class ThreadGroup(object):
         self.threads = []
         self.timers = []
 
+    def add_dynamic_timer(self, callback, initial_delay=None,
+                          periodic_interval_max=None, *args, **kwargs):
+        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
+        timer.start(initial_delay=initial_delay,
+                    periodic_interval_max=periodic_interval_max)
+        self.timers.append(timer)
+
     def add_timer(self, interval, callback, initial_delay=None,
                   *args, **kwargs):
-        pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
+        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
         pulse.start(interval=interval,
                     initial_delay=initial_delay)
         self.timers.append(pulse)
@@ -98,7 +105,7 @@ class ThreadGroup(object):
         for x in self.timers:
             try:
                 x.wait()
-            except greenlet.GreenletExit:
+            except eventlet.greenlet.GreenletExit:
                 pass
             except Exception as ex:
                 LOG.exception(ex)
@@ -108,7 +115,7 @@ class ThreadGroup(object):
                 continue
             try:
                 x.wait()
-            except greenlet.GreenletExit:
+            except eventlet.greenlet.GreenletExit:
                 pass
             except Exception as ex:
                 LOG.exception(ex)
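With this change add_timer() wraps its callback in a FixedIntervalLoopingCall, while the new add_dynamic_timer() builds a DynamicLoopingCall whose callback returns the delay before its next run. A hedged sketch of both flavours (the callbacks and the interval values are illustrative only):

    from conductor.openstack.common import threadgroup

    def report_health():
        pass                      # fixed-interval work, runs every 30 seconds

    def poll_tasks():
        return 5                  # dynamic timer: return the delay until the next call

    tg = threadgroup.ThreadGroup()
    tg.add_timer(30, report_health)
    tg.add_dynamic_timer(poll_tasks, initial_delay=None, periodic_interval_max=60)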
diff --git a/conductor/openstack/common/timeutils.py b/conductor/openstack/common/timeutils.py
index 6094365..bd60489 100644
--- a/conductor/openstack/common/timeutils.py
+++ b/conductor/openstack/common/timeutils.py
@@ -23,6 +23,7 @@ import calendar
 import datetime
 
 import iso8601
+import six
 
 
 # ISO 8601 extended time format with microseconds
@@ -32,7 +33,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
 
 
 def isotime(at=None, subsecond=False):
-    """Stringify time in ISO 8601 format"""
+    """Stringify time in ISO 8601 format."""
     if not at:
         at = utcnow()
     st = at.strftime(_ISO8601_TIME_FORMAT
@@ -44,7 +45,7 @@ def isotime(at=None, subsecond=False):
 
 
 def parse_isotime(timestr):
-    """Parse time from ISO 8601 format"""
+    """Parse time from ISO 8601 format."""
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
@@ -66,7 +67,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
 
 
 def normalize_time(timestamp):
-    """Normalize time in arbitrary timezone to UTC naive object"""
+    """Normalize time in arbitrary timezone to UTC naive object."""
     offset = timestamp.utcoffset()
     if offset is None:
         return timestamp
@@ -75,14 +76,14 @@ def normalize_time(timestamp):
 
 def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
-    if isinstance(before, basestring):
+    if isinstance(before, six.string_types):
         before = parse_strtime(before).replace(tzinfo=None)
     return utcnow() - before > datetime.timedelta(seconds=seconds)
 
 
 def is_newer_than(after, seconds):
     """Return True if after is newer than seconds."""
-    if isinstance(after, basestring):
+    if isinstance(after, six.string_types):
         after = parse_strtime(after).replace(tzinfo=None)
     return after - utcnow() > datetime.timedelta(seconds=seconds)
 
@@ -103,7 +104,7 @@ def utcnow():
 
 
 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp"""
+    """Returns a iso8601 formated date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))
 
 
@@ -111,9 +112,9 @@ utcnow.override_time = None
 
 
 def set_time_override(override_time=datetime.datetime.utcnow()):
-    """
-    Override utils.utcnow to return a constant time or a list thereof,
-    one at a time.
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
     """
     utcnow.override_time = override_time
 
@@ -141,7 +142,8 @@ def clear_time_override():
 def marshall_now(now=None):
     """Make an rpc-safe datetime with microseconds.
 
-    Note: tzinfo is stripped, but not required for relative times."""
+    Note: tzinfo is stripped, but not required for relative times.
+    """
     if not now:
         now = utcnow()
     return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@@ -161,7 +163,8 @@ def unmarshall_time(tyme):
 
 
 def delta_seconds(before, after):
-    """
+    """Return the difference between two timing objects.
+
     Compute the difference in seconds between two date, time, or
     datetime objects (as a float, to microsecond resolution).
     """
@@ -174,8 +177,7 @@ def delta_seconds(before, after):
 
 
 def is_soon(dt, window):
-    """
-    Determines if time is going to happen in the next window seconds.
+    """Determines if time is going to happen in the next window seconds.
 
     :params dt: the time
     :params window: minimum seconds to remain to consider the time not soon
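Replacing basestring with six.string_types keeps the string-accepting helpers usable on both Python 2 and Python 3. A small sketch of the behaviour the change preserves:

    from conductor.openstack.common import timeutils

    # Both helpers accept either a datetime or a string in PERFECT_TIME_FORMAT.
    print(timeutils.is_older_than('2013-01-01T00:00:00.000000', 3600))   # True for past dates
    print(timeutils.is_newer_than('2013-01-01T00:00:00.000000', 3600))   # False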
diff --git a/conductor/openstack/common/version.py b/conductor/openstack/common/version.py
deleted file mode 100644
index 080a89e..0000000
--- a/conductor/openstack/common/version.py
+++ /dev/null
@@ -1,94 +0,0 @@
-
-#    Copyright 2012 OpenStack Foundation
-#    Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Utilities for consuming the version from pkg_resources.
-"""
-
-import pkg_resources
-
-
-class VersionInfo(object):
-
-    def __init__(self, package):
-        """Object that understands versioning for a package
-        :param package: name of the python package, such as glance, or
-                        python-glanceclient
-        """
-        self.package = package
-        self.release = None
-        self.version = None
-        self._cached_version = None
-
-    def __str__(self):
-        """Make the VersionInfo object behave like a string."""
-        return self.version_string()
-
-    def __repr__(self):
-        """Include the name."""
-        return "VersionInfo(%s:%s)" % (self.package, self.version_string())
-
-    def _get_version_from_pkg_resources(self):
-        """Get the version of the package from the pkg_resources record
-        associated with the package."""
-        try:
-            requirement = pkg_resources.Requirement.parse(self.package)
-            provider = pkg_resources.get_provider(requirement)
-            return provider.version
-        except pkg_resources.DistributionNotFound:
-            # The most likely cause for this is running tests in a tree
-            # produced from a tarball where the package itself has not been
-            # installed into anything. Revert to setup-time logic.
-            from conductor.openstack.common import setup
-            return setup.get_version(self.package)
-
-    def release_string(self):
-        """Return the full version of the package including suffixes indicating
-        VCS status.
-        """
-        if self.release is None:
-            self.release = self._get_version_from_pkg_resources()
-
-        return self.release
-
-    def version_string(self):
-        """Return the short version minus any alpha/beta tags."""
-        if self.version is None:
-            parts = []
-            for part in self.release_string().split('.'):
-                if part[0].isdigit():
-                    parts.append(part)
-                else:
-                    break
-            self.version = ".".join(parts)
-
-        return self.version
-
-    # Compatibility functions
-    canonical_version_string = version_string
-    version_string_with_vcs = release_string
-
-    def cached_version_string(self, prefix=""):
-        """Generate an object which will expand in a string context to
-        the results of version_string(). We do this so that don't
-        call into pkg_resources every time we start up a program when
-        passing version information into the CONF constructor, but
-        rather only do the calculation when and if a version is requested
-        """
-        if not self._cached_version:
-            self._cached_version = "%s%s" % (prefix,
-                                             self.version_string())
-        return self._cached_version
diff --git a/conductor/openstack/common/wsgi.py b/conductor/openstack/common/wsgi.py
deleted file mode 100644
index fd9f7f4..0000000
--- a/conductor/openstack/common/wsgi.py
+++ /dev/null
@@ -1,797 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Utility methods for working with WSGI servers."""
-
-import eventlet
-eventlet.patcher.monkey_patch(all=False, socket=True)
-
-import datetime
-import errno
-import socket
-import sys
-import time
-
-import eventlet.wsgi
-from oslo.config import cfg
-import routes
-import routes.middleware
-import webob.dec
-import webob.exc
-from xml.dom import minidom
-from xml.parsers import expat
-
-from conductor.openstack.common import exception
-from conductor.openstack.common.gettextutils import _
-from conductor.openstack.common import jsonutils
-from conductor.openstack.common import log as logging
-from conductor.openstack.common import service
-from conductor.openstack.common import sslutils
-from conductor.openstack.common import xmlutils
-
-socket_opts = [
-    cfg.IntOpt('backlog',
-               default=4096,
-               help="Number of backlog requests to configure the socket with"),
-    cfg.IntOpt('tcp_keepidle',
-               default=600,
-               help="Sets the value of TCP_KEEPIDLE in seconds for each "
-                    "server socket. Not supported on OS X."),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(socket_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-def run_server(application, port):
-    """Run a WSGI server with the given application."""
-    sock = eventlet.listen(('0.0.0.0', port))
-    eventlet.wsgi.server(sock, application)
-
-
-class Service(service.Service):
-    """
-    Provides a Service API for wsgi servers.
-
-    This gives us the ability to launch wsgi servers with the
-    Launcher classes in service.py.
-    """
-
-    def __init__(self, application, port,
-                 host='0.0.0.0', backlog=4096, threads=1000):
-        self.application = application
-        self._port = port
-        self._host = host
-        self._backlog = backlog if backlog else CONF.backlog
-        super(Service, self).__init__(threads)
-
-    def _get_socket(self, host, port, backlog):
-        # TODO(dims): eventlet's green dns/socket module does not actually
-        # support IPv6 in getaddrinfo(). We need to get around this in the
-        # future or monitor upstream for a fix
-        info = socket.getaddrinfo(host,
-                                  port,
-                                  socket.AF_UNSPEC,
-                                  socket.SOCK_STREAM)[0]
-        family = info[0]
-        bind_addr = info[-1]
-
-        sock = None
-        retry_until = time.time() + 30
-        while not sock and time.time() < retry_until:
-            try:
-                sock = eventlet.listen(bind_addr,
-                                       backlog=backlog,
-                                       family=family)
-                if sslutils.is_enabled():
-                    sock = sslutils.wrap(sock)
-
-            except socket.error, err:
-                if err.args[0] != errno.EADDRINUSE:
-                    raise
-                eventlet.sleep(0.1)
-        if not sock:
-            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
-                               "after trying for 30 seconds") %
-                               {'host': host, 'port': port})
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        # sockets can hang around forever without keepalive
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-
-        # This option isn't available in the OS X version of eventlet
-        if hasattr(socket, 'TCP_KEEPIDLE'):
-            sock.setsockopt(socket.IPPROTO_TCP,
-                            socket.TCP_KEEPIDLE,
-                            CONF.tcp_keepidle)
-
-        return sock
-
-    def start(self):
-        """Start serving this service using the provided server instance.
-
-        :returns: None
-
-        """
-        super(Service, self).start()
-        self._socket = self._get_socket(self._host, self._port, self._backlog)
-        self.tg.add_thread(self._run, self.application, self._socket)
-
-    @property
-    def backlog(self):
-        return self._backlog
-
-    @property
-    def host(self):
-        return self._socket.getsockname()[0] if self._socket else self._host
-
-    @property
-    def port(self):
-        return self._socket.getsockname()[1] if self._socket else self._port
-
-    def stop(self):
-        """Stop serving this API.
-
-        :returns: None
-
-        """
-        super(Service, self).stop()
-
-    def _run(self, application, socket):
-        """Start a WSGI server in a new green thread."""
-        logger = logging.getLogger('eventlet.wsgi')
-        eventlet.wsgi.server(socket,
-                             application,
-                             custom_pool=self.tg.pool,
-                             log=logging.WritableLogger(logger))
-
-
-class Middleware(object):
-    """
-    Base WSGI middleware wrapper. These classes require an application to be
-    initialized that will be called next.  By default the middleware will
-    simply call its wrapped app, or you can override __call__ to customize its
-    behavior.
-    """
-
-    def __init__(self, application):
-        self.application = application
-
-    def process_request(self, req):
-        """
-        Called on each request.
-
-        If this returns None, the next application down the stack will be
-        executed. If it returns a response then that response will be returned
-        and execution will stop here.
-        """
-        return None
-
-    def process_response(self, response):
-        """Do whatever you'd like to the response."""
-        return response
-
-    @webob.dec.wsgify
-    def __call__(self, req):
-        response = self.process_request(req)
-        if response:
-            return response
-        response = req.get_response(self.application)
-        return self.process_response(response)
-
-
-class Debug(Middleware):
-    """
-    Helper class that can be inserted into any WSGI application chain
-    to get information about the request and response.
-    """
-
-    @webob.dec.wsgify
-    def __call__(self, req):
-        print ("*" * 40) + " REQUEST ENVIRON"
-        for key, value in req.environ.items():
-            print key, "=", value
-        print
-        resp = req.get_response(self.application)
-
-        print ("*" * 40) + " RESPONSE HEADERS"
-        for (key, value) in resp.headers.iteritems():
-            print key, "=", value
-        print
-
-        resp.app_iter = self.print_generator(resp.app_iter)
-
-        return resp
-
-    @staticmethod
-    def print_generator(app_iter):
-        """
-        Iterator that prints the contents of a wrapper string iterator
-        when iterated.
-        """
-        print ("*" * 40) + " BODY"
-        for part in app_iter:
-            sys.stdout.write(part)
-            sys.stdout.flush()
-            yield part
-        print
-
-
-class Router(object):
-
-    """
-    WSGI middleware that maps incoming requests to WSGI apps.
-    """
-
-    def __init__(self, mapper):
-        """
-        Create a router for the given routes.Mapper.
-
-        Each route in `mapper` must specify a 'controller', which is a
-        WSGI app to call.  You'll probably want to specify an 'action' as
-        well and have your controller be a wsgi.Controller, who will route
-        the request to the action method.
-
-        Examples:
-          mapper = routes.Mapper()
-          sc = ServerController()
-
-          # Explicit mapping of one route to a controller+action
-          mapper.connect(None, "/svrlist", controller=sc, action="list")
-
-          # Actions are all implicitly defined
-          mapper.resource("server", "servers", controller=sc)
-
-          # Pointing to an arbitrary WSGI app.  You can specify the
-          # {path_info:.*} parameter so the target app can be handed just that
-          # section of the URL.
-          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
-        """
-        self.map = mapper
-        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
-                                                          self.map)
-
-    @webob.dec.wsgify
-    def __call__(self, req):
-        """
-        Route the incoming request to a controller based on self.map.
-        If no match, return a 404.
-        """
-        return self._router
-
-    @staticmethod
-    @webob.dec.wsgify
-    def _dispatch(req):
-        """
-        Called by self._router after matching the incoming request to a route
-        and putting the information into req.environ.  Either returns 404
-        or the routed WSGI app's response.
-        """
-        match = req.environ['wsgiorg.routing_args'][1]
-        if not match:
-            return webob.exc.HTTPNotFound()
-        app = match['controller']
-        return app
-
-
-class Request(webob.Request):
-    """Add some Openstack API-specific logic to the base webob.Request."""
-
-    default_request_content_types = ('application/json', 'application/xml')
-    default_accept_types = ('application/json', 'application/xml')
-    default_accept_type = 'application/json'
-
-    def best_match_content_type(self, supported_content_types=None):
-        """Determine the requested response content-type.
-
-        Based on the query extension then the Accept header.
-        Defaults to default_accept_type if we don't find a preference
-
-        """
-        supported_content_types = (supported_content_types or
-                                   self.default_accept_types)
-
-        parts = self.path.rsplit('.', 1)
-        if len(parts) > 1:
-            ctype = 'application/{0}'.format(parts[1])
-            if ctype in supported_content_types:
-                return ctype
-
-        bm = self.accept.best_match(supported_content_types)
-        return bm or self.default_accept_type
-
-    def get_content_type(self, allowed_content_types=None):
-        """Determine content type of the request body.
-
-        Does not do any body introspection, only checks header
-
-        """
-        if "Content-Type" not in self.headers:
-            return None
-
-        content_type = self.content_type
-        allowed_content_types = (allowed_content_types or
-                                 self.default_request_content_types)
-
-        if content_type not in allowed_content_types:
-            raise exception.InvalidContentType(content_type=content_type)
-        return content_type
-
-
-class Resource(object):
-    """
-    WSGI app that handles (de)serialization and controller dispatch.
-
-    Reads routing information supplied by RoutesMiddleware and calls
-    the requested action method upon its deserializer, controller,
-    and serializer. Those three objects may implement any of the basic
-    controller action methods (create, update, show, index, delete)
-    along with any that may be specified in the api router. A 'default'
-    method may also be implemented to be used in place of any
-    non-implemented actions. Deserializer methods must accept a request
-    argument and return a dictionary. Controller methods must accept a
-    request argument. Additionally, they must also accept keyword
-    arguments that represent the keys returned by the Deserializer. They
-    may raise a webob.exc exception or return a dict, which will be
-    serialized by requested content type.
-    """
-    def __init__(self, controller, deserializer=None, serializer=None):
-        """
-        :param controller: object that implement methods created by routes lib
-        :param deserializer: object that supports webob request deserialization
-                             through controller-like actions
-        :param serializer: object that supports webob response serialization
-                           through controller-like actions
-        """
-        self.controller = controller
-        self.serializer = serializer or ResponseSerializer()
-        self.deserializer = deserializer or RequestDeserializer()
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, request):
-        """WSGI method that controls (de)serialization and method dispatch."""
-
-        try:
-            action, action_args, accept = self.deserialize_request(request)
-        except exception.InvalidContentType:
-            msg = _("Unsupported Content-Type")
-            return webob.exc.HTTPUnsupportedMediaType(explanation=msg)
-        except exception.MalformedRequestBody:
-            msg = _("Malformed request body")
-            return webob.exc.HTTPBadRequest(explanation=msg)
-
-        action_result = self.execute_action(action, request, **action_args)
-        try:
-            return self.serialize_response(action, action_result, accept)
-        # return unserializable result (typically a webob exc)
-        except Exception:
-            return action_result
-
-    def deserialize_request(self, request):
-        return self.deserializer.deserialize(request)
-
-    def serialize_response(self, action, action_result, accept):
-        return self.serializer.serialize(action_result, accept, action)
-
-    def execute_action(self, action, request, **action_args):
-        return self.dispatch(self.controller, action, request, **action_args)
-
-    def dispatch(self, obj, action, *args, **kwargs):
-        """Find action-specific method on self and call it."""
-        try:
-            method = getattr(obj, action)
-        except AttributeError:
-            method = getattr(obj, 'default')
-
-        return method(*args, **kwargs)
-
-    def get_action_args(self, request_environment):
-        """Parse dictionary created by routes library."""
-        try:
-            args = request_environment['wsgiorg.routing_args'][1].copy()
-        except Exception:
-            return {}
-
-        try:
-            del args['controller']
-        except KeyError:
-            pass
-
-        try:
-            del args['format']
-        except KeyError:
-            pass
-
-        return args
-
-
-class ActionDispatcher(object):
-    """Maps method name to local methods through action name."""
-
-    def dispatch(self, *args, **kwargs):
-        """Find and call local method."""
-        action = kwargs.pop('action', 'default')
-        action_method = getattr(self, str(action), self.default)
-        return action_method(*args, **kwargs)
-
-    def default(self, data):
-        raise NotImplementedError()
-
-
-class DictSerializer(ActionDispatcher):
-    """Default request body serialization"""
-
-    def serialize(self, data, action='default'):
-        return self.dispatch(data, action=action)
-
-    def default(self, data):
-        return ""
-
-
-class JSONDictSerializer(DictSerializer):
-    """Default JSON request body serialization"""
-
-    def default(self, data):
-        def sanitizer(obj):
-            if isinstance(obj, datetime.datetime):
-                _dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
-                return _dtime.isoformat()
-            return unicode(obj)
-        return jsonutils.dumps(data, default=sanitizer)
-
-
-class XMLDictSerializer(DictSerializer):
-
-    def __init__(self, metadata=None, xmlns=None):
-        """
-        :param metadata: information needed to deserialize xml into
-                         a dictionary.
-        :param xmlns: XML namespace to include with serialized xml
-        """
-        super(XMLDictSerializer, self).__init__()
-        self.metadata = metadata or {}
-        self.xmlns = xmlns
-
-    def default(self, data):
-        # We expect data to contain a single key which is the XML root.
-        root_key = data.keys()[0]
-        doc = minidom.Document()
-        node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
-
-        return self.to_xml_string(node)
-
-    def to_xml_string(self, node, has_atom=False):
-        self._add_xmlns(node, has_atom)
-        return node.toprettyxml(indent='    ', encoding='UTF-8')
-
-    #NOTE (ameade): the has_atom should be removed after all of the
-    # xml serializers and view builders have been updated to the current
-    # spec that required all responses include the xmlns:atom, the has_atom
-    # flag is to prevent current tests from breaking
-    def _add_xmlns(self, node, has_atom=False):
-        if self.xmlns is not None:
-            node.setAttribute('xmlns', self.xmlns)
-        if has_atom:
-            node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
-
-    def _to_xml_node(self, doc, metadata, nodename, data):
-        """Recursive method to convert data members to XML nodes."""
-        result = doc.createElement(nodename)
-
-        # Set the xml namespace if one is specified
-        # TODO(justinsb): We could also use prefixes on the keys
-        xmlns = metadata.get('xmlns', None)
-        if xmlns:
-            result.setAttribute('xmlns', xmlns)
-
-        #TODO(bcwaldon): accomplish this without a type-check
-        if type(data) is list:
-            collections = metadata.get('list_collections', {})
-            if nodename in collections:
-                metadata = collections[nodename]
-                for item in data:
-                    node = doc.createElement(metadata['item_name'])
-                    node.setAttribute(metadata['item_key'], str(item))
-                    result.appendChild(node)
-                return result
-            singular = metadata.get('plurals', {}).get(nodename, None)
-            if singular is None:
-                if nodename.endswith('s'):
-                    singular = nodename[:-1]
-                else:
-                    singular = 'item'
-            for item in data:
-                node = self._to_xml_node(doc, metadata, singular, item)
-                result.appendChild(node)
-        #TODO(bcwaldon): accomplish this without a type-check
-        elif type(data) is dict:
-            collections = metadata.get('dict_collections', {})
-            if nodename in collections:
-                metadata = collections[nodename]
-                for k, v in data.items():
-                    node = doc.createElement(metadata['item_name'])
-                    node.setAttribute(metadata['item_key'], str(k))
-                    text = doc.createTextNode(str(v))
-                    node.appendChild(text)
-                    result.appendChild(node)
-                return result
-            attrs = metadata.get('attributes', {}).get(nodename, {})
-            for k, v in data.items():
-                if k in attrs:
-                    result.setAttribute(k, str(v))
-                else:
-                    node = self._to_xml_node(doc, metadata, k, v)
-                    result.appendChild(node)
-        else:
-            # Type is atom
-            node = doc.createTextNode(str(data))
-            result.appendChild(node)
-        return result
-
-    def _create_link_nodes(self, xml_doc, links):
-        link_nodes = []
-        for link in links:
-            link_node = xml_doc.createElement('atom:link')
-            link_node.setAttribute('rel', link['rel'])
-            link_node.setAttribute('href', link['href'])
-            if 'type' in link:
-                link_node.setAttribute('type', link['type'])
-            link_nodes.append(link_node)
-        return link_nodes
-
-
-class ResponseHeadersSerializer(ActionDispatcher):
-    """Default response headers serialization"""
-
-    def serialize(self, response, data, action):
-        self.dispatch(response, data, action=action)
-
-    def default(self, response, data):
-        response.status_int = 200
-
-
-class ResponseSerializer(object):
-    """Encode the necessary pieces into a response object"""
-
-    def __init__(self, body_serializers=None, headers_serializer=None):
-        self.body_serializers = {
-            'application/xml': XMLDictSerializer(),
-            'application/json': JSONDictSerializer(),
-        }
-        self.body_serializers.update(body_serializers or {})
-
-        self.headers_serializer = (headers_serializer or
-                                   ResponseHeadersSerializer())
-
-    def serialize(self, response_data, content_type, action='default'):
-        """Serialize a dict into a string and wrap in a wsgi.Request object.
-
-        :param response_data: dict produced by the Controller
-        :param content_type: expected mimetype of serialized response body
-
-        """
-        response = webob.Response()
-        self.serialize_headers(response, response_data, action)
-        self.serialize_body(response, response_data, content_type, action)
-        return response
-
-    def serialize_headers(self, response, data, action):
-        self.headers_serializer.serialize(response, data, action)
-
-    def serialize_body(self, response, data, content_type, action):
-        response.headers['Content-Type'] = content_type
-        if data is not None:
-            serializer = self.get_body_serializer(content_type)
-            response.body = serializer.serialize(data, action)
-
-    def get_body_serializer(self, content_type):
-        try:
-            return self.body_serializers[content_type]
-        except (KeyError, TypeError):
-            raise exception.InvalidContentType(content_type=content_type)
-
-
-class RequestHeadersDeserializer(ActionDispatcher):
-    """Default request headers deserializer"""
-
-    def deserialize(self, request, action):
-        return self.dispatch(request, action=action)
-
-    def default(self, request):
-        return {}
-
-
-class RequestDeserializer(object):
-    """Break up a Request object into more useful pieces."""
-
-    def __init__(self, body_deserializers=None, headers_deserializer=None,
-                 supported_content_types=None):
-
-        self.supported_content_types = supported_content_types
-
-        self.body_deserializers = {
-            'application/xml': XMLDeserializer(),
-            'application/json': JSONDeserializer(),
-        }
-        self.body_deserializers.update(body_deserializers or {})
-
-        self.headers_deserializer = (headers_deserializer or
-                                     RequestHeadersDeserializer())
-
-    def deserialize(self, request):
-        """Extract necessary pieces of the request.
-
-        :param request: Request object
-        :returns: tuple of (expected controller action name, dictionary of
-                  keyword arguments to pass to the controller, the expected
-                  content type of the response)
-
-        """
-        action_args = self.get_action_args(request.environ)
-        action = action_args.pop('action', None)
-
-        action_args.update(self.deserialize_headers(request, action))
-        action_args.update(self.deserialize_body(request, action))
-
-        accept = self.get_expected_content_type(request)
-
-        return (action, action_args, accept)
-
-    def deserialize_headers(self, request, action):
-        return self.headers_deserializer.deserialize(request, action)
-
-    def deserialize_body(self, request, action):
-        if not len(request.body) > 0:
-            LOG.debug(_("Empty body provided in request"))
-            return {}
-
-        try:
-            content_type = request.get_content_type()
-        except exception.InvalidContentType:
-            LOG.debug(_("Unrecognized Content-Type provided in request"))
-            raise
-
-        if content_type is None:
-            LOG.debug(_("No Content-Type provided in request"))
-            return {}
-
-        try:
-            deserializer = self.get_body_deserializer(content_type)
-        except exception.InvalidContentType:
-            LOG.debug(_("Unable to deserialize body as provided Content-Type"))
-            raise
-
-        return deserializer.deserialize(request.body, action)
-
-    def get_body_deserializer(self, content_type):
-        try:
-            return self.body_deserializers[content_type]
-        except (KeyError, TypeError):
-            raise exception.InvalidContentType(content_type=content_type)
-
-    def get_expected_content_type(self, request):
-        return request.best_match_content_type(self.supported_content_types)
-
-    def get_action_args(self, request_environment):
-        """Parse dictionary created by routes library."""
-        try:
-            args = request_environment['wsgiorg.routing_args'][1].copy()
-        except Exception:
-            return {}
-
-        try:
-            del args['controller']
-        except KeyError:
-            pass
-
-        try:
-            del args['format']
-        except KeyError:
-            pass
-
-        return args
-
-
-class TextDeserializer(ActionDispatcher):
-    """Default request body deserialization"""
-
-    def deserialize(self, datastring, action='default'):
-        return self.dispatch(datastring, action=action)
-
-    def default(self, datastring):
-        return {}
-
-
-class JSONDeserializer(TextDeserializer):
-
-    def _from_json(self, datastring):
-        try:
-            return jsonutils.loads(datastring)
-        except ValueError:
-            msg = _("cannot understand JSON")
-            raise exception.MalformedRequestBody(reason=msg)
-
-    def default(self, datastring):
-        return {'body': self._from_json(datastring)}
-
-
-class XMLDeserializer(TextDeserializer):
-
-    def __init__(self, metadata=None):
-        """
-        :param metadata: information needed to deserialize xml into
-                         a dictionary.
-        """
-        super(XMLDeserializer, self).__init__()
-        self.metadata = metadata or {}
-
-    def _from_xml(self, datastring):
-        plurals = set(self.metadata.get('plurals', {}))
-
-        try:
-            node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0]
-            return {node.nodeName: self._from_xml_node(node, plurals)}
-        except expat.ExpatError:
-            msg = _("cannot understand XML")
-            raise exception.MalformedRequestBody(reason=msg)
-
-    def _from_xml_node(self, node, listnames):
-        """Convert a minidom node to a simple Python type.
-
-        :param listnames: list of XML node names whose subnodes should
-                          be considered list items.
-
-        """
-
-        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
-            return node.childNodes[0].nodeValue
-        elif node.nodeName in listnames:
-            return [self._from_xml_node(n, listnames) for n in node.childNodes]
-        else:
-            result = dict()
-            for attr in node.attributes.keys():
-                result[attr] = node.attributes[attr].nodeValue
-            for child in node.childNodes:
-                if child.nodeType != node.TEXT_NODE:
-                    result[child.nodeName] = self._from_xml_node(child,
-                                                                 listnames)
-            return result
-
-    def find_first_child_named(self, parent, name):
-        """Search a nodes children for the first child with a given name"""
-        for node in parent.childNodes:
-            if node.nodeName == name:
-                return node
-        return None
-
-    def find_children_named(self, parent, name):
-        """Return all of a nodes children who have the given name"""
-        for node in parent.childNodes:
-            if node.nodeName == name:
-                yield node
-
-    def extract_text(self, node):
-        """Get the text field contained by the given node"""
-        if len(node.childNodes) == 1:
-            child = node.childNodes[0]
-            if child.nodeType == child.TEXT_NODE:
-                return child.nodeValue
-        return ""
-
-    def default(self, datastring):
-        return {'body': self._from_xml(datastring)}
diff --git a/conductor/openstack/common/xmlutils.py b/conductor/openstack/common/xmlutils.py
index 3370048..b131d3e 100644
--- a/conductor/openstack/common/xmlutils.py
+++ b/conductor/openstack/common/xmlutils.py
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2013 IBM
+# Copyright 2013 IBM Corp.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
diff --git a/conductor/rabbitmq.py b/conductor/rabbitmq.py
deleted file mode 100644
index a9776e5..0000000
--- a/conductor/rabbitmq.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from eventlet import patcher
-puka = patcher.import_patched('puka')
-#import puka
-import anyjson
-import config
-
-
-class RmqClient(object):
-    def __init__(self):
-        settings = config.CONF.rabbitmq
-        self._client = puka.Client('amqp://{0}:{1}@{2}:{3}/{4}'.format(
-            settings.login,
-            settings.password,
-            settings.host,
-            settings.port,
-            settings.virtual_host
-        ))
-        self._connected = False
-
-    def __enter__(self):
-        self.connect()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.close()
-        return False
-
-    def connect(self):
-        if not self._connected:
-            promise = self._client.connect()
-            self._client.wait(promise, timeout=10000)
-            self._connected = True
-
-    def close(self):
-        if self._connected:
-            self._client.close()
-            self._connected = False
-
-    def declare(self, queue, exchange=None):
-        promise = self._client.queue_declare(str(queue), durable=True)
-        self._client.wait(promise)
-
-        if exchange:
-            promise = self._client.exchange_declare(
-                str(exchange),
-                durable=True)
-            self._client.wait(promise)
-            promise = self._client.queue_bind(
-                str(queue), str(exchange), routing_key=str(queue))
-            self._client.wait(promise)
-
-    def send(self, message, key, exchange='', timeout=None):
-        if not self._connected:
-            raise RuntimeError('Not connected to RabbitMQ')
-
-        headers = {'message_id': message.id}
-
-        promise = self._client.basic_publish(
-            exchange=str(exchange),
-            routing_key=str(key),
-            body=anyjson.dumps(message.body),
-            headers=headers)
-        self._client.wait(promise, timeout=timeout)
-
-    def open(self, queue):
-        if not self._connected:
-            raise RuntimeError('Not connected to RabbitMQ')
-
-        return Subscription(self._client, queue)
-
-
-class Subscription(object):
-    def __init__(self, client, queue):
-        self._client = client
-        self._queue = queue
-        self._promise = None
-        self._lastMessage = None
-
-    def __enter__(self):
-        self._promise = self._client.basic_consume(
-            queue=self._queue,
-            prefetch_count=1)
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self._ack_last()
-        promise = self._client.basic_cancel(self._promise)
-        self._client.wait(promise)
-        return False
-
-    def _ack_last(self):
-        if self._lastMessage:
-            self._client.basic_ack(self._lastMessage)
-            self._lastMessage = None
-
-    def get_message(self, timeout=None):
-        if not self._promise:
-            raise RuntimeError(
-                "Subscription object must be used within 'with' block")
-        self._ack_last()
-        self._lastMessage = self._client.wait(self._promise, timeout=timeout)
-        msg = Message()
-        msg.body = anyjson.loads(self._lastMessage['body'])
-        msg.id = self._lastMessage['headers'].get('message_id')
-        return msg
-
-
-class Message(object):
-    def __init__(self):
-        self._body = {}
-        self._id = ''
-
-    @property
-    def body(self):
-        return self._body
-
-    @body.setter
-    def body(self, value):
-        self._body = value
-
-    @property
-    def id(self):
-        return self._id
-
-    @id.setter
-    def id(self, value):
-        self._id = value or ''
diff --git a/conductor/reporting.py b/conductor/reporting.py
index d9d5fdf..293650f 100644
--- a/conductor/reporting.py
+++ b/conductor/reporting.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import xml_code_engine
-import rabbitmq
+from muranocommon.mq import Message
 
 
 class Reporter(object):
@@ -32,7 +32,7 @@ class Reporter(object):
             'environment_id': self._environment_id
         }
 
-        msg = rabbitmq.Message()
+        msg = Message()
         msg.body = body
         msg.id = self._task_id
 
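The reporter now builds messages from muranocommon instead of the deleted conductor.rabbitmq module. A hedged sketch, assuming muranocommon's Message keeps the body/id attributes that the old rabbitmq.Message exposed and that the hunk above relies on:

    from muranocommon.mq import Message

    msg = Message()
    msg.body = {'id': 'environment-object-id', 'text': 'Deployment started'}
    msg.id = 'task-id'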
diff --git a/conductor/version.py b/conductor/version.py
deleted file mode 100644
index f17cad4..0000000
--- a/conductor/version.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#    Copyright 2012 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from conductor.openstack.common import version as common_version
-
-version_info = common_version.VersionInfo('conductor')
diff --git a/etc/conductor.conf b/etc/conductor.conf
index e1a9ba2..d071d5f 100644
--- a/etc/conductor.conf
+++ b/etc/conductor.conf
@@ -9,7 +9,7 @@ verbose=True
 
 # Directory where conductor's data directory located. 
 # "data" must be subdirectory to this.
-data_dir = /etc/murano-conductor
+#data_dir = /etc/murano-conductor
 
 [heat]
 
diff --git a/openstack-common.conf b/openstack-common.conf
index 0437737..bf48e12 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,24 @@
 [DEFAULT]
 
 # The list of modules to copy from openstack-common
-modules=setup,wsgi,config,exception,gettextutils,importutils,jsonutils,log,xmlutils,sslutils,service,notifier,local,install_venv_common,version,timeutils,eventlet_backdoor,threadgroup,loopingcall,uuidutils
+module=exception
+module=gettextutils
+module=importutils
+module=jsonutils
+module=log
+module=xmlutils
+module=sslutils
+module=service
+module=notifier
+module=local
+module=install_venv_common
+module=timeutils
+module=eventlet_backdoor
+module=threadgroup
+module=loopingcall
+module=uuidutils
+module=fileutils
+module=lockutils
 
 # The base module to hold the copy of openstack.common
 base=conductor
\ No newline at end of file
diff --git a/tools/pip-requires b/requirements.txt
similarity index 52%
rename from tools/pip-requires
rename to requirements.txt
index 47a1e71..fbd5e63 100644
--- a/tools/pip-requires
+++ b/requirements.txt
@@ -7,7 +7,8 @@ PasteDeploy
 iso8601>=0.1.4
 python-heatclient==0.2.1
 jsonschema==2.0.0
+netaddr
 
-#http://tarballs.openstack.org/oslo-config/oslo-config-2013.1b4.tar.gz#egg=oslo-config
 oslo.config
 deep
+http://github.com/sergmelikyan/murano-common/releases/download/0.1/muranocommon-0.1.tar.gz#egg=muranocommon-0.1
diff --git a/setup.cfg b/setup.cfg
index 6e6f655..23c69fc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,34 @@
+[metadata]
+name = conductor
+summary = The Conductor is an orchestration engine server
+version = 2013.1
+description-file =
+    README.rst
+license = Apache License, Version 2.0
+author = Mirantis, Inc.
+author-email = murano-all@lists.openstack.org
+home-page = https://launchpad.net/murano
+classifier =
+    Development Status :: 5 - Production/Stable
+    Environment :: OpenStack
+    Intended Audience :: Developers
+    Intended Audience :: Information Technology
+    License :: OSI Approved :: Apache Software License
+    Operating System :: OS Independent
+    Programming Language :: Python
+
+[files]
+packages =
+    conductor
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
+
+[entry_points]
+console_scripts =
+    conductor = conductor.cmd.run:main
+
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
diff --git a/setup.py b/setup.py
index fb9da8c..07fba76 100644
--- a/setup.py
+++ b/setup.py
@@ -1,49 +1,26 @@
 #!/usr/bin/python
-# Copyright (c) 2010 OpenStack, LLC.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    Copyright (c) 2013 Mirantis, Inc.
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
 import setuptools
 
-from conductor.openstack.common import setup
-
-requires = setup.parse_requirements()
-depend_links = setup.parse_dependency_links()
-project = 'conductor'
 
 setuptools.setup(
-    name=project,
-    version=setup.get_version(project, '2013.1'),
-    description='The Conductor is orchestration engine server',
-    license='Apache License (2.0)',
-    author='Mirantis, Inc.',
-    author_email='openstack@lists.launchpad.net',
-    url='http://conductor.openstack.org/',
-    packages=setuptools.find_packages(exclude=['bin']),
-    test_suite='nose.collector',
-    cmdclass=setup.get_cmdclass(),
-    include_package_data=True,
-    install_requires=requires,
-    dependency_links=depend_links,
-    classifiers=[
-        'Development Status :: 4 - Beta',
-        'License :: OSI Approved :: Apache Software License',
-        'Operating System :: POSIX :: Linux',
-        'Programming Language :: Python :: 2.7',
-        'Environment :: No Input/Output (Daemon)',
-        'Environment :: OpenStack',
+    setup_requires=[
+        'd2to1>=0.2.10,<0.3',
+        'pbr>=0.5,<0.6'
     ],
-    scripts=['bin/conductor'],
-    py_modules=[]
+    d2to1=True,
 )
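With d2to1=True and pbr in setup_requires, setup.py becomes a thin shim: name, version, classifiers and console scripts are all read from setup.cfg at build time, so, for example, python setup.py sdist now takes its metadata from setup.cfg rather than from arguments passed to setuptools.setup().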
diff --git a/tools/test-requires b/test-requirements.txt
similarity index 100%
rename from tools/test-requires
rename to test-requirements.txt
diff --git a/tests/conductor/test_methods.py b/tests/conductor/test_methods.py
index 55c790f..2665265 100644
--- a/tests/conductor/test_methods.py
+++ b/tests/conductor/test_methods.py
@@ -15,7 +15,6 @@
 
 import unittest
 from conductor.app import ConductorWorkflowService
-import conductor.rabbitmq as rabbitmq
 from conductor.workflow import Workflow
 import conductor.xml_code_engine as engine
 
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
new file mode 100755
index 0000000..2ea2694
--- /dev/null
+++ b/tools/config/generate_sample.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+print_hint() {
+    echo "Try \`${0##*/} --help' for more information." >&2
+}
+
+PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
+                 --long help,base-dir:,package-name:,output-dir: -- "$@")
+
+if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
+
+eval set -- "$PARSED_OPTIONS"
+
+while true; do
+    case "$1" in
+        -h|--help)
+            echo "${0##*/} [options]"
+            echo ""
+            echo "options:"
+            echo "-h, --help                show brief help"
+            echo "-b, --base-dir=DIR        Project base directory (required)"
+            echo "-p, --package-name=NAME   Project package name"
+            echo "-o, --output-dir=DIR      File output directory"
+            exit 0
+            ;;
+        -b|--base-dir)
+            shift
+            BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
+            shift
+            ;;
+        -p|--package-name)
+            shift
+            PACKAGENAME=`echo $1`
+            shift
+            ;;
+        -o|--output-dir)
+            shift
+            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
+            shift
+            ;;
+        --)
+            break
+            ;;
+    esac
+done
+
+if [ -z $BASEDIR ] || ! [ -d $BASEDIR ]
+then
+    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
+fi
+
+PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
+
+OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
+if ! [ -d $OUTPUTDIR ]
+then
+    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
+    exit 1
+fi
+
+BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
+FILES=$(find $BASEDIR/$PACKAGENAME -type f -name "*.py" ! -path "*/tests/*" \
+        -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
+
+export EVENTLET_NO_GREENDNS=yes
+
+MODULEPATH=conductor.openstack.common.config.generator
+OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
+python -m $MODULEPATH $FILES > $OUTPUTFILE
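The script collects every module under the package that registers Opt() options and feeds the list to conductor.openstack.common.config.generator. Assuming that generator module is synced in, an invocation from the project root such as tools/config/generate_sample.sh -b . -p conductor would rewrite etc/conductor.conf.sample (the default output directory being $BASEDIR/etc).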
diff --git a/tools/install_venv.py b/tools/install_venv.py
index c3b8171..0011a8b 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -4,151 +4,74 @@
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
 #
-# Copyright 2010 OpenStack LLC.
+# Copyright 2010 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 #
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#         http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Installation script for Glance's development virtualenv
-"""
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 
+import ConfigParser
 import os
-import subprocess
 import sys
 
-
-ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-VENV = os.path.join(ROOT, '.venv')
-PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
-TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires')
+import install_venv_common as install_venv  # flake8: noqa
 
 
-def die(message, *args):
-    print >> sys.stderr, message % args
-    sys.exit(1)
-
-
-def run_command(cmd, redirect_output=True, check_exit_code=True):
-    """
-    Runs a command in an out-of-process shell, returning the
-    output of that command.  Working directory is ROOT.
-    """
-    if redirect_output:
-        stdout = subprocess.PIPE
-    else:
-        stdout = None
-
-    proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
-    output = proc.communicate()[0]
-    if check_exit_code and proc.returncode != 0:
-        die('Command "%s" failed.\n%s', ' '.join(cmd), output)
-    return output
-
-
-HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
-                                    check_exit_code=False).strip())
-HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
-                                    check_exit_code=False).strip())
-
-
-def check_dependencies():
-    """Make sure virtualenv is in the path."""
-
-    if not HAS_VIRTUALENV:
-        print 'not found.'
-        # Try installing it via easy_install...
-        if HAS_EASY_INSTALL:
-            print 'Installing virtualenv via easy_install...',
-            if not run_command(['which', 'easy_install']):
-                die('ERROR: virtualenv not found.\n\n'
-                    'Balancer development requires virtualenv, please install'
-                    ' it using your favorite package management tool')
-            print 'done.'
-    print 'done.'
-
-
-def create_virtualenv(venv=VENV):
-    """
-    Creates the virtual environment and installs PIP only into the
-    virtual environment
-    """
-    print 'Creating venv...',
-    run_command(['virtualenv', '-q', '--no-site-packages', VENV])
-    print 'done.'
-    print 'Installing pip in virtualenv...',
-    if not run_command(['tools/with_venv.sh', 'easy_install',
-                        'pip>1.0']).strip():
-        die("Failed to install pip.")
-    print 'done.'
-
-
-def pip_install(*args):
-    run_command(['tools/with_venv.sh',
-                 'pip', 'install', '--upgrade'] + list(args),
-                redirect_output=False)
-
-
-def install_dependencies(venv=VENV):
-    print 'Installing dependencies with pip (this can take a while)...'
-
-    pip_install('pip')
-
-    pip_install('-r', PIP_REQUIRES)
-    pip_install('-r', TEST_REQUIRES)
-
-    # Tell the virtual env how to "import glance"
-    py_ver = _detect_python_version(venv)
-    pthfile = os.path.join(venv, "lib", py_ver,
-                           "site-packages", "balancer.pth")
-    f = open(pthfile, 'w')
-    f.write("%s\n" % ROOT)
-
-
-def _detect_python_version(venv):
-    lib_dir = os.path.join(venv, "lib")
-    for pathname in os.listdir(lib_dir):
-        if pathname.startswith('python'):
-            return pathname
-    raise Exception('Unable to detect Python version')
-
-
-def print_help():
+def print_help(project, venv, root):
     help = """
- Glance development environment setup is complete.
+    %(project)s development environment setup is complete.
 
- Glance development uses virtualenv to track and manage Python dependencies
- while in development and testing.
+    %(project)s development uses virtualenv to track and manage Python
+    dependencies while in development and testing.
 
- To activate the Glance virtualenv for the extent of your current shell session
- you can run:
+    To activate the %(project)s virtualenv for the extent of your current
+    shell session you can run:
 
- $ source .venv/bin/activate
+    $ source %(venv)s/bin/activate
 
- Or, if you prefer, you can run commands in the virtualenv on a case by case
- basis by running:
+    Or, if you prefer, you can run commands in the virtualenv on a case by
+    case basis by running:
 
- $ tools/with_venv.sh <your command>
-
- Also, make test will automatically use the virtualenv.
+    $ %(root)s/tools/with_venv.sh <your command>
     """
-    print help
+    print help % dict(project=project, venv=venv, root=root)
 
 
 def main(argv):
-    check_dependencies()
-    create_virtualenv()
-    install_dependencies()
-    print_help()
+    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+
+    if os.environ.get('tools_path'):
+        root = os.environ['tools_path']
+    venv = os.path.join(root, '.venv')
+    if os.environ.get('venv'):
+        venv = os.environ['venv']
+
+    pip_requires = os.path.join(root, 'requirements.txt')
+    test_requires = os.path.join(root, 'test-requirements.txt')
+    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+    setup_cfg = ConfigParser.ConfigParser()
+    setup_cfg.read('setup.cfg')
+    project = setup_cfg.get('metadata', 'name')
+
+    install = install_venv.InstallVenv(
+        root, venv, pip_requires, test_requires, py_version, project)
+    options = install.parse_args(argv)
+    install.check_python_version()
+    install.check_dependencies()
+    install.create_virtualenv(no_site_packages=options.no_site_packages)
+    install.install_dependencies()
+    install.post_process()
+    print_help(project, venv, root)
 
 if __name__ == '__main__':
     main(sys.argv)
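All of the setup logic now lives in InstallVenv from tools/install_venv_common.py (updated below); a typical developer invocation remains python tools/install_venv.py, optionally with -n/--no-site-packages, and the project name is read from the [metadata] section added to setup.cfg above.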
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 4130656..f428c1e 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -18,10 +18,15 @@
 """Provides methods needed by installation script for OpenStack development
 virtual environments.
 
+Since this script is used to bootstrap a virtualenv from the system's Python
+environment, it should be kept strictly compatible with Python 2.6.
+
 Synced in from openstack-common
 """
 
-import argparse
+from __future__ import print_function
+
+import optparse
 import os
 import subprocess
 import sys
@@ -29,17 +34,18 @@ import sys
 
 class InstallVenv(object):
 
-    def __init__(self, root, venv, pip_requires, test_requires, py_version,
+    def __init__(self, root, venv, requirements,
+                 test_requirements, py_version,
                  project):
         self.root = root
         self.venv = venv
-        self.pip_requires = pip_requires
-        self.test_requires = test_requires
+        self.requirements = requirements
+        self.test_requirements = test_requirements
         self.py_version = py_version
         self.project = project
 
     def die(self, message, *args):
-        print >> sys.stderr, message % args
+        print(message % args, file=sys.stderr)
         sys.exit(1)
 
     def check_python_version(self):
@@ -70,11 +76,13 @@ class InstallVenv(object):
     def get_distro(self):
         if (os.path.exists('/etc/fedora-release') or
                 os.path.exists('/etc/redhat-release')):
-            return Fedora(self.root, self.venv, self.pip_requires,
-                          self.test_requires, self.py_version, self.project)
+            return Fedora(
+                self.root, self.venv, self.requirements,
+                self.test_requirements, self.py_version, self.project)
         else:
-            return Distro(self.root, self.venv, self.pip_requires,
-                          self.test_requires, self.py_version, self.project)
+            return Distro(
+                self.root, self.venv, self.requirements,
+                self.test_requirements, self.py_version, self.project)
 
     def check_dependencies(self):
         self.get_distro().install_virtualenv()
@@ -86,20 +94,15 @@ class InstallVenv(object):
         virtual environment.
         """
         if not os.path.isdir(self.venv):
-            print 'Creating venv...',
+            print('Creating venv...', end=' ')
             if no_site_packages:
                 self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
             else:
                 self.run_command(['virtualenv', '-q', self.venv])
-            print 'done.'
-            print 'Installing pip in venv...',
-            if not self.run_command(['tools/with_venv.sh', 'easy_install',
-                                    'pip>1.0']).strip():
-                self.die("Failed to install pip.")
-            print 'done.'
+            print('done.')
         else:
-            print "venv already exists..."
+            print("venv already exists...")
             pass
 
     def pip_install(self, *args):
@@ -108,35 +111,27 @@ class InstallVenv(object):
                          redirect_output=False)
 
     def install_dependencies(self):
-        print 'Installing dependencies with pip (this can take a while)...'
+        print('Installing dependencies with pip (this can take a while)...')
 
         # First things first, make sure our venv has the latest pip and
-        # distribute.
-        # NOTE: we keep pip at version 1.1 since the most recent version causes
-        # the .venv creation to fail. See:
-        # https://bugs.launchpad.net/nova/+bug/1047120
-        self.pip_install('pip==1.1')
-        self.pip_install('distribute')
+        # setuptools.
+        self.pip_install('pip>=1.3')
+        self.pip_install('setuptools')
 
-        # Install greenlet by hand - just listing it in the requires file does
-        # not
-        # get it installed in the right order
-        self.pip_install('greenlet')
-
-        self.pip_install('-r', self.pip_requires)
-        self.pip_install('-r', self.test_requires)
+        self.pip_install('-r', self.requirements)
+        self.pip_install('-r', self.test_requirements)
 
     def post_process(self):
         self.get_distro().post_process()
 
     def parse_args(self, argv):
         """Parses command-line arguments."""
-        parser = argparse.ArgumentParser()
-        parser.add_argument('-n', '--no-site-packages',
-                            action='store_true',
-                            help="Do not inherit packages from global Python "
-                                 "install")
-        return parser.parse_args(argv[1:])
+        parser = optparse.OptionParser()
+        parser.add_option('-n', '--no-site-packages',
+                          action='store_true',
+                          help="Do not inherit packages from global Python "
+                               "install")
+        return parser.parse_args(argv[1:])[0]
 
 
 class Distro(InstallVenv):
@@ -150,12 +145,12 @@ class Distro(InstallVenv):
             return
 
         if self.check_cmd('easy_install'):
-            print 'Installing virtualenv via easy_install...',
+            print('Installing virtualenv via easy_install...', end=' ')
             if self.run_command(['easy_install', 'virtualenv']):
-                print 'Succeeded'
+                print('Succeeded')
                 return
             else:
-                print 'Failed'
+                print('Failed')
 
         self.die('ERROR: virtualenv not found.\n\n%s development'
                  ' requires virtualenv, please install it using your'
@@ -180,10 +175,6 @@ class Fedora(Distro):
         return self.run_command_with_code(['rpm', '-q', pkg],
                                           check_exit_code=False)[1] == 0
 
-    def yum_install(self, pkg, **kwargs):
-        print "Attempting to install '%s' via yum" % pkg
-        self.run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs)
-
     def apply_patch(self, originalfile, patchfile):
         self.run_command(['patch', '-N', originalfile, patchfile],
                          check_exit_code=False)
@@ -193,7 +184,7 @@ class Fedora(Distro):
             return
 
         if not self.check_pkg('python-virtualenv'):
-            self.yum_install('python-virtualenv', check_exit_code=False)
+            self.die("Please install 'python-virtualenv'.")
 
         super(Fedora, self).install_virtualenv()
 
@@ -206,12 +197,13 @@ class Fedora(Distro):
         This can be removed when the fix is applied upstream.
 
         Nova: https://bugs.launchpad.net/nova/+bug/884915
-        Upstream: https://bitbucket.org/which_linden/eventlet/issue/89
+        Upstream: https://bitbucket.org/eventlet/eventlet/issue/89
+        RHEL: https://bugzilla.redhat.com/958868
         """
 
         # Install "patch" program if it's not there
         if not self.check_pkg('patch'):
-            self.yum_install('patch')
+            self.die("Please install 'patch'.")
 
         # Apply the eventlet patch
         self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
diff --git a/tox.ini b/tox.ini
index 959459c..2ffae66 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,8 +8,8 @@ setenv = VIRTUAL_ENV={envdir}
          NOSE_OPENSTACK_RED=0.05
          NOSE_OPENSTACK_YELLOW=0.025
          NOSE_OPENSTACK_SHOW_ELAPSED=1
-deps = -r{toxinidir}/tools/pip-requires
-       -r{toxinidir}/tools/test-requires
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
 commands = nosetests
 
 [testenv:pep8]