Move to oslo.config and update common. Also add missing provider_method to payment_method_get.

This commit is contained in:
Endre Karlson 2013-02-26 21:57:12 +01:00
parent 448cc8924e
commit c2826fa655
27 changed files with 726 additions and 1845 deletions

File diff suppressed because it is too large

View File

@ -0,0 +1,101 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Multiple DB API backend support.
Supported configuration options:
`db_backend`: DB backend name or full module path to DB backend module.
`dbapi_use_tpool`: Enable thread pooling of DB API calls.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().
A bug for eventlet has been filed here:
https://bitbucket.org/eventlet/eventlet/issue/137/
"""
import functools
from oslo.config import cfg
from billingstack.openstack.common import importutils
from billingstack.openstack.common import lockutils
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db'),
cfg.BoolOpt('dbapi_use_tpool',
default=False,
help='Enable the experimental use of thread pooling for '
'all DB API calls')
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
class DBAPI(object):
def __init__(self, backend_mapping=None):
if backend_mapping is None:
backend_mapping = {}
self.__backend = None
self.__backend_mapping = backend_mapping
@lockutils.synchronized('dbapi_backend', 'billingstack-')
def __get_backend(self):
"""Get the actual backend. May be a module or an instance of
a class. Doesn't matter to us. We do this synchronized as it's
possible multiple greenthreads started very quickly trying to do
DB calls and eventlet can switch threads before self.__backend gets
assigned.
"""
if self.__backend:
# Another thread assigned it
return self.__backend
backend_name = CONF.db_backend
self.__use_tpool = CONF.dbapi_use_tpool
if self.__use_tpool:
from eventlet import tpool
self.__tpool = tpool
# Import the untranslated name if we don't have a
# mapping.
backend_path = self.__backend_mapping.get(backend_name,
backend_name)
backend_mod = importutils.import_module(backend_path)
self.__backend = backend_mod.get_backend()
return self.__backend
def __getattr__(self, key):
backend = self.__backend or self.__get_backend()
attr = getattr(backend, key)
if not self.__use_tpool or not hasattr(attr, '__call__'):
return attr
def tpool_wrapper(*args, **kwargs):
return self.__tpool.execute(attr, *args, **kwargs)
functools.update_wrapper(tpool_wrapper, attr)
return tpool_wrapper
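For reference, the contract above is deliberately small: a backend module exposes a module-level get_backend() taking no arguments, and DBAPI proxies attribute access to whatever it returns. A minimal sketch with hypothetical module and method names (not part of this commit):

# myapp/db/sqlalchemy/api.py -- hypothetical backend module
import sys

def merchant_get(merchant_id):
    # a real backend would query the database here
    return {'id': merchant_id}

def get_backend():
    # returning the module itself is enough; DBAPI only needs attributes
    return sys.modules[__name__]

# caller side, assuming the DBAPI class above is importable as shown:
# from billingstack.openstack.common.db.api import DBAPI
# db_api = DBAPI(backend_mapping={'sqlalchemy': 'myapp.db.sqlalchemy.api'})
# db_api.merchant_get('m-1')  # resolved via __getattr__, tpool-wrapped if enabled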

View File

@ -0,0 +1,45 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from billingstack.openstack.common.gettextutils import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=[], inner_exception=None):
self.columns = columns
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")

View File

@ -24,8 +24,7 @@ import traceback
import eventlet
import eventlet.backdoor
import greenlet
from billingstack.openstack.common import cfg
from oslo.config import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
@ -52,12 +51,20 @@ def _print_greenthreads():
print
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print threadId
traceback.print_stack(stack)
print
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:

View File

@ -38,13 +38,17 @@ import functools
import inspect
import itertools
import json
import logging
import xmlrpclib
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import timeutils
LOG = logging.getLogger(__name__)
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0):
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
@ -80,7 +84,9 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > 3:
if level > max_depth:
LOG.error(_('Max serialization depth exceeded on object: %d %s'),
level, value)
return '?'
# The try block may not be necessary after the class check above,
@ -89,7 +95,8 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level)
level=level,
max_depth=max_depth)
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
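The max_depth guard is what keeps to_primitive() from recursing forever on self-referencing structures. The same idea in a standalone sketch (not the oslo implementation itself):

def to_primitive_sketch(value, level=0, max_depth=3):
    # give up with a placeholder once we are too deep
    if level > max_depth:
        return '?'
    if isinstance(value, dict):
        return dict((k, to_primitive_sketch(v, level + 1, max_depth))
                    for k, v in value.items())
    if isinstance(value, (list, tuple)):
        return [to_primitive_sketch(v, level + 1, max_depth) for v in value]
    return value

circular = {}
circular['self'] = circular
print(to_primitive_sketch(circular))
# {'self': {'self': {'self': {'self': '?'}}}}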

View File

@ -40,7 +40,8 @@ import stat
import sys
import traceback
from billingstack.openstack.common import cfg
from oslo.config import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import jsonutils
from billingstack.openstack.common import local
@ -324,16 +325,11 @@ def _create_logging_excepthook(product_name):
def setup(product_name):
"""Setup logging."""
sys.excepthook = _create_logging_excepthook(product_name)
if CONF.log_config:
try:
logging.config.fileConfig(CONF.log_config)
except Exception:
traceback.print_exc()
raise
logging.config.fileConfig(CONF.log_config)
else:
_setup_logging_from_conf(product_name)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):

View File

@ -15,7 +15,8 @@
import uuid
from billingstack.openstack.common import cfg
from oslo.config import cfg
from billingstack.openstack.common import context
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import importutils

View File

@ -13,8 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from billingstack.openstack.common import cfg
from billingstack.openstack.common import jsonutils
from billingstack.openstack.common import log as logging

View File

@ -13,8 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from billingstack.openstack.common import cfg
from billingstack.openstack.common import context as req_context
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import log as logging

View File

@ -15,7 +15,8 @@
'''messaging based notification driver, with message envelopes'''
from billingstack.openstack.common import cfg
from oslo.config import cfg
from billingstack.openstack.common import context as req_context
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import log as logging

View File

@ -28,7 +28,8 @@ For some wrappers that add message versioning to rpc, see:
import inspect
import logging
from billingstack.openstack.common import cfg
from oslo.config import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import importutils
from billingstack.openstack.common import local

View File

@ -25,13 +25,19 @@ Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowski): Remove this import and the comment below in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
from billingstack.openstack.common import excutils
from billingstack.openstack.common.gettextutils import _
@ -40,6 +46,17 @@ from billingstack.openstack.common import log as logging
from billingstack.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
cfg.BoolOpt('amqp_rpc_single_reply_queue',
default=False,
help='Enable a fast single reply queue if using AMQP based '
'RPC like RabbitMQ or Qpid.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
@ -51,6 +68,7 @@ class Pool(pools.Pool):
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
@ -60,6 +78,16 @@ class Pool(pools.Pool):
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is that the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
@ -137,6 +165,12 @@ class ConnectionContext(rpc_common.Connection):
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
@ -148,8 +182,45 @@ class ConnectionContext(rpc_common.Connection):
raise rpc_common.InvalidRPCConnectionReuse()
def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
ending=False, log_failure=True):
class ReplyProxy(ConnectionContext):
""" Connection class for RPC replies / callbacks """
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('no calling threads waiting for msg_id : %s'
', message : %s') % (msg_id, message_data))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
'threshold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
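Stripped of AMQP, the ReplyProxy above is a dispatcher from msg_id to a per-call waiter. The core routing in isolation (a plain list stands in for the eventlet queue each waiter really uses):

call_waiters = {}

def process_data(message_data):
    msg_id = message_data.pop('_msg_id', None)
    waiter = call_waiters.get(msg_id)
    if waiter is None:
        print('no calling threads waiting for msg_id: %s' % msg_id)
    else:
        waiter.append(message_data)  # stand-in for waiter.put()

call_waiters['abc123'] = []
process_data({'_msg_id': 'abc123', 'result': 42})
print(call_waiters['abc123'])  # [{'result': 42}]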
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
@ -168,13 +239,22 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
'failure': failure}
if ending:
msg['ending'] = True
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
@ -182,13 +262,14 @@ class RpcContext(rpc_common.CommonRpcContext):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
ending, log_failure)
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
@ -204,6 +285,7 @@ def unpack_context(conf, msg):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
@ -224,15 +306,86 @@ def pack_context(msg, context):
msg.update(context_d)
class ProxyCallback(object):
"""Calls methods on a proxy object based on method and args."""
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
def __init__(self, conf, proxy, connection_pool):
self.proxy = proxy
# NOTE: This value could be made a configuration option, but it
# rarely needs changing, so it is left static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
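Together, _add_unique_id() and _MsgIdCache give at-least-once delivery a cheap dedup check on the consumer side. A self-contained demo of the pattern, sized and keyed the same way as above:

import collections
import uuid

UNIQUE_ID = '_unique_id'
prev_msgids = collections.deque([], maxlen=16)  # DUP_MSG_CHECK_SIZE

def deliver(msg):
    msg_id = msg.get(UNIQUE_ID)
    if msg_id in prev_msgids:
        print('dropping duplicate %s' % msg_id)
        return
    prev_msgids.append(msg_id)
    print('processing %s' % msg_id)

msg = {'method': 'ping', UNIQUE_ID: uuid.uuid4().hex}
deliver(msg)  # processing ...
deliver(msg)  # dropping duplicate ... (e.g. redelivery after a failed ack)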
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager used by
the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback to allow it to be invoked in a green
thread.
"""
def __init__(self, conf, callback, connection_pool):
"""
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
@ -251,6 +404,7 @@ class ProxyCallback(object):
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
@ -293,11 +447,68 @@ class ProxyCallback(object):
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending" flag"""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
LOG.exception(_('Timed out waiting for RPC response.'))
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
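The iterator above drains a queue until an 'ending' marker arrives. The control flow, minus AMQP, eventlet, and timeouts (a deque stands in for the LightQueue):

from collections import deque

data_queue = deque([
    {'result': 1},
    {'result': 2},
    {'ending': True},
])

def iter_replies():
    while True:
        data = data_queue.popleft()  # stand-in for LightQueue.get(timeout=...)
        if data.get('ending', False):
            return  # the remote side said it is done
        yield data['result']

print(list(iter_replies()))  # [1, 2]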
#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
@ -307,6 +518,7 @@ class MulticallWaiter(object):
self._done = False
self._got_ending = False
self._conf = conf
self.msg_id_cache = _MsgIdCache()
def done(self):
if self._done:
@ -318,6 +530,7 @@ class MulticallWaiter(object):
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
@ -353,22 +566,41 @@ def create_connection(conf, new, connection_pool):
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
# TODO(pekowski): Remove all these comments in Havana.
# For amqp_rpc_single_reply_queue = False,
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
# For amqp_rpc_single_reply_queue = True,
# The 'with' statement is mandatory for closing the connection
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
# TODO(pekowski): Remove this flag and the code under the if clause
# in Havana.
if not conf.amqp_rpc_single_reply_queue:
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
else:
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
@ -385,6 +617,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
@ -393,6 +626,7 @@ def cast(conf, context, topic, msg, connection_pool):
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
@ -400,6 +634,7 @@ def fanout_cast(conf, context, topic, msg, connection_pool):
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
@ -409,6 +644,7 @@ def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
@ -420,6 +656,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope):
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:

View File

@ -21,7 +21,8 @@ import copy
import sys
import traceback
from billingstack.openstack.common import cfg
from oslo.config import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import importutils
from billingstack.openstack.common import jsonutils
@ -48,8 +49,8 @@ deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'billingstack.version': <RPC Envelope Version as a String>,
'billingstack.message': <Application Message Payload, JSON encoded>
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
@ -65,8 +66,8 @@ to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'billingstack.version'
_MESSAGE_KEY = 'billingstack.message'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
# TODO(russellb) Turn this on after Grizzly.
@ -124,6 +125,10 @@ class Timeout(RPCException):
message = _("Timeout while waiting on RPC response.")
class DuplicateMessageError(RPCException):
message = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
message = _("Invalid reuse of an RPC connection.")
@ -196,6 +201,28 @@ class Connection(object):
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools if more than one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
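The delivery contract here (every pool gets a copy of each message, and exactly one member per pool handles it) can be modeled without a broker. This toy round-robin only illustrates the semantics, not how impl_kombu or impl_qpid implement them:

import itertools

pools = {
    'workers': ['worker-1', 'worker-2'],
    'auditors': ['auditor-1'],
}
rr = dict((name, itertools.cycle(members))
          for name, members in pools.items())

def publish(msg):
    for pool_name in pools:
        member = next(rr[pool_name])  # one member per pool handles the copy
        print('%s: %s handles %r' % (pool_name, member, msg))

publish({'event_type': 'plan.create'})
publish({'event_type': 'plan.delete'})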
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.

View File

@ -28,8 +28,8 @@ import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from billingstack.openstack.common import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import network_utils
from billingstack.openstack.common.rpc import amqp as rpc_amqp
@ -165,9 +165,10 @@ class ConsumerBase(object):
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
message.ack()
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
message.ack()
self.queue.consume(*args, callback=_callback, **options)
@ -197,6 +198,7 @@ class DirectConsumer(ConsumerBase):
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
@ -750,6 +752,30 @@ class Connection(object):
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools if more than one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
)
def create_connection(conf, new=True):
"""Create a connection"""

View File

@ -22,8 +22,8 @@ import uuid
import eventlet
import greenlet
from oslo.config import cfg
from billingstack.openstack.common import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import importutils
from billingstack.openstack.common import jsonutils
@ -560,6 +560,34 @@ class Connection(object):
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools if more than one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection"""

View File

@ -23,8 +23,8 @@ import uuid
import eventlet
import greenlet
from oslo.config import cfg
from billingstack.openstack.common import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import importutils
from billingstack.openstack.common import jsonutils
@ -216,12 +216,18 @@ class ZmqClient(object):
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0
if serialize:
data = rpc_common.serialize_msg(data, force_envelope)
self.outq.send(map(bytes, (msg_id, topic, 'cast', _serialize(data))))
if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
@ -320,7 +326,7 @@ class ConsumerBase(object):
else:
return [result]
def process(self, style, target, proxy, ctx, data):
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
@ -432,12 +438,14 @@ class ZmqProxy(ZmqBaseReactor):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
msg_id, topic, style, in_msg = data
topic = topic.split('.', 1)[0]
topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
if topic.startswith('fanout~') or topic.startswith('zmq_replies'):
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
@ -520,6 +528,21 @@ class ZmqProxy(ZmqBaseReactor):
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
e.g. [1, 2, 3, 4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h
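Flattening (in ZmqClient.cast above) and unflatten_envelope() are inverses over the envelope dict. A quick check, using the function defined above and Python 2's built-in reduce, matching the rest of the module:

envelope = {'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}
flat = reduce(lambda x, y: x + y, envelope.items())  # same flattening as cast()
print(unflatten_envelope(flat) == envelope)  # True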
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
@ -540,38 +563,50 @@ class ZmqReactor(ZmqBaseReactor):
self.mapping[sock].send(data)
return
msg_id, topic, style, in_msg = data
ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
ctx = RpcContext.unmarshal(ctx)
proxy = self.proxies[sock]
self.pool.spawn_n(self.process, style, topic,
proxy, ctx, request)
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Only consume on the base topic name.
topic = topic.split('.', 1)[0]
LOG.info(_("Create Consumer for topic (%(topic)s)") %
{'topic': topic})
# Subscription scenarios
if fanout:
subscribe = ('', fanout)[type(fanout) == str]
sock_type = zmq.SUB
topic = 'fanout~' + topic
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
@ -582,9 +617,11 @@ class Connection(rpc_common.Connection):
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
@ -593,8 +630,8 @@ class Connection(rpc_common.Connection):
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, serialize=True,
force_envelope=False, _msg_id=None):
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
@ -603,7 +640,7 @@ def _cast(addr, context, topic, msg, timeout=None, serialize=True,
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, serialize, force_envelope)
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
@ -612,7 +649,7 @@ def _cast(addr, context, topic, msg, timeout=None, serialize=True,
def _call(addr, context, topic, msg, timeout=None,
serialize=True, force_envelope=False):
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
@ -642,20 +679,31 @@ def _call(addr, context, topic, msg, timeout=None,
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload,
serialize=serialize, force_envelope=force_envelope)
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
responses = _deserialize(msg[-1])[-1]['args']['response']
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
@ -676,8 +724,8 @@ def _call(addr, context, topic, msg, timeout=None,
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
force_envelope=False, _msg_id=None):
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
@ -703,11 +751,11 @@ def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, serialize,
force_envelope, _msg_id)
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
serialize, force_envelope)
envelope)
def create_connection(conf, new=True):
@ -746,8 +794,7 @@ def notify(conf, context, topic, msg, **kwargs):
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
kwargs['serialize'] = kwargs.pop('envelope')
kwargs['force_envelope'] = True
kwargs['envelope'] = kwargs.get('envelope', True)
cast(conf, context, topic, msg, **kwargs)
@ -772,8 +819,9 @@ def _get_ctxt():
return ZMQ_CTX
def _get_matchmaker():
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
matchmaker = importutils.import_object(CONF.rpc_zmq_matchmaker)
matchmaker = importutils.import_object(
CONF.rpc_zmq_matchmaker, *args, **kwargs)
return matchmaker

View File

@ -22,7 +22,8 @@ import contextlib
import itertools
import json
from billingstack.openstack.common import cfg
from oslo.config import cfg
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import log as logging
@ -201,11 +202,12 @@ class FanoutRingExchange(RingExchange):
class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local."""
def __init__(self):
def __init__(self, host='localhost'):
self.host = host
super(Exchange, self).__init__()
def run(self, key):
return [(key.split('.')[0] + '.localhost', 'localhost')]
return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
@ -237,11 +239,11 @@ class MatchMakerLocalhost(MatchMakerBase):
Match Maker where all bare topics resolve to localhost.
Useful for testing.
"""
def __init__(self):
def __init__(self, host='localhost'):
super(MatchMakerLocalhost, self).__init__()
self.add_binding(FanoutBinding(), LocalhostExchange())
self.add_binding(FanoutBinding(), LocalhostExchange(host))
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), LocalhostExchange())
self.add_binding(TopicBinding(), LocalhostExchange(host))
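With the new host parameter, a bare topic now resolves to the configured host rather than the literal 'localhost'. Given the class above:

ex = LocalhostExchange(host='node-1')
print(ex.run('conductor.ignored-suffix'))
# [('conductor.node-1', 'node-1')]; previously this was
# always [('conductor.localhost', 'localhost')]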
class MatchMakerStub(MatchMakerBase):

View File

@ -28,8 +28,8 @@ import time
import eventlet
import logging as std_logging
from oslo.config import cfg
from billingstack.openstack.common import cfg
from billingstack.openstack.common import eventlet_backdoor
from billingstack.openstack.common.gettextutils import _
from billingstack.openstack.common import importutils

View File

@ -43,6 +43,11 @@ def parse_mailmap(mailmap='.mailmap'):
return mapping
def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
return parse_mailmap(mailmap)
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
@ -117,9 +122,9 @@ def _run_shell_command(cmd, throw_on_error=False):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = output.communicate()
if output.returncode and throw_on_error:
raise Exception("%s returned %d" % cmd, output.returncode)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
@ -127,14 +132,26 @@ def _run_shell_command(cmd, throw_on_error=False):
return out[0].strip()
def _get_git_directory():
parent_dir = os.path.dirname(__file__)
while True:
git_dir = os.path.join(parent_dir, '.git')
if os.path.exists(git_dir):
return git_dir
parent_dir, child = os.path.split(parent_dir)
if not child:  # reached the root dir
return None
def write_git_changelog():
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
git_dir = _get_git_directory()
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
if git_dir:
git_log_cmd = 'git --git-dir=%s log --stat' % git_dir
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
mailmap = _parse_git_mailmap(git_dir)
with open(new_changelog, "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
else:
@ -146,13 +163,15 @@ def generate_authors():
jenkins_email = 'jenkins@review.(openstack|stackforge).org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
git_dir = _get_git_directory()
if not os.getenv('SKIP_GENERATE_AUTHORS'):
if os.path.isdir('.git'):
if git_dir:
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
git_log_cmd = ("git --git-dir=" + git_dir +
" log --format='%aN <%aE>' | sort -u | "
"egrep -v '" + jenkins_email + "'")
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
mailmap = _parse_git_mailmap(git_dir)
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
@ -258,43 +277,48 @@ def get_cmdclass():
return cmdclass
def _get_revno():
def _get_revno(git_dir):
"""Return the number of commits since the most recent tag.
We use git-describe to find this out, but if there are no
tags then we fall back to counting commits since the beginning
of time.
"""
describe = _run_shell_command("git describe --always")
describe = _run_shell_command(
"git --git-dir=%s describe --always" % git_dir)
if "-" in describe:
return describe.rsplit("-", 2)[-2]
# no tags found
revlist = _run_shell_command("git rev-list --abbrev-commit HEAD")
revlist = _run_shell_command(
"git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
return len(revlist.splitlines())
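_get_revno() leans on the 'TAG-N-gSHA' shape of git-describe output; the parsing step in isolation, with a hypothetical describe string:

describe = '2013.1-42-gc2826fa'  # hypothetical `git describe` output
if '-' in describe:
    print(describe.rsplit('-', 2)[-2])  # '42': commits since the 2013.1 tag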
def get_version_from_git(pre_version):
def _get_version_from_git(pre_version):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
git_dir = _get_git_directory()
if git_dir:
if pre_version:
try:
return _run_shell_command(
"git describe --exact-match",
"git --git-dir=" + git_dir + " describe --exact-match",
throw_on_error=True).replace('-', '.')
except Exception:
sha = _run_shell_command("git log -n1 --pretty=format:%h")
return "%s.a%s.g%s" % (pre_version, _get_revno(), sha)
sha = _run_shell_command(
"git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
else:
return _run_shell_command(
"git describe --always").replace('-', '.')
"git --git-dir=" + git_dir + " describe --always").replace(
'-', '.')
return None
def get_version_from_pkg_info(package_name):
def _get_version_from_pkg_info(package_name):
"""Get the version from PKG-INFO file if we can."""
try:
pkg_info_file = open('PKG-INFO', 'r')
@ -325,10 +349,10 @@ def get_version(package_name, pre_version=None):
version = os.environ.get("OSLO_PACKAGE_VERSION", None)
if version:
return version
version = get_version_from_pkg_info(package_name)
version = _get_version_from_pkg_info(package_name)
if version:
return version
version = get_version_from_git(pre_version)
version = _get_version_from_git(pre_version)
if version:
return version
raise Exception("Versioning for this project requires either an sdist"

View File

@ -49,11 +49,11 @@ class VersionInfo(object):
provider = pkg_resources.get_provider(requirement)
return provider.version
except pkg_resources.DistributionNotFound:
# The most likely cause for this is running tests in a tree with
# The most likely cause for this is running tests in a tree
# produced from a tarball where the package itself has not been
# installed into anything. Check for a PKG-INFO file.
# installed into anything. Revert to setup-time logic.
from billingstack.openstack.common import setup
return setup.get_version_from_pkg_info(self.package)
return setup.get_version(self.package)
def release_string(self):
"""Return the full version of the package including suffixes indicating

View File

@ -462,7 +462,7 @@ class Connection(base.Connection):
def payment_method_get(self, pm_id, **kw):
row = self._get_id_or_name(models.PaymentMethod, pm_id)
return self._dict(row)
return self._dict(row, extra=['provider_method'])
def payment_method_update(self, pm_id, values):
row = self._update(models.PaymentMethod, pm_id, values)

View File

@ -22,6 +22,8 @@ import os
import sys
from wsgiref import simple_server
from oslo.config import cfg
from billingstack.api import app
from billingstack import service
from billingstack.openstack.common import cfg

View File

@ -20,6 +20,8 @@ import os
import sys
sys.path.insert(0, os.getcwd())
from oslo.config import cfg
from billingstack.storage.impl_sqlalchemy.migration.cli import main

View File

@ -16,6 +16,9 @@
#
# Copied: billingstack
import sys
from oslo.config import cfg
from billingstack import utils
from billingstack.manage import Shell

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import os
import sys
# If ../billingstack/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'billingstack', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from oslo.config import cfg
from billingstack.openstack.common import log as logging
from billingstack.openstack.common import rpc
from billingstack.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
CONF(sys.argv[1:], project='billingstack')
def main():
logging.setup("billingstack")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
if __name__ == '__main__':
main()

View File

@ -1,3 +1,3 @@
[DEFAULT]
modules=cfg,iniparser,importutils,excutils,local,jsonutils,timeutils,service,eventlet_backdoor,loopingcall,utils,exception,setup,version,uuidutils,processutils,db,log,gettextutils,iso8601,notifier,rpc,context,threadgroup
modules=iniparser,importutils,excutils,local,jsonutils,timeutils,service,eventlet_backdoor,loopingcall,utils,exception,setup,version,uuidutils,processutils,db,log,gettextutils,iso8601,notifier,rpc,context,threadgroup
base=billingstack

View File

@ -8,3 +8,4 @@ anyjson>=0.2.4
pycountry
iso8601
cliff
http://tarballs.openstack.org/oslo-config/oslo-config-2013.1b4.tar.gz#egg=oslo-config