Updates oslo-incubator files
Updates to the stable/grizzly version, since "openstack.common.setup" has been replaced with "pbr" in later versions, and adopting those would first require converting setup.py.
parent d7dff62e0a
commit bddc7b19c7
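Note: converting to pbr replaces the openstack.common.setup machinery with a thin setup.py shim plus declarative metadata in setup.cfg. A minimal sketch of what that conversion looks like (the option values below are illustrative, not taken from this repository):

    # setup.py
    import setuptools

    setuptools.setup(
        setup_requires=['pbr'],
        pbr=True)

    # setup.cfg (excerpt)
    [metadata]
    name = cloudbase-init
    summary = Portable cloud initialization service

    [files]
    packages =
        cloudbaseinit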
cloudbaseinit/openstack/common/context.py
@@ -1,81 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools
import uuid


def generate_request_id():
    return 'req-' + str(uuid.uuid4())


class RequestContext(object):

    """
    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_tok = auth_tok
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        return {'user': self.user,
                'tenant': self.tenant,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_tok,
                'request_id': self.request_id}


def get_admin_context(show_deleted="no"):
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None
cloudbaseinit/openstack/common/eventlet_backdoor.py
@@ -1,80 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import gc
import pprint
import sys
import traceback

import eventlet
import eventlet.backdoor
import greenlet

from cloudbaseinit.openstack.common import cfg

eventlet_backdoor_opts = [
    cfg.IntOpt('backdoor_port',
               default=None,
               help='port for eventlet backdoor to listen')
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)


def _dont_use_this():
    print "Don't use this, just disconnect instead"


def _find_objects(t):
    return filter(lambda o: isinstance(o, t), gc.get_objects())


def _print_greenthreads():
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
        print i, gt
        traceback.print_stack(gt.gr_frame)
        print


def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
    }

    if CONF.backdoor_port is None:
        return None

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = eventlet.listen(('localhost', CONF.backdoor_port))
    port = sock.getsockname()[1]
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
cloudbaseinit/openstack/common/excutils.py
@@ -1,51 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exception related utilities.
"""

import contextlib
import logging
import sys
import traceback

from cloudbaseinit.openstack.common.gettextutils import _


@contextlib.contextmanager
def save_and_reraise_exception():
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.
    """
    type_, value, tb = sys.exc_info()
    try:
        yield
    except Exception:
        logging.error(_('Original exception being dropped: %s'),
                      traceback.format_exception(type_, value, tb))
        raise
    raise type_, value, tb
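Usage note: save_and_reraise_exception() is meant to wrap cleanup code inside an exception handler; the original exception is re-raised when the with block exits. A minimal sketch (the resource object and its methods are hypothetical):

    from cloudbaseinit.openstack.common import excutils

    try:
        resource.delete()
    except Exception:
        with excutils.save_and_reraise_exception():
            # If this cleanup itself raises, the original exception is
            # logged and the new one propagates; otherwise the original
            # exception is re-raised after the block.
            resource.release_quota()  # hypothetical cleanup call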
cloudbaseinit/openstack/common/fileutils.py
@@ -1,35 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import errno
import os


def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise
cloudbaseinit/openstack/common/importutils.py
@@ -1,59 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Import related utilities and helper functions.
"""

import sys
import traceback


def import_class(import_str):
    """Returns a class from a string including module and class"""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))


def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    return import_class(import_str)(*args, **kwargs)


def import_object_ns(name_space, import_str, *args, **kwargs):
    """
    Import a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
    import_value = "%s.%s" % (name_space, import_str)
    try:
        return import_class(import_value)(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)


def import_module(import_str):
    """Import a module."""
    __import__(import_str)
    return sys.modules[import_str]
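Usage note: a quick sketch of the helpers above, using a stdlib class so it runs standalone:

    from cloudbaseinit.openstack.common import importutils

    # Resolve a dotted path to a class, then build an instance directly.
    encoder_cls = importutils.import_class('json.JSONEncoder')
    encoder = importutils.import_object('json.JSONEncoder', indent=2)
    assert encoder_cls is type(encoder)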
cloudbaseinit/openstack/common/iniparser.py
@@ -1,130 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class ParseError(Exception):
    def __init__(self, message, lineno, line):
        self.msg = message
        self.line = line
        self.lineno = lineno

    def __str__(self):
        return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)


class BaseParser(object):
    lineno = 0
    parse_exc = ParseError

    def _assignment(self, key, value):
        self.assignment(key, value)
        return None, []

    def _get_section(self, line):
        if line[-1] != ']':
            return self.error_no_section_end_bracket(line)
        if len(line) <= 2:
            return self.error_no_section_name(line)

        return line[1:-1]

    def _split_key_value(self, line):
        colon = line.find(':')
        equal = line.find('=')
        if colon < 0 and equal < 0:
            return self.error_invalid_assignment(line)

        if colon < 0 or (equal >= 0 and equal < colon):
            key, value = line[:equal], line[equal + 1:]
        else:
            key, value = line[:colon], line[colon + 1:]

        value = value.strip()
        if ((value and value[0] == value[-1]) and
                (value[0] == "\"" or value[0] == "'")):
            value = value[1:-1]
        return key.strip(), [value]

    def parse(self, lineiter):
        key = None
        value = []

        for line in lineiter:
            self.lineno += 1

            line = line.rstrip()
            if not line:
                # Blank line, ends multi-line values
                if key:
                    key, value = self._assignment(key, value)
                continue
            elif line[0] in (' ', '\t'):
                # Continuation of previous assignment
                if key is None:
                    self.error_unexpected_continuation(line)
                else:
                    value.append(line.lstrip())
                continue

            if key:
                # Flush previous assignment, if any
                key, value = self._assignment(key, value)

            if line[0] == '[':
                # Section start
                section = self._get_section(line)
                if section:
                    self.new_section(section)
            elif line[0] in '#;':
                self.comment(line[1:].lstrip())
            else:
                key, value = self._split_key_value(line)
                if not key:
                    return self.error_empty_key(line)

        if key:
            # Flush previous assignment, if any
            self._assignment(key, value)

    def assignment(self, key, value):
        """Called when a full assignment is parsed"""
        raise NotImplementedError()

    def new_section(self, section):
        """Called when a new section is started"""
        raise NotImplementedError()

    def comment(self, comment):
        """Called when a comment is parsed"""
        pass

    def error_invalid_assignment(self, line):
        raise self.parse_exc("No ':' or '=' found in assignment",
                             self.lineno, line)

    def error_empty_key(self, line):
        raise self.parse_exc('Key cannot be empty', self.lineno, line)

    def error_unexpected_continuation(self, line):
        raise self.parse_exc('Unexpected continuation line',
                             self.lineno, line)

    def error_no_section_end_bracket(self, line):
        raise self.parse_exc('Invalid section (must end with ])',
                             self.lineno, line)

    def error_no_section_name(self, line):
        raise self.parse_exc('Empty section name', self.lineno, line)
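Usage note: BaseParser is callback-based; subclasses override assignment(), new_section() and comment(). A minimal sketch of a dict-building subclass (illustrative only; note that values arrive as a list of lines, to support multi-line values):

    class DictParser(BaseParser):
        def __init__(self):
            self.values = {}
            self.section = None

        def new_section(self, section):
            self.section = self.values.setdefault(section, {})

        def assignment(self, key, value):
            target = self.section if self.section is not None else self.values
            target[key] = value

    parser = DictParser()
    parser.parse(iter(['[default]', 'verbose = true']))
    # parser.values == {'default': {'verbose': ['true']}}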
cloudbaseinit/openstack/common/jsonutils.py
@@ -34,6 +34,7 @@ This module provides a few things:
 
 
 import datetime
+import functools
 import inspect
 import itertools
 import json
@@ -42,7 +43,8 @@ import xmlrpclib
 
 from cloudbaseinit.openstack.common import timeutils
 
 
-def to_primitive(value, convert_instances=False, level=0):
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+                 level=0, max_depth=3):
     """Convert a complex object into primitives.
 
     Handy for JSON serialization. We can optionally handle instances,
@@ -78,12 +80,17 @@ def to_primitive(value, convert_instances=False, level=0):
     if getattr(value, '__module__', None) == 'mox':
         return 'mock'
 
-    if level > 3:
+    if level > max_depth:
         return '?'
 
+    # The try block may not be necessary after the class check above,
+    # but just in case ...
     try:
+        recursive = functools.partial(to_primitive,
+                                      convert_instances=convert_instances,
+                                      convert_datetime=convert_datetime,
+                                      level=level,
+                                      max_depth=max_depth)
         # It's not clear why xmlrpclib created their own DateTime type, but
         # for our purposes, make it a datetime type which is explicitly
         # handled
@@ -91,33 +98,19 @@ def to_primitive(value, convert_instances=False, level=0):
             value = datetime.datetime(*tuple(value.timetuple())[:6])
 
         if isinstance(value, (list, tuple)):
-            o = []
-            for v in value:
-                o.append(to_primitive(v, convert_instances=convert_instances,
-                                      level=level))
-            return o
+            return [recursive(v) for v in value]
         elif isinstance(value, dict):
-            o = {}
-            for k, v in value.iteritems():
-                o[k] = to_primitive(v, convert_instances=convert_instances,
-                                    level=level)
-            return o
-        elif isinstance(value, datetime.datetime):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
+        elif convert_datetime and isinstance(value, datetime.datetime):
             return timeutils.strtime(value)
         elif hasattr(value, 'iteritems'):
-            return to_primitive(dict(value.iteritems()),
-                                convert_instances=convert_instances,
-                                level=level + 1)
+            return recursive(dict(value.iteritems()), level=level + 1)
         elif hasattr(value, '__iter__'):
-            return to_primitive(list(value),
-                                convert_instances=convert_instances,
-                                level=level)
+            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
-            return to_primitive(value.__dict__,
-                                convert_instances=convert_instances,
-                                level=level + 1)
+            return recursive(value.__dict__, level=level + 1)
         else:
             return value
     except TypeError:
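Usage note: with the new signature, datetime conversion and recursion depth become tunable. A minimal sketch:

    import datetime
    from cloudbaseinit.openstack.common import jsonutils

    payload = {'when': datetime.datetime.utcnow(), 'items': [1, 2, 3]}
    # Datetimes become strings via timeutils.strtime unless
    # convert_datetime=False; anything nested deeper than max_depth
    # collapses to '?'.
    primitive = jsonutils.to_primitive(payload, max_depth=3)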
cloudbaseinit/openstack/common/local.py
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,6 +26,9 @@ class WeakLocal(corolocal.local):
     def __getattribute__(self, attr):
         rval = corolocal.local.__getattribute__(self, attr)
         if rval:
+            # NOTE(mikal): this bit is confusing. What is stored is a weak
+            # reference, not the value itself. We therefore need to lookup
+            # the weak reference and return the inner value here.
             rval = rval()
         return rval
 
@@ -34,4 +37,12 @@ class WeakLocal(corolocal.local):
         return corolocal.local.__setattr__(self, attr, value)
 
 
+# NOTE(mikal): the name "store" should be deprecated in the future
 store = WeakLocal()
+
+# A "weak" store uses weak references and allows an object to fall out of scope
+# when it falls out of scope in the code that uses the thread local storage. A
+# "strong" store will hold a reference to the object so that it never falls out
+# of scope.
+weak_store = WeakLocal()
+strong_store = corolocal.local
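Usage note: WeakLocal stores weak references (the setter wraps values, and the getter above dereferences them), so values vanish from weak_store once the caller drops them. A minimal sketch (deterministic under CPython's reference counting):

    from cloudbaseinit.openstack.common import local

    class Context(object):
        pass

    ctx = Context()
    local.weak_store.context = ctx
    assert local.weak_store.context is ctx

    del ctx
    # No strong references remain, so the weak store lets the object go.
    assert local.weak_store.context is None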
cloudbaseinit/openstack/common/lockutils.py
@@ -1,233 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import errno
import functools
import os
import shutil
import tempfile
import time
import weakref

from eventlet import semaphore

from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import fileutils
from cloudbaseinit.openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory to use for lock files')
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError, e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()


class _WindowsLock(_InterProcessLock):
    def trylock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)


class _PosixLock(_InterProcessLock):
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

_semaphores = weakref.WeakValueDictionary()


def synchronized(name, lock_file_prefix, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the bar method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    a method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.

    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then CONF.lock_path is
    used as a default.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False

                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path

                    if not local_lock_path:
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()

                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)

                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)

                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)

            return retval
        return inner
    return wrap
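Usage note: a minimal sketch of the decorator above with an inter-process file lock (the names are illustrative):

    from cloudbaseinit.openstack.common import lockutils

    @lockutils.synchronized('db-migrate', 'myservice-', external=True)
    def migrate():
        # Only one green thread in this process and, via the lock file,
        # only one process on this host runs this at a time.
        pass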
cloudbaseinit/openstack/common/log.py
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -40,28 +40,91 @@ import stat
 import sys
 import traceback
 
-from cloudbaseinit.openstack.common import cfg
+from oslo.config import cfg
+
 from cloudbaseinit.openstack.common.gettextutils import _
 from cloudbaseinit.openstack.common import jsonutils
 from cloudbaseinit.openstack.common import local
 from cloudbaseinit.openstack.common import notifier
 
 
+_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+    cfg.BoolOpt('debug',
+                short='d',
+                default=False,
+                help='Print debugging output (set logging level to '
+                     'DEBUG instead of default WARNING level).'),
+    cfg.BoolOpt('verbose',
+                short='v',
+                default=False,
+                help='Print more verbose output (set logging level to '
+                     'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+    cfg.StrOpt('log-config',
+               metavar='PATH',
+               help='If this option is specified, the logging configuration '
+                    'file specified is used and overrides any other logging '
+                    'options specified. Please see the Python logging module '
+                    'documentation for details on logging configuration '
+                    'files.'),
+    cfg.StrOpt('log-format',
+               default=_DEFAULT_LOG_FORMAT,
+               metavar='FORMAT',
+               help='A logging.Formatter log message format string which may '
+                    'use any of the available logging.LogRecord attributes. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-date-format',
+               default=_DEFAULT_LOG_DATE_FORMAT,
+               metavar='DATE_FORMAT',
+               help='Format string for %%(asctime)s in log records. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-file',
+               metavar='PATH',
+               deprecated_name='logfile',
+               help='(Optional) Name of log file to output to. '
+                    'If not set, logging will go to stdout.'),
+    cfg.StrOpt('log-dir',
+               deprecated_name='logdir',
+               help='(Optional) The directory to keep log files in '
+                    '(will be prepended to --log-file)'),
+    cfg.BoolOpt('use-syslog',
+                default=False,
+                help='Use syslog for logging.'),
+    cfg.StrOpt('syslog-log-facility',
+               default='LOG_USER',
+               help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+    cfg.BoolOpt('use_stderr',
+                default=True,
+                help='Log output to standard error'),
+    cfg.StrOpt('logfile_mode',
+               default='0644',
+               help='Default file mode used when creating log files'),
+]
+
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
-               default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
-                       '%(user)s %(tenant)s] %(instance)s'
+               default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
+                       '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
                        '%(message)s',
                help='format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
-               default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
-                       ' %(instance)s%(message)s',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [-] %(instance)s%(message)s',
                help='format string to use for log messages without context'),
     cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
     cfg.StrOpt('logging_exception_prefix',
-               default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
+               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
+                       '%(instance)s',
                help='prefix each line of exception output with this format'),
     cfg.ListOpt('default_log_levels',
                 default=[
@@ -93,24 +156,9 @@ log_opts = [
                 'format it like this'),
 ]
 
-
-generic_log_opts = [
-    cfg.StrOpt('logdir',
-               default=None,
-               help='Log output to a per-service log file in named directory'),
-    cfg.StrOpt('logfile',
-               default=None,
-               help='Log output to a named file'),
-    cfg.BoolOpt('use_stderr',
-                default=True,
-                help='Log output to standard error'),
-    cfg.StrOpt('logfile_mode',
-               default='0644',
-               help='Default file mode used when creating log files'),
-]
-
-
 CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
 CONF.register_opts(generic_log_opts)
 CONF.register_opts(log_opts)
 
@@ -148,8 +196,8 @@ def _get_binary_name():
 
 
 def _get_log_file_path(binary=None):
-    logfile = CONF.log_file or CONF.logfile
-    logdir = CONF.log_dir or CONF.logdir
+    logfile = CONF.log_file
+    logdir = CONF.log_dir
 
     if logfile and not logdir:
         return logfile
@@ -258,7 +306,7 @@ class JSONFormatter(logging.Formatter):
 class PublishErrorsHandler(logging.Handler):
     def emit(self, record):
         if ('cloudbaseinit.openstack.common.notifier.log_notifier' in
-            CONF.notification_driver):
+                CONF.notification_driver):
             return
         notifier.api.notify(None, 'error.publisher',
                             'error_notification',
@@ -277,16 +325,17 @@ def _create_logging_excepthook(product_name):
 
 def setup(product_name):
     """Setup logging."""
+    if CONF.log_config:
+        logging.config.fileConfig(CONF.log_config)
+    else:
+        _setup_logging_from_conf()
     sys.excepthook = _create_logging_excepthook(product_name)
 
-    if CONF.log_config:
-        try:
-            logging.config.fileConfig(CONF.log_config)
-        except Exception:
-            traceback.print_exc()
-            raise
-    else:
-        _setup_logging_from_conf(product_name)
+
+def set_defaults(logging_context_format_string):
+    cfg.set_defaults(log_opts,
+                     logging_context_format_string=
+                     logging_context_format_string)
 
 
 def _find_facility_from_conf():
@@ -313,8 +362,8 @@ def _find_facility_from_conf():
     return facility
 
 
-def _setup_logging_from_conf(product_name):
-    log_root = getLogger(product_name).logger
+def _setup_logging_from_conf():
+    log_root = getLogger(None).logger
     for handler in log_root.handlers:
         log_root.removeHandler(handler)
 
@@ -352,12 +401,15 @@ def _setup_logging_from_conf(product_name):
         if CONF.log_format:
             handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                    datefmt=datefmt))
-        handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+        else:
+            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
 
-    if CONF.verbose or CONF.debug:
+    if CONF.debug:
         log_root.setLevel(logging.DEBUG)
-    else:
+    elif CONF.verbose:
         log_root.setLevel(logging.INFO)
+    else:
+        log_root.setLevel(logging.WARNING)
 
     level = logging.NOTSET
     for pair in CONF.default_log_levels:
@@ -418,7 +470,7 @@ class LegacyFormatter(logging.Formatter):
             self._fmt = CONF.logging_default_format_string
 
         if (record.levelno == logging.DEBUG and
-            CONF.logging_debug_format_suffix):
+                CONF.logging_debug_format_suffix):
             self._fmt += " " + CONF.logging_debug_format_suffix
 
         # Cache this on the record, Logger will respect our formated copy
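Usage note: a typical bootstrap with the reworked options (the project name is illustrative):

    import sys

    from oslo.config import cfg
    from cloudbaseinit.openstack.common import log as logging

    CONF = cfg.CONF
    CONF(sys.argv[1:], project='myservice')  # picks up --debug/--verbose
    logging.setup('myservice')

    LOG = logging.getLogger(__name__)
    LOG.debug('only emitted when --debug is set')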
cloudbaseinit/openstack/common/network_utils.py
@@ -1,68 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Network-related utilities and helper functions.
"""

import logging

LOG = logging.getLogger(__name__)


def parse_host_port(address, default_port=None):
    """
    Interpret a string as a host:port pair.
    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
    [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)

    """
    if address[0] == '[':
        # Escaped ipv6
        _host, _port = address[1:].split(']')
        host = _host
        if ':' in _port:
            port = _port.split(':')[1]
        else:
            port = default_port
    else:
        if address.count(':') == 1:
            host, port = address.split(':')
        else:
            # 0 means ipv4, >1 means ipv6.
            # We prohibit unescaped ipv6 addresses with port.
            host = address
            port = default_port

    return (host, None if port is None else int(port))
cloudbaseinit/openstack/common/notifier/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
cloudbaseinit/openstack/common/notifier/api.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,7 +15,8 @@
 
 import uuid
 
-from cloudbaseinit.openstack.common import cfg
+from oslo.config import cfg
+
 from cloudbaseinit.openstack.common import context
 from cloudbaseinit.openstack.common.gettextutils import _
 from cloudbaseinit.openstack.common import importutils
cloudbaseinit/openstack/common/notifier/log_notifier.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,8 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from cloudbaseinit.openstack.common import cfg
+from oslo.config import cfg
+
 from cloudbaseinit.openstack.common import jsonutils
 from cloudbaseinit.openstack.common import log as logging
cloudbaseinit/openstack/common/notifier/no_op_notifier.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
cloudbaseinit/openstack/common/notifier/rpc_notifier.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,8 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from cloudbaseinit.openstack.common import cfg
+from oslo.config import cfg
+
 from cloudbaseinit.openstack.common import context as req_context
 from cloudbaseinit.openstack.common.gettextutils import _
 from cloudbaseinit.openstack.common import log as logging
cloudbaseinit/openstack/common/notifier/rpc_notifier2.py (new file, 52 lines)
@@ -0,0 +1,52 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''messaging based notification driver, with message envelopes'''

from oslo.config import cfg

from cloudbaseinit.openstack.common import context as req_context
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common import rpc

LOG = logging.getLogger(__name__)

notification_topic_opt = cfg.ListOpt(
    'topics', default=['notifications', ],
    help='AMQP topic(s) used for openstack notifications')

opt_group = cfg.OptGroup(name='rpc_notifier2',
                         title='Options for rpc_notifier2')

CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)


def notify(context, message):
    """Sends a notification via RPC"""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.rpc_notifier2.topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message, envelope=True)
        except Exception:
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"), locals())
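Usage note: drivers like this one are not called directly; they are selected through the notification_driver option and invoked by the notifier API. A minimal sketch (the payload is illustrative):

    from cloudbaseinit.openstack.common.notifier import api as notifier_api

    notifier_api.notify(None,
                        notifier_api.publisher_id('compute'),
                        'compute.instance.create.start',
                        notifier_api.INFO,
                        {'instance_id': 'example-id'})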
cloudbaseinit/openstack/common/notifier/test_notifier.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
cloudbaseinit/openstack/common/plugin/__init__.py
@@ -1,14 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
cloudbaseinit/openstack/common/plugin/callbackplugin.py
@@ -1,93 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common.plugin import plugin


LOG = logging.getLogger(__name__)


class _CallbackNotifier(object):
    """Manages plugin-defined notification callbacks.

    For each Plugin, a CallbackNotifier will be added to the
    notification driver list. Calls to notify() with appropriate
    messages will be hooked and prompt callbacks.

    A callback should look like this:
      def callback(context, message, user_data)
    """

    def __init__(self):
        self._callback_dict = {}

    def _add_callback(self, event_type, callback, user_data):
        callback_list = self._callback_dict.get(event_type, [])
        callback_list.append({'function': callback,
                              'user_data': user_data})
        self._callback_dict[event_type] = callback_list

    def _remove_callback(self, callback):
        for callback_list in self._callback_dict.values():
            for entry in callback_list:
                if entry['function'] == callback:
                    callback_list.remove(entry)

    def notify(self, context, message):
        if message.get('event_type') not in self._callback_dict:
            return

        for entry in self._callback_dict[message.get('event_type')]:
            entry['function'](context, message, entry.get('user_data'))

    def callbacks(self):
        return self._callback_dict


class CallbackPlugin(plugin.Plugin):
    """ Plugin with a simple callback interface.

    This class is provided as a convenience for producing a simple
    plugin that only watches a couple of events. For example, here's
    a subclass which prints a line the first time an instance is created.

    class HookInstanceCreation(CallbackPlugin):

        def __init__(self, _service_name):
            super(HookInstanceCreation, self).__init__()
            self._add_callback(self.magic, 'compute.instance.create.start')

        def magic(self):
            print "An instance was created!"
            self._remove_callback(self, self.magic)
    """

    def __init__(self, service_name):
        super(CallbackPlugin, self).__init__(service_name)
        self._callback_notifier = _CallbackNotifier()
        self._add_notifier(self._callback_notifier)

    def _add_callback(self, callback, event_type, user_data=None):
        """Add callback for a given event notification.

        Subclasses can call this as an alternative to implementing
        a fullblown notify notifier.
        """
        self._callback_notifier._add_callback(event_type, callback, user_data)

    def _remove_callback(self, callback):
        """Remove all notification callbacks to specified function."""
        self._callback_notifier._remove_callback(callback)
cloudbaseinit/openstack/common/plugin/plugin.py
@@ -1,86 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cloudbaseinit.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class Plugin(object):
    """Defines an interface for adding functionality to an OpenStack service.

    A plugin interacts with a service via the following pathways:

    - An optional set of notifiers, managed by calling add_notifier()
      or by overriding _notifiers()

    - A set of api extensions, managed via add_api_extension_descriptor()

    - Direct calls to service functions.

    - Whatever else the plugin wants to do on its own.

    This is the reference implementation.
    """

    # The following functions are provided as convenience methods
    # for subclasses. Subclasses should call them but probably not
    # override them.
    def _add_api_extension_descriptor(self, descriptor):
        """Subclass convenience method which adds an extension descriptor.

        Subclass constructors should call this method when
        extending a project's REST interface.

        Note that once the api service has loaded, the
        API extension set is more-or-less fixed, so
        this should mainly be called by subclass constructors.
        """
        self._api_extension_descriptors.append(descriptor)

    def _add_notifier(self, notifier):
        """Subclass convenience method which adds a notifier.

        Notifier objects should implement the function notify(message).
        Each notifier receives a notify() call whenever an openstack
        service broadcasts a notification.

        Best to call this during construction. Notifiers are enumerated
        and registered by the pluginmanager at plugin load time.
        """
        self._notifiers.append(notifier)

    # The following methods are called by OpenStack services to query
    # plugin features. Subclasses should probably not override these.
    def _notifiers(self):
        """Returns list of notifiers for this plugin."""
        return self._notifiers

    notifiers = property(_notifiers)

    def _api_extension_descriptors(self):
        """Return a list of API extension descriptors.

        Called by a project API during its load sequence.
        """
        return self._api_extension_descriptors

    api_extension_descriptors = property(_api_extension_descriptors)

    # Most plugins will override this:
    def __init__(self, service_name):
        self._notifiers = []
        self._api_extension_descriptors = []
cloudbaseinit/openstack/common/plugin/pluginmanager.py
@@ -1,77 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pkg_resources

from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common.notifier import api as notifier_api


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class PluginManager(object):
    """Manages plugin entrypoints and loading.

    For a service to implement this plugin interface for callback purposes:

    - Make use of the openstack-common notifier system
    - Instantiate this manager in each process (passing in
      project and service name)

    For an API service to extend itself using this plugin interface,
    it needs to query the plugin_extension_factory provided by
    the already-instantiated PluginManager.
    """

    def __init__(self, project_name, service_name):
        """ Construct Plugin Manager; load and initialize plugins.

        project_name (e.g. 'nova' or 'glance') is used
        to construct the entry point that identifies plugins.

        The service_name (e.g. 'compute') is passed on to
        each plugin as a raw string for it to do what it will.
        """
        self._project_name = project_name
        self._service_name = service_name
        self.plugins = []

    def load_plugins(self):
        self.plugins = []

        for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
                                                          self._project_name):
            try:
                pluginclass = entrypoint.load()
                plugin = pluginclass(self._service_name)
                self.plugins.append(plugin)
            except Exception, exc:
                LOG.error(_("Failed to load plugin %(plug)s: %(exc)s") %
                          {'plug': entrypoint, 'exc': exc})

        # Register individual notifiers.
        for plugin in self.plugins:
            for notifier in plugin.notifiers:
                notifier_api.add_driver(notifier)

    def plugin_extension_factory(self, ext_mgr):
        for plugin in self.plugins:
            descriptors = plugin.api_extension_descriptors
            for descriptor in descriptors:
                ext_mgr.load_extension(descriptor)
@ -1,779 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Common Policy Engine Implementation

Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.

In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.

In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.

As an example, take the following rule, expressed in the list-of-lists
representation::

    [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]

In the policy language, this becomes::

    role:admin or (project_id:%(project_id)s and role:projectadmin)

The policy language also has the "not" operator, allowing a richer
policy rule::

    project_id:%(project_id)s and not role:dunce

Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""

import abc
import logging
import re
import urllib

import urllib2

from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import jsonutils


LOG = logging.getLogger(__name__)


_rules = None
_checks = {}


class Rules(dict):
    """
    A store for rules. Handles the default_rule setting directly.
    """

    @classmethod
    def load_json(cls, data, default_rule=None):
        """
        Allow loading of JSON rule data.
        """

        # Suck in the JSON data and parse the rules
        rules = dict((k, parse_rule(v)) for k, v in
                     jsonutils.loads(data).items())

        return cls(rules, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""

        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling."""

        # If the default rule isn't actually defined, do something
        # reasonably intelligent
        if not self.default_rule or self.default_rule not in self:
            raise KeyError(key)

        return self[self.default_rule]

    def __str__(self):
        """Dumps a string representation of the rules."""

        # Start by building the canonical strings for the rules
        out_rules = {}
        for key, value in self.items():
            # Use empty string for singleton TrueCheck instances
            if isinstance(value, TrueCheck):
                out_rules[key] = ''
            else:
                out_rules[key] = str(value)

        # Dump a pretty-printed JSON representation
        return jsonutils.dumps(out_rules, indent=4)


# Really have to figure out a way to deprecate this
def set_rules(rules):
    """Set the rules in use for policy checks."""

    global _rules

    _rules = rules


# Ditto
def reset():
    """Clear the rules used for policy checks."""

    global _rules

    _rules = None


def check(rule, target, creds, exc=None, *args, **kwargs):
    """
    Checks authorization of a rule against the target and credentials.

    :param rule: The rule to evaluate.
    :param target: As much information about the object being operated
                   on as possible, as a dictionary.
    :param creds: As much information about the user performing the
                  action as possible, as a dictionary.
    :param exc: Class of the exception to raise if the check fails.
                Any remaining arguments passed to check() (both
                positional and keyword arguments) will be passed to
                the exception class. If exc is not provided, returns
                False.

    :return: Returns False if the policy does not allow the action and
             exc is not provided; otherwise, returns a value that
             evaluates to True. Note: for rules using the "case"
             expression, this True value will be the specified string
             from the expression.
    """

    # Allow the rule to be a Check tree
    if isinstance(rule, BaseCheck):
        result = rule(target, creds)
    elif not _rules:
        # No rules to reference means we're going to fail closed
        result = False
    else:
        try:
            # Evaluate the rule
            result = _rules[rule](target, creds)
        except KeyError:
            # If the rule doesn't exist, fail closed
            result = False

    # If it is False, raise the exception if requested
    if exc and result is False:
        raise exc(*args, **kwargs)

    return result


class BaseCheck(object):
    """
    Abstract base class for Check classes.
    """

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __str__(self):
        """
        Retrieve a string representation of the Check tree rooted at
        this node.
        """

        pass

    @abc.abstractmethod
    def __call__(self, target, cred):
        """
        Perform the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
        """

        pass


class FalseCheck(BaseCheck):
    """
    A policy check that always returns False (disallow).
    """

    def __str__(self):
        """Return a string representation of this check."""

        return "!"

    def __call__(self, target, cred):
        """Check the policy."""

        return False


class TrueCheck(BaseCheck):
    """
    A policy check that always returns True (allow).
    """

    def __str__(self):
        """Return a string representation of this check."""

        return "@"

    def __call__(self, target, cred):
        """Check the policy."""

        return True


class Check(BaseCheck):
    """
    A base class to allow for user-defined policy checks.
    """

    def __init__(self, kind, match):
        """
        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """

        self.kind = kind
        self.match = match

    def __str__(self):
        """Return a string representation of this check."""

        return "%s:%s" % (self.kind, self.match)


class NotCheck(BaseCheck):
    """
    A policy check that inverts the result of another policy check.
    Implements the "not" operator.
    """

    def __init__(self, rule):
        """
        Initialize the 'not' check.

        :param rule: The rule to negate. Must be a Check.
        """

        self.rule = rule

    def __str__(self):
        """Return a string representation of this check."""

        return "not %s" % self.rule

    def __call__(self, target, cred):
        """
        Check the policy. Returns the logical inverse of the wrapped
        check.
        """

        return not self.rule(target, cred)


class AndCheck(BaseCheck):
    """
    A policy check that requires that a list of other checks all
    return True. Implements the "and" operator.
    """

    def __init__(self, rules):
        """
        Initialize the 'and' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' and '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """
        Check the policy. Requires that all rules accept in order to
        return True.
        """

        for rule in self.rules:
            if not rule(target, cred):
                return False

        return True

    def add_check(self, rule):
        """
        Allows addition of another rule to the list of rules that will
        be tested. Returns the AndCheck object for convenience.
        """

        self.rules.append(rule)
        return self


class OrCheck(BaseCheck):
    """
    A policy check that requires that at least one of a list of other
    checks returns True. Implements the "or" operator.
    """

    def __init__(self, rules):
        """
        Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' or '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """
        Check the policy. Requires that at least one rule accept in
        order to return True.
        """

        for rule in self.rules:
            if rule(target, cred):
                return True

        return False

    def add_check(self, rule):
        """
        Allows addition of another rule to the list of rules that will
        be tested. Returns the OrCheck object for convenience.
        """

        self.rules.append(rule)
        return self


def _parse_check(rule):
    """
    Parse a single base check rule into an appropriate Check object.
    """

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()


def _parse_list_rule(rule):
    """
    Provided for backwards compatibility. Translates the old
    list-of-lists syntax into a tree of Check objects.
    """

    # Empty rule defaults to True
    if not rule:
        return TrueCheck()

    # Outer list is joined by "or"; inner list by "and"
    or_list = []
    for inner_rule in rule:
        # Elide empty inner lists
        if not inner_rule:
            continue

        # Handle bare strings
        if isinstance(inner_rule, basestring):
            inner_rule = [inner_rule]

        # Parse the inner rules into Check objects
        and_list = [_parse_check(r) for r in inner_rule]

        # Append the appropriate check to the or_list
        if len(and_list) == 1:
            or_list.append(and_list[0])
        else:
            or_list.append(AndCheck(and_list))

    # If we have only one check, omit the "or"
    if len(or_list) == 0:
        return FalseCheck()
    elif len(or_list) == 1:
        return or_list[0]

    return OrCheck(or_list)


# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')


def _parse_tokenize(rule):
    """
    Tokenizer for the policy language.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string. Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """

    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'


class ParseStateMeta(type):
    """
    Metaclass for the ParseState class. Facilitates identifying
    reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """
        Create the class. Injects the 'reducers' list, a list of
        tuples matching token sequences to the names of the
        corresponding reduction methods.
        """

        reducers = []

        for key, value in cls_dict.items():
            if not hasattr(value, 'reducers'):
                continue
            for reduction in value.reducers:
                reducers.append((reduction, key))

        cls_dict['reducers'] = reducers

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)


def reducer(*tokens):
    """
    Decorator for reduction methods. Arguments are a sequence of
    tokens, in order, which should trigger running this reduction
    method.
    """

    def decorator(func):
        # Make sure we have a list of reducer sequences
        if not hasattr(func, 'reducers'):
            func.reducers = []

        # Add the tokens to the list of reducer sequences
        func.reducers.append(list(tokens))

        return func

    return decorator


class ParseState(object):
    """
    Implement the core of parsing the policy language. Uses a greedy
    reduction algorithm to reduce a sequence of tokens into a single
    terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking. The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    __metaclass__ = ParseStateMeta

    def __init__(self):
        """Initialize the ParseState."""

        self.tokens = []
        self.values = []

    def reduce(self):
        """
        Perform a greedy reduction of the token stream. If a reducer
        method matches, it will be executed, then the reduce() method
        will be called recursively to search for any more possible
        reductions.
        """

        for reduction, methname in self.reducers:
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state. Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """
        Obtain the final result of the parse. Raises ValueError if
        the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """
        Create an 'and_expr' from two checks joined by the 'and'
        operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """
        Extend an 'and_expr' by adding one more check.
        """

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """
        Create an 'or_expr' from two checks joined by the 'or'
        operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """
        Extend an 'or_expr' by adding one more check.
        """

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]


def _parse_text_rule(rule):
    """
    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()


def parse_rule(rule):
    """
    Parses a policy rule into a tree of Check objects.
    """

    # If the rule is a string, it's in the policy language
    if isinstance(rule, basestring):
        return _parse_text_rule(rule)
    return _parse_list_rule(rule)


def register(name, func=None):
    """
    Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc. If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    # Perform the actual decoration by registering the function or
    # class. Returns the function or class for compliance with the
    # decorator interface.
    def decorator(func):
        _checks[name] = func
        return func

    # If the function or class is given, do the registration
    if func:
        return decorator(func)

    return decorator


@register("rule")
class RuleCheck(Check):
    def __call__(self, target, creds):
        """
        Recursively checks credentials based on the defined rules.
        """

        try:
            return _rules[self.match](target, creds)
        except KeyError:
            # We don't have any matching rule; fail closed
            return False


@register("role")
class RoleCheck(Check):
    def __call__(self, target, creds):
        """Check that there is a matching role in the cred dict."""

        return self.match.lower() in [x.lower() for x in creds['roles']]


@register('http')
class HttpCheck(Check):
    def __call__(self, target, creds):
        """
        Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """

        url = ('http:' + self.match) % target
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urllib.urlencode(data)
        f = urllib2.urlopen(url, post_data)
        return f.read() == "True"


@register(None)
class GenericCheck(Check):
    def __call__(self, target, creds):
        """
        Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin
        """

        # TODO(termie): do dict inspection via dot syntax
        match = self.match % target
        if self.kind in creds:
            return match == unicode(creds[self.kind])
        return False
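A minimal usage sketch of the policy module above; the rule string and the credential values are invented for illustration and are not part of this commit::

    from cloudbaseinit.openstack.common import policy

    # parse_rule() turns a policy-language string into a Check tree.
    rule = policy.parse_rule(
        "role:admin or (project_id:%(project_id)s and role:projectadmin)")

    target = {"project_id": "p1"}
    creds = {"roles": ["projectadmin"], "project_id": "p1"}

    # A Check tree can be passed straight to check(); no set_rules() is
    # needed for this form, and the result here is True.
    assert policy.check(rule, target, creds)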
@ -1,270 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A remote procedure call (rpc) abstraction.

For some wrappers that add message versioning to rpc, see:
    rpc.dispatcher
    rpc.proxy
"""

from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common import importutils


rpc_opts = [
    cfg.StrOpt('rpc_backend',
               default='%s.impl_kombu' % __package__,
               help="The messaging module to use, defaults to kombu."),
    cfg.IntOpt('rpc_thread_pool_size',
               default=64,
               help='Size of RPC thread pool'),
    cfg.IntOpt('rpc_conn_pool_size',
               default=30,
               help='Size of RPC connection pool'),
    cfg.IntOpt('rpc_response_timeout',
               default=60,
               help='Seconds to wait for a response from call or multicall'),
    cfg.IntOpt('rpc_cast_timeout',
               default=30,
               help='Seconds to wait before a cast expires (TTL). '
                    'Only supported by impl_zmq.'),
    cfg.ListOpt('allowed_rpc_exception_modules',
                default=['cloudbaseinit.openstack.common.exception',
                         'nova.exception',
                         'cinder.exception',
                         ],
                help='Modules of exceptions that are permitted to be '
                     'recreated upon receiving exception data from an '
                     'rpc call.'),
    cfg.BoolOpt('fake_rabbit',
                default=False,
                help='If passed, use a fake RabbitMQ provider'),
    #
    # The following options are not registered here, but are expected to be
    # present. The project using this library must register these options with
    # the configuration so that project-specific defaults may be defined.
    #
    #cfg.StrOpt('control_exchange',
    #           default='nova',
    #           help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]

cfg.CONF.register_opts(rpc_opts)


def create_connection(new=True):
    """Create a connection to the message bus used for rpc.

    For some example usage of creating a connection and some consumers on that
    connection, see nova.service.

    :param new: Whether or not to create a new connection. A new connection
                will be created by default. If new is False, the
                implementation is free to return an existing connection from a
                pool.

    :returns: An instance of openstack.common.rpc.common.Connection
    """
    return _get_impl().create_connection(cfg.CONF, new=new)


def call(context, topic, msg, timeout=None):
    """Invoke a remote method that returns something.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.

    :returns: A dict from the remote method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    return _get_impl().call(cfg.CONF, context, topic, msg, timeout)


def cast(context, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast(cfg.CONF, context, topic, msg)


def fanout_cast(context, topic, msg):
    """Broadcast a remote method invocation with no return.

    This method will get invoked on all consumers that were set up with this
    topic name and fanout=True.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=True.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)


def multicall(context, topic, msg, timeout=None):
    """Invoke a remote method and get back an iterator.

    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in
    via an iterator.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.

    :returns: An iterator. The iterator will yield a tuple (N, X) where N is
              an index that starts at 0 and increases by one for each value
              returned and X is the Nth value that was returned by the remote
              method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)


def notify(context, topic, msg):
    """Send notification event.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the notification to.
    :param msg: This is a dict of content of event.

    :returns: None
    """
    return _get_impl().notify(cfg.CONF, context, topic, msg)


def cleanup():
    """Clean up resources in use by implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service. This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()


def cast_to_server(context, server_params, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
                                      msg)


def fanout_cast_to_server(context, server_params, topic, msg):
    """Broadcast a remote method invocation with no return.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
                                             topic, msg)


def queue_get_for(context, topic, host):
    """Get a queue name for a given topic + host.

    This function only works if this naming convention is followed on the
    consumer side, as well. For example, in nova, every instance of the
    nova-foo service calls create_consumer() for two topics:

        foo
        foo.<host>

    Messages sent to the 'foo' topic are distributed to exactly one instance of
    the nova-foo service. The services are chosen in a round-robin fashion.
    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
    <host>.
    """
    return '%s.%s' % (topic, host) if host else topic


_RPCIMPL = None


def _get_impl():
    """Delay import of rpc_backend until configuration is loaded."""
    global _RPCIMPL
    if _RPCIMPL is None:
        try:
            _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
        except ImportError:
            # For backwards compatibility with older nova config.
            impl = cfg.CONF.rpc_backend.replace('nova.rpc',
                                                'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL
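A hypothetical caller of the API above, for orientation only; the 'compute' topic and 'get_host_uptime' method are invented for the example and assume a configured rpc backend::

    from cloudbaseinit.openstack.common import rpc

    def get_host_uptime(context, host):
        # queue_get_for() yields 'compute.<host>'; call() then blocks for up
        # to 30 seconds and raises rpc.common.Timeout if no reply arrives.
        topic = rpc.queue_get_for(context, 'compute', host)
        return rpc.call(context, topic,
                        {'method': 'get_host_uptime', 'args': {}},
                        timeout=30)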
@ -1,427 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Shared code between AMQP based openstack.common.rpc implementations.

The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""

import inspect
import sys
import uuid

from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore

from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common import excutils
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import local
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common.rpc import common as rpc_common


LOG = logging.getLogger(__name__)


class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""
    def __init__(self, conf, connection_cls, *args, **kwargs):
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)

    def empty(self):
        while self.free_items:
            self.get().close()


_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool


class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection(). This is essentially a wrapper around
    Connection that supports 'with'. It can also return a new
    Connection, or one from a pool. The function will also catch
    when an instance of this class is to be deleted. With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them. If possible the function makes sure to return a
    connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    pass
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement. We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection. Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        if self.connection:
            return getattr(self.connection, key)
        else:
            raise rpc_common.InvalidRPCConnectionReuse()


def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
              ending=False):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure)

        try:
            msg = {'result': reply, 'failure': failure}
        except TypeError:
            msg = {'result': dict((k, repr(v))
                   for k, v in reply.__dict__.iteritems()),
                   'failure': failure}
        if ending:
            msg['ending'] = True
        conn.direct_send(msg_id, msg)


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""
    def __init__(self, **kwargs):
        self.msg_id = kwargs.pop('msg_id', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None):
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
                      ending)
            if ending:
                self.msg_id = None


def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx


def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.

    """
    context_d = dict([('_context_%s' % key, value)
                      for (key, value) in context.to_dict().iteritems()])
    msg.update(context_d)


class ProxyCallback(object):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        self.proxy = proxy
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
        self.connection_pool = connection_pool
        self.conf = conf

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version', None)
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method, args)

    def _process_data(self, ctxt, version, method, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate. If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except Exception:
            LOG.exception(_('Exception during message handling'))
            ctxt.reply(None, sys.exc_info(),
                       connection_pool=self.connection_pool)


class MulticallWaiter(object):
    def __init__(self, conf, connection, timeout):
        self._connection = connection
        self._iterator = connection.iterconsume(timeout=timeout or
                                                conf.rpc_response_timeout)
        self._result = None
        self._done = False
        self._got_ending = False
        self._conf = conf

    def done(self):
        if self._done:
            return
        self._done = True
        self._iterator.close()
        self._iterator = None
        self._connection.close()

    def __call__(self, data):
        """The consume() callback will call this. Store the result."""
        if data['failure']:
            failure = data['failure']
            self._result = rpc_common.deserialize_remote_exception(self._conf,
                                                                   failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            self._result = data['result']

    def __iter__(self):
        """Return a result until we get a 'None' response from consumer"""
        if self._done:
            raise StopIteration
        while True:
            try:
                self._iterator.next()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            result = self._result
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result


def create_connection(conf, new, connection_pool):
    """Create a connection"""
    return ConnectionContext(conf, connection_pool, pooled=not new)


def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection. When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    LOG.debug(_('Making asynchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    pack_context(msg, context)

    conn = ConnectionContext(conf, connection_pool)
    wait_msg = MulticallWaiter(conf, conn, timeout)
    conn.declare_direct_consumer(msg_id, wait_msg)
    conn.topic_send(topic, msg)
    return wait_msg


def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, msg)


def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, msg)


def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.topic_send(topic, msg)


def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.fanout_send(topic, msg)


def notify(conf, context, topic, msg, connection_pool):
    """Sends a notification event on a topic."""
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.notify_send(topic, msg)


def cleanup(connection_pool):
    if connection_pool:
        connection_pool.empty()


def get_control_exchange(conf):
    try:
        return conf.control_exchange
    except cfg.NoSuchOptError:
        return 'openstack'
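A sketch of the pack_context()/unpack_context() round trip performed by the module above, assuming the vendored modules are importable and cfg.CONF is loaded; the user and tenant values are placeholders::

    from cloudbaseinit.openstack.common import cfg
    from cloudbaseinit.openstack.common.rpc import amqp

    msg = {'method': 'echo', 'args': {'value': 42}}
    ctx = amqp.RpcContext(conf=cfg.CONF, user='demo', tenant='demo')

    amqp.pack_context(msg, ctx)   # folds the context into _context_* keys
    ctx2 = amqp.unpack_context(cfg.CONF, msg)  # pops them back out
    assert ctx2.user == 'demo'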
@ -1,311 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2011 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import traceback
|
||||
|
||||
from cloudbaseinit.openstack.common.gettextutils import _
|
||||
from cloudbaseinit.openstack.common import importutils
|
||||
from cloudbaseinit.openstack.common import jsonutils
|
||||
from cloudbaseinit.openstack.common import local
|
||||
from cloudbaseinit.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RPCException(Exception):
|
||||
message = _("An unknown RPC related exception occurred.")
|
||||
|
||||
def __init__(self, message=None, **kwargs):
|
||||
self.kwargs = kwargs
|
||||
|
||||
if not message:
|
||||
try:
|
||||
message = self.message % kwargs
|
||||
|
||||
except Exception:
|
||||
# kwargs doesn't match a variable in the message
|
||||
# log the issue and the kwargs
|
||||
LOG.exception(_('Exception in string format operation'))
|
||||
for name, value in kwargs.iteritems():
|
||||
LOG.error("%s: %s" % (name, value))
|
||||
# at least get the core message out if something happened
|
||||
message = self.message
|
||||
|
||||
super(RPCException, self).__init__(message)
|
||||
|
||||
|
||||
class RemoteError(RPCException):
|
||||
"""Signifies that a remote class has raised an exception.
|
||||
|
||||
Contains a string representation of the type of the original exception,
|
||||
the value of the original exception, and the traceback. These are
|
||||
sent to the parent as a joined string so printing the exception
|
||||
contains all of the relevant info.
|
||||
|
||||
"""
|
||||
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
|
||||
|
||||
def __init__(self, exc_type=None, value=None, traceback=None):
|
||||
self.exc_type = exc_type
|
||||
self.value = value
|
||||
self.traceback = traceback
|
||||
super(RemoteError, self).__init__(exc_type=exc_type,
|
||||
value=value,
|
||||
traceback=traceback)
|
||||
|
||||
|
||||
class Timeout(RPCException):
|
||||
"""Signifies that a timeout has occurred.
|
||||
|
||||
This exception is raised if the rpc_response_timeout is reached while
|
||||
waiting for a response from the remote side.
|
||||
"""
|
||||
message = _("Timeout while waiting on RPC response.")
|
||||
|
||||
|
||||
class InvalidRPCConnectionReuse(RPCException):
|
||||
message = _("Invalid reuse of an RPC connection.")
|
||||
|
||||
|
||||
class UnsupportedRpcVersion(RPCException):
|
||||
message = _("Specified RPC version, %(version)s, not supported by "
|
||||
"this endpoint.")
|
||||
|
||||
|
||||
class Connection(object):
|
||||
"""A connection, returned by rpc.create_connection().
|
||||
|
||||
This class represents a connection to the message bus used for rpc.
|
||||
An instance of this class should never be created by users of the rpc API.
|
||||
Use rpc.create_connection() instead.
|
||||
"""
|
||||
def close(self):
|
||||
"""Close the connection.
|
||||
|
||||
This method must be called when the connection will no longer be used.
|
||||
It will ensure that any resources associated with the connection, such
|
||||
as a network connection, and cleaned up.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_consumer(self, topic, proxy, fanout=False):
|
||||
"""Create a consumer on this connection.
|
||||
|
||||
A consumer is associated with a message queue on the backend message
|
||||
bus. The consumer will read messages from the queue, unpack them, and
|
||||
dispatch them to the proxy object. The contents of the message pulled
|
||||
off of the queue will determine which method gets called on the proxy
|
||||
object.
|
||||
|
||||
:param topic: This is a name associated with what to consume from.
|
||||
Multiple instances of a service may consume from the same
|
||||
topic. For example, all instances of nova-compute consume
|
||||
from a queue called "compute". In that case, the
|
||||
messages will get distributed amongst the consumers in a
|
||||
round-robin fashion if fanout=False. If fanout=True,
|
||||
every consumer associated with this topic will get a
|
||||
copy of every message.
|
||||
:param proxy: The object that will handle all incoming messages.
|
||||
:param fanout: Whether or not this is a fanout topic. See the
|
||||
documentation for the topic parameter for some
|
||||
additional comments on this.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_worker(self, topic, proxy, pool_name):
|
||||
"""Create a worker on this connection.
|
||||
|
||||
A worker is like a regular consumer of messages directed to a
|
||||
topic, except that it is part of a set of such consumers (the
|
||||
"pool") which may run in parallel. Every pool of workers will
|
||||
receive a given message, but only one worker in the pool will
|
||||
be asked to process it. Load is distributed across the members
|
||||
of the pool in round-robin fashion.
|
||||
|
||||
:param topic: This is a name associated with what to consume from.
|
||||
Multiple instances of a service may consume from the same
|
||||
topic.
|
||||
:param proxy: The object that will handle all incoming messages.
|
||||
:param pool_name: String containing the name of the pool of workers
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def consume_in_thread(self):
|
||||
"""Spawn a thread to handle incoming messages.
|
||||
|
||||
Spawn a thread that will be responsible for handling all incoming
|
||||
messages for consumers that were set up on this connection.
|
||||
|
||||
Message dispatching inside of this is expected to be implemented in a
|
||||
non-blocking manner. An example implementation would be having this
|
||||
thread pull messages in for all of the consumers, but utilize a thread
|
||||
pool for dispatching the messages to the proxy objects.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def _safe_log(log_func, msg, msg_data):
|
||||
"""Sanitizes the msg_data field before logging."""
|
||||
SANITIZE = {'set_admin_password': ('new_pass',),
|
||||
'run_instance': ('admin_password',), }
|
||||
|
||||
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
|
||||
has_context_token = '_context_auth_token' in msg_data
|
||||
has_token = 'auth_token' in msg_data
|
||||
|
||||
if not any([has_method, has_context_token, has_token]):
|
||||
return log_func(msg, msg_data)
|
||||
|
||||
msg_data = copy.deepcopy(msg_data)
|
||||
|
||||
if has_method:
|
||||
method = msg_data['method']
|
||||
if method in SANITIZE:
|
||||
args_to_sanitize = SANITIZE[method]
|
||||
for arg in args_to_sanitize:
|
||||
try:
|
||||
msg_data['args'][arg] = "<SANITIZED>"
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if has_context_token:
|
||||
msg_data['_context_auth_token'] = '<SANITIZED>'
|
||||
|
||||
if has_token:
|
||||
msg_data['auth_token'] = '<SANITIZED>'
|
||||
|
||||
return log_func(msg, msg_data)
|
||||
|
||||
|
||||
def serialize_remote_exception(failure_info):
|
||||
"""Prepares exception data to be sent over rpc.
|
||||
|
||||
Failure_info should be a sys.exc_info() tuple.
|
||||
|
||||
"""
|
||||
tb = traceback.format_exception(*failure_info)
|
||||
failure = failure_info[1]
|
||||
LOG.error(_("Returning exception %s to caller"), unicode(failure))
|
||||
LOG.error(tb)
|
||||
|
||||
kwargs = {}
|
||||
if hasattr(failure, 'kwargs'):
|
||||
kwargs = failure.kwargs
|
||||
|
||||
data = {
|
||||
'class': str(failure.__class__.__name__),
|
||||
'module': str(failure.__class__.__module__),
|
||||
'message': unicode(failure),
|
||||
'tb': tb,
|
||||
'args': failure.args,
|
||||
'kwargs': kwargs
|
||||
}
|
||||
|
||||
json_data = jsonutils.dumps(data)
|
||||
|
||||
return json_data
|
||||
|
||||
|
||||
def deserialize_remote_exception(conf, data):
|
||||
failure = jsonutils.loads(str(data))
|
||||
|
||||
trace = failure.get('tb', [])
|
||||
message = failure.get('message', "") + "\n" + "\n".join(trace)
|
||||
name = failure.get('class')
|
||||
module = failure.get('module')
|
||||
|
||||
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
|
||||
# order to prevent arbitrary code execution.
|
||||
if not module in conf.allowed_rpc_exception_modules:
|
||||
return RemoteError(name, failure.get('message'), trace)
|
||||
|
||||
try:
|
||||
mod = importutils.import_module(module)
|
||||
klass = getattr(mod, name)
|
||||
if not issubclass(klass, Exception):
|
||||
raise TypeError("Can only deserialize Exceptions")
|
||||
|
||||
failure = klass(**failure.get('kwargs', {}))
|
||||
except (AttributeError, TypeError, ImportError):
|
||||
return RemoteError(name, failure.get('message'), trace)
|
||||
|
||||
ex_type = type(failure)
|
||||
str_override = lambda self: message
|
||||
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
|
||||
{'__str__': str_override, '__unicode__': str_override})
|
||||
try:
|
||||
# NOTE(ameade): Dynamically create a new exception type and swap it in
|
||||
# as the new type for the exception. This only works on user defined
|
||||
# Exceptions and not core python exceptions. This is important because
|
||||
# we cannot necessarily change an exception message so we must override
|
||||
# the __str__ method.
|
||||
failure.__class__ = new_ex_type
|
||||
except TypeError:
|
||||
# NOTE(ameade): If a core exception then just add the traceback to the
|
||||
# first exception argument.
|
||||
failure.args = (message,) + failure.args[1:]
|
||||
return failure
|
||||
|
||||
|
||||
class CommonRpcContext(object):
|
||||
def __init__(self, **kwargs):
|
||||
self.values = kwargs
|
||||
|
||||
def __getattr__(self, key):
|
||||
try:
|
||||
return self.values[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def to_dict(self):
|
||||
return copy.deepcopy(self.values)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, values):
|
||||
return cls(**values)
|
||||
|
||||
def deepcopy(self):
|
||||
return self.from_dict(self.to_dict())
|
||||
|
||||
def update_store(self):
|
||||
local.store.context = self
|
||||
|
||||
def elevated(self, read_deleted=None, overwrite=False):
|
||||
"""Return a version of this context with admin flag set."""
|
||||
# TODO(russellb) This method is a bit of a nova-ism. It makes
|
||||
# some assumptions about the data in the request context sent
|
||||
# across rpc, while the rest of this class does not. We could get
|
||||
# rid of this if we changed the nova code that uses this to
|
||||
# convert the RpcContext back to its native RequestContext doing
|
||||
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
|
||||
|
||||
context = self.deepcopy()
|
||||
context.values['is_admin'] = True
|
||||
|
||||
context.values.setdefault('roles', [])
|
||||
|
||||
if 'admin' not in context.values['roles']:
|
||||
context.values['roles'].append('admin')
|
||||
|
||||
if read_deleted is not None:
|
||||
context.values['read_deleted'] = read_deleted
|
||||
|
||||
return context
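A short, hedged sketch of round-tripping and elevating a context with the class above:

ctxt = CommonRpcContext(user='alice', tenant='demo', roles=['member'])
wire = ctxt.to_dict()                       # what crosses the rpc boundary
restored = CommonRpcContext.from_dict(wire)

admin = restored.elevated()
# admin.is_admin is True and admin.roles == ['member', 'admin'];
# 'restored' itself is untouched because elevated() works on a deepcopy.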
|
@ -1,152 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Code for rpc message dispatching.
|
||||
|
||||
Messages that come in have a version number associated with them. RPC API
|
||||
version numbers are in the form:
|
||||
|
||||
Major.Minor
|
||||
|
||||
For a given message with version X.Y, the receiver must be marked as able to
|
||||
handle messages of version A.B, where:
|
||||
|
||||
A = X
|
||||
|
||||
B >= Y
|
||||
|
||||
The Major version number would be incremented for an almost completely new API.
|
||||
The Minor version number would be incremented for backwards compatible changes
|
||||
to an existing API. A backwards compatible change could be something like
|
||||
adding a new method, adding an argument to an existing method (but not
|
||||
requiring it), or changing the type for an existing argument (but still
|
||||
handling the old type as well).
|
||||
|
||||
The conversion over to a versioned API must be done on both the client side and
|
||||
server side of the API at the same time. However, as the code stands today,
|
||||
there can be both versioned and unversioned APIs implemented in the same code
|
||||
base.
|
||||
|
||||
EXAMPLES
|
||||
========
|
||||
|
||||
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
|
||||
API as an example. The client side is in nova/compute/rpcapi.py and the server
|
||||
side is in nova/compute/manager.py.
|
||||
|
||||
|
||||
Example 1) Adding a new method.
|
||||
-------------------------------
|
||||
|
||||
Adding a new method is a backwards compatible change. It should be added to
|
||||
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
|
||||
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
|
||||
have a specific version specified to indicate the minimum API version that must
|
||||
be implemented for the method to be supported. For example::
|
||||
|
||||
def get_host_uptime(self, ctxt, host):
|
||||
topic = _compute_topic(self.topic, ctxt, host, None)
|
||||
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
|
||||
version='1.1')
|
||||
|
||||
In this case, version '1.1' is the first version that supported the
|
||||
get_host_uptime() method.
|
||||
|
||||
|
||||
Example 2) Adding a new parameter.
|
||||
----------------------------------
|
||||
|
||||
Adding a new parameter to an rpc method can be made backwards compatible. The
|
||||
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
|
||||
The implementation of the method must not expect the parameter to be present::
|
||||
|
||||
def some_remote_method(self, arg1, arg2, newarg=None):
|
||||
# The code needs to deal with newarg=None for cases
|
||||
# where an older client sends a message without it.
|
||||
pass
|
||||
|
||||
On the client side, the same changes should be made as in example 1. The
|
||||
minimum version that supports the new parameter should be specified.
|
||||
"""
|
||||
|
||||
from cloudbaseinit.openstack.common.rpc import common as rpc_common
|
||||
|
||||
|
||||
class RpcDispatcher(object):
|
||||
"""Dispatch rpc messages according to the requested API version.
|
||||
|
||||
This class can be used as the top level 'manager' for a service. It
|
||||
contains a list of underlying managers that have an API_VERSION attribute.
|
||||
"""
|
||||
|
||||
def __init__(self, callbacks):
|
||||
"""Initialize the rpc dispatcher.
|
||||
|
||||
:param callbacks: List of proxy objects that are an instance
|
||||
of a class with rpc methods exposed. Each proxy
|
||||
object should have an RPC_API_VERSION attribute.
|
||||
"""
|
||||
self.callbacks = callbacks
|
||||
super(RpcDispatcher, self).__init__()
|
||||
|
||||
@staticmethod
|
||||
def _is_compatible(mversion, version):
|
||||
"""Determine whether versions are compatible.
|
||||
|
||||
:param mversion: The API version implemented by a callback.
|
||||
:param version: The API version requested by an incoming message.
|
||||
"""
|
||||
version_parts = version.split('.')
|
||||
mversion_parts = mversion.split('.')
|
||||
if int(version_parts[0]) != int(mversion_parts[0]): # Major
|
||||
return False
|
||||
if int(version_parts[1]) > int(mversion_parts[1]): # Minor
|
||||
return False
|
||||
return True
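Hedged examples of the Major.Minor rule this check implements (see the module docstring above):

assert RpcDispatcher._is_compatible('1.2', '1.0')       # older minor: ok
assert RpcDispatcher._is_compatible('1.2', '1.2')       # exact match: ok
assert not RpcDispatcher._is_compatible('1.2', '1.3')   # newer minor: no
assert not RpcDispatcher._is_compatible('2.0', '1.2')   # major mismatch: no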
|
||||
|
||||
def dispatch(self, ctxt, version, method, **kwargs):
|
||||
"""Dispatch a message based on a requested version.
|
||||
|
||||
:param ctxt: The request context
|
||||
:param version: The requested API version from the incoming message
|
||||
:param method: The method requested to be called by the incoming
|
||||
message.
|
||||
:param kwargs: A dict of keyword arguments to be passed to the method.
|
||||
|
||||
:returns: Whatever is returned by the underlying method that gets
|
||||
called.
|
||||
"""
|
||||
if not version:
|
||||
version = '1.0'
|
||||
|
||||
had_compatible = False
|
||||
for proxyobj in self.callbacks:
|
||||
if hasattr(proxyobj, 'RPC_API_VERSION'):
|
||||
rpc_api_version = proxyobj.RPC_API_VERSION
|
||||
else:
|
||||
rpc_api_version = '1.0'
|
||||
is_compatible = self._is_compatible(rpc_api_version, version)
|
||||
had_compatible = had_compatible or is_compatible
|
||||
if not hasattr(proxyobj, method):
|
||||
continue
|
||||
if is_compatible:
|
||||
return getattr(proxyobj, method)(ctxt, **kwargs)
|
||||
|
||||
if had_compatible:
|
||||
raise AttributeError("No such RPC function '%s'" % method)
|
||||
else:
|
||||
raise rpc_common.UnsupportedRpcVersion(version=version)
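A hedged end-to-end sketch of wiring up the dispatcher; ComputeAPI is a hypothetical callback object modelled on the versioning examples in the module docstring:

class ComputeAPI(object):
    RPC_API_VERSION = '1.1'

    def get_host_uptime(self, ctxt, host):
        return '%s: up 42 days' % host

dispatcher = RpcDispatcher([ComputeAPI()])

# Versions '1.0' and '1.1' reach the method; '2.0' raises
# UnsupportedRpcVersion and an unknown method raises AttributeError.
print(dispatcher.dispatch(None, '1.1', 'get_host_uptime', host='node1'))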
|
@ -1,184 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""Fake RPC implementation which calls proxy methods directly with no
|
||||
queues. Casts will block, but this is very useful for tests.
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
|
||||
from cloudbaseinit.openstack.common import jsonutils
|
||||
from cloudbaseinit.openstack.common.rpc import common as rpc_common
|
||||
|
||||
CONSUMERS = {}
|
||||
|
||||
|
||||
class RpcContext(rpc_common.CommonRpcContext):
|
||||
def __init__(self, **kwargs):
|
||||
super(RpcContext, self).__init__(**kwargs)
|
||||
self._response = []
|
||||
self._done = False
|
||||
|
||||
def deepcopy(self):
|
||||
values = self.to_dict()
|
||||
new_inst = self.__class__(**values)
|
||||
new_inst._response = self._response
|
||||
new_inst._done = self._done
|
||||
return new_inst
|
||||
|
||||
def reply(self, reply=None, failure=None, ending=False):
|
||||
if ending:
|
||||
self._done = True
|
||||
if not self._done:
|
||||
self._response.append((reply, failure))
|
||||
|
||||
|
||||
class Consumer(object):
|
||||
def __init__(self, topic, proxy):
|
||||
self.topic = topic
|
||||
self.proxy = proxy
|
||||
|
||||
def call(self, context, version, method, args, timeout):
|
||||
done = eventlet.event.Event()
|
||||
|
||||
def _inner():
|
||||
ctxt = RpcContext.from_dict(context.to_dict())
|
||||
try:
|
||||
# Dispatch with the copied context so that manual ctxt.reply()
# calls are captured in ctxt._response below.
rval = self.proxy.dispatch(ctxt, version, method, **args)
|
||||
res = []
|
||||
# Caller might have called ctxt.reply() manually
|
||||
for (reply, failure) in ctxt._response:
|
||||
if failure:
|
||||
raise failure[0], failure[1], failure[2]
|
||||
res.append(reply)
|
||||
# If an 'ending' reply was never sent, the return value of
# the function itself may still carry data for the caller.
|
||||
if not ctxt._done:
|
||||
if inspect.isgenerator(rval):
|
||||
for val in rval:
|
||||
res.append(val)
|
||||
else:
|
||||
res.append(rval)
|
||||
done.send(res)
|
||||
except Exception as e:
|
||||
done.send_exception(e)
|
||||
|
||||
thread = eventlet.greenthread.spawn(_inner)
|
||||
|
||||
if timeout:
|
||||
start_time = time.time()
|
||||
while not done.ready():
|
||||
eventlet.greenthread.sleep(1)
|
||||
cur_time = time.time()
|
||||
if (cur_time - start_time) > timeout:
|
||||
thread.kill()
|
||||
raise rpc_common.Timeout()
|
||||
|
||||
return done.wait()
|
||||
|
||||
|
||||
class Connection(object):
|
||||
"""Connection object."""
|
||||
|
||||
def __init__(self):
|
||||
self.consumers = []
|
||||
|
||||
def create_consumer(self, topic, proxy, fanout=False):
|
||||
consumer = Consumer(topic, proxy)
|
||||
self.consumers.append(consumer)
|
||||
if topic not in CONSUMERS:
|
||||
CONSUMERS[topic] = []
|
||||
CONSUMERS[topic].append(consumer)
|
||||
|
||||
def close(self):
|
||||
for consumer in self.consumers:
|
||||
CONSUMERS[consumer.topic].remove(consumer)
|
||||
self.consumers = []
|
||||
|
||||
def consume_in_thread(self):
|
||||
pass
|
||||
|
||||
|
||||
def create_connection(conf, new=True):
|
||||
"""Create a connection"""
|
||||
return Connection()
|
||||
|
||||
|
||||
def check_serialize(msg):
|
||||
"""Make sure a message intended for rpc can be serialized."""
|
||||
jsonutils.dumps(msg)
|
||||
|
||||
|
||||
def multicall(conf, context, topic, msg, timeout=None):
|
||||
"""Make a call that returns multiple times."""
|
||||
|
||||
check_serialize(msg)
|
||||
|
||||
method = msg.get('method')
|
||||
if not method:
|
||||
return
|
||||
args = msg.get('args', {})
|
||||
version = msg.get('version', None)
|
||||
|
||||
try:
|
||||
consumer = CONSUMERS[topic][0]
|
||||
except (KeyError, IndexError):
|
||||
return iter([None])
|
||||
else:
|
||||
return consumer.call(context, version, method, args, timeout)
|
||||
|
||||
|
||||
def call(conf, context, topic, msg, timeout=None):
|
||||
"""Sends a message on a topic and wait for a response."""
|
||||
rv = multicall(conf, context, topic, msg, timeout)
|
||||
# NOTE(vish): return the last result from the multicall
|
||||
rv = list(rv)
|
||||
if not rv:
|
||||
return
|
||||
return rv[-1]
|
||||
|
||||
|
||||
def cast(conf, context, topic, msg):
|
||||
try:
|
||||
call(conf, context, topic, msg)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def notify(conf, context, topic, msg):
|
||||
check_serialize(msg)
|
||||
|
||||
|
||||
def cleanup():
|
||||
pass
|
||||
|
||||
|
||||
def fanout_cast(conf, context, topic, msg):
|
||||
"""Cast to all consumers of a topic"""
|
||||
check_serialize(msg)
|
||||
method = msg.get('method')
|
||||
if not method:
|
||||
return
|
||||
args = msg.get('args', {})
|
||||
version = msg.get('version', None)
|
||||
|
||||
for consumer in CONSUMERS.get(topic, []):
|
||||
try:
|
||||
consumer.call(context, version, method, args, None)
|
||||
except Exception:
|
||||
pass
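A hedged sketch of driving the fake driver in a test; conf is ignored by this implementation, and Proxy is a hypothetical stand-in for a dispatcher:

conn = create_connection(None)

class Proxy(object):
    # Hypothetical: anything exposing dispatch(ctxt, version, method, **kw).
    def dispatch(self, ctxt, version, method, **kwargs):
        return 'pong' if method == 'ping' else None

conn.create_consumer('test-topic', Proxy())

result = call(None, RpcContext(), 'test-topic', {'method': 'ping'})
# -> 'pong'; cast() runs the same path but discards the result.
conn.close()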
|
@ -1,793 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
|
||||
import itertools
|
||||
import socket
|
||||
import ssl
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import eventlet
|
||||
import greenlet
|
||||
import kombu
|
||||
import kombu.connection
|
||||
import kombu.entity
|
||||
import kombu.messaging
|
||||
|
||||
from cloudbaseinit.openstack.common import cfg
|
||||
from cloudbaseinit.openstack.common.gettextutils import _
|
||||
from cloudbaseinit.openstack.common import network_utils
|
||||
from cloudbaseinit.openstack.common.rpc import amqp as rpc_amqp
|
||||
from cloudbaseinit.openstack.common.rpc import common as rpc_common
|
||||
|
||||
kombu_opts = [
|
||||
cfg.StrOpt('kombu_ssl_version',
|
||||
default='',
|
||||
help='SSL version to use (valid only if SSL enabled)'),
|
||||
cfg.StrOpt('kombu_ssl_keyfile',
|
||||
default='',
|
||||
help='SSL key file (valid only if SSL enabled)'),
|
||||
cfg.StrOpt('kombu_ssl_certfile',
|
||||
default='',
|
||||
help='SSL cert file (valid only if SSL enabled)'),
|
||||
cfg.StrOpt('kombu_ssl_ca_certs',
|
||||
default='',
|
||||
help=('SSL certificate authority (CA) file '
      '(valid only if SSL enabled)')),
|
||||
cfg.StrOpt('rabbit_host',
|
||||
default='localhost',
|
||||
help='The RabbitMQ broker address where a single node is used'),
|
||||
cfg.IntOpt('rabbit_port',
|
||||
default=5672,
|
||||
help='The RabbitMQ broker port where a single node is used'),
|
||||
cfg.ListOpt('rabbit_hosts',
|
||||
default=['$rabbit_host:$rabbit_port'],
|
||||
help='RabbitMQ HA cluster host:port pairs'),
|
||||
cfg.BoolOpt('rabbit_use_ssl',
|
||||
default=False,
|
||||
help='connect over SSL for RabbitMQ'),
|
||||
cfg.StrOpt('rabbit_userid',
|
||||
default='guest',
|
||||
help='the RabbitMQ userid'),
|
||||
cfg.StrOpt('rabbit_password',
|
||||
default='guest',
|
||||
help='the RabbitMQ password'),
|
||||
cfg.StrOpt('rabbit_virtual_host',
|
||||
default='/',
|
||||
help='the RabbitMQ virtual host'),
|
||||
cfg.IntOpt('rabbit_retry_interval',
|
||||
default=1,
|
||||
help='how frequently to retry connecting with RabbitMQ'),
|
||||
cfg.IntOpt('rabbit_retry_backoff',
|
||||
default=2,
|
||||
help='how long to back off between retries when connecting '
     'to RabbitMQ'),
|
||||
cfg.IntOpt('rabbit_max_retries',
|
||||
default=0,
|
||||
help='maximum retries with trying to connect to RabbitMQ '
|
||||
'(the default of 0 implies an infinite retry count)'),
|
||||
cfg.BoolOpt('rabbit_durable_queues',
|
||||
default=False,
|
||||
help='use durable queues in RabbitMQ'),
|
||||
cfg.BoolOpt('rabbit_ha_queues',
|
||||
default=False,
|
||||
help='use H/A queues in RabbitMQ (x-ha-policy: all). '
     'You need to wipe the RabbitMQ database when '
     'changing this option.'),
|
||||
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(kombu_opts)
|
||||
|
||||
LOG = rpc_common.LOG
|
||||
|
||||
|
||||
def _get_queue_arguments(conf):
|
||||
"""Construct the arguments for declaring a queue.
|
||||
|
||||
If the rabbit_ha_queues option is set, we declare a mirrored queue
|
||||
as described here:
|
||||
|
||||
http://www.rabbitmq.com/ha.html
|
||||
|
||||
Setting x-ha-policy to all means that the queue will be mirrored
|
||||
to all nodes in the cluster.
|
||||
"""
|
||||
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
|
||||
|
||||
|
||||
class ConsumerBase(object):
|
||||
"""Consumer base class."""
|
||||
|
||||
def __init__(self, channel, callback, tag, **kwargs):
|
||||
"""Declare a queue on an amqp channel.
|
||||
|
||||
'channel' is the amqp channel to use
|
||||
'callback' is the callback to call when messages are received
|
||||
'tag' is a unique ID for the consumer on the channel
|
||||
|
||||
queue name, exchange name, and other kombu options are
|
||||
passed in here as a dictionary.
|
||||
"""
|
||||
self.callback = callback
|
||||
self.tag = str(tag)
|
||||
self.kwargs = kwargs
|
||||
self.queue = None
|
||||
self.reconnect(channel)
|
||||
|
||||
def reconnect(self, channel):
|
||||
"""Re-declare the queue after a rabbit reconnect"""
|
||||
self.channel = channel
|
||||
self.kwargs['channel'] = channel
|
||||
self.queue = kombu.entity.Queue(**self.kwargs)
|
||||
self.queue.declare()
|
||||
|
||||
def consume(self, *args, **kwargs):
|
||||
"""Actually declare the consumer on the amqp channel. This will
|
||||
start the flow of messages from the queue. Using the
|
||||
Connection.iterconsume() iterator will process the messages,
|
||||
calling the appropriate callback.
|
||||
|
||||
If a callback is specified in kwargs, use that. Otherwise,
|
||||
use the callback passed during __init__()
|
||||
|
||||
If kwargs['nowait'] is True, the consumer is declared without
waiting for the broker to confirm the declaration.
|
||||
|
||||
Messages will automatically be acked if the callback doesn't
|
||||
raise an exception
|
||||
"""
|
||||
|
||||
options = {'consumer_tag': self.tag}
|
||||
options['nowait'] = kwargs.get('nowait', False)
|
||||
callback = kwargs.get('callback', self.callback)
|
||||
if not callback:
|
||||
raise ValueError("No callback defined")
|
||||
|
||||
def _callback(raw_message):
|
||||
message = self.channel.message_to_python(raw_message)
|
||||
try:
|
||||
callback(message.payload)
|
||||
message.ack()
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to process message... skipping it."))
|
||||
|
||||
self.queue.consume(*args, callback=_callback, **options)
|
||||
|
||||
def cancel(self):
|
||||
"""Cancel the consuming from the queue, if it has started"""
|
||||
try:
|
||||
self.queue.cancel(self.tag)
|
||||
except KeyError, e:
|
||||
# NOTE(comstud): Kludge to get around a amqplib bug
|
||||
if str(e) != "u'%s'" % self.tag:
|
||||
raise
|
||||
self.queue = None
|
||||
|
||||
|
||||
class DirectConsumer(ConsumerBase):
|
||||
"""Queue/consumer class for 'direct'"""
|
||||
|
||||
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
|
||||
"""Init a 'direct' queue.
|
||||
|
||||
'channel' is the amqp channel to use
|
||||
'msg_id' is the msg_id to listen on
|
||||
'callback' is the callback to call when messages are received
|
||||
'tag' is a unique ID for the consumer on the channel
|
||||
|
||||
Other kombu options may be passed
|
||||
"""
|
||||
# Default options
|
||||
options = {'durable': False,
|
||||
'auto_delete': True,
|
||||
'exclusive': True}
|
||||
options.update(kwargs)
|
||||
exchange = kombu.entity.Exchange(name=msg_id,
|
||||
type='direct',
|
||||
durable=options['durable'],
|
||||
auto_delete=options['auto_delete'])
|
||||
super(DirectConsumer, self).__init__(channel,
|
||||
callback,
|
||||
tag,
|
||||
name=msg_id,
|
||||
exchange=exchange,
|
||||
routing_key=msg_id,
|
||||
**options)
|
||||
|
||||
|
||||
class TopicConsumer(ConsumerBase):
|
||||
"""Consumer class for 'topic'"""
|
||||
|
||||
def __init__(self, conf, channel, topic, callback, tag, name=None,
|
||||
exchange_name=None, **kwargs):
|
||||
"""Init a 'topic' queue.
|
||||
|
||||
:param channel: the amqp channel to use
|
||||
:param topic: the topic to listen on
|
||||
:paramtype topic: str
|
||||
:param callback: the callback to call when messages are received
|
||||
:param tag: a unique ID for the consumer on the channel
|
||||
:param name: optional queue name, defaults to topic
|
||||
:paramtype name: str
|
||||
|
||||
Other kombu options may be passed as keyword arguments
|
||||
"""
|
||||
# Default options
|
||||
options = {'durable': conf.rabbit_durable_queues,
|
||||
'queue_arguments': _get_queue_arguments(conf),
|
||||
'auto_delete': False,
|
||||
'exclusive': False}
|
||||
options.update(kwargs)
|
||||
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
|
||||
exchange = kombu.entity.Exchange(name=exchange_name,
|
||||
type='topic',
|
||||
durable=options['durable'],
|
||||
auto_delete=options['auto_delete'])
|
||||
super(TopicConsumer, self).__init__(channel,
|
||||
callback,
|
||||
tag,
|
||||
name=name or topic,
|
||||
exchange=exchange,
|
||||
routing_key=topic,
|
||||
**options)
|
||||
|
||||
|
||||
class FanoutConsumer(ConsumerBase):
|
||||
"""Consumer class for 'fanout'"""
|
||||
|
||||
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
|
||||
"""Init a 'fanout' queue.
|
||||
|
||||
'channel' is the amqp channel to use
|
||||
'topic' is the topic to listen on
|
||||
'callback' is the callback to call when messages are received
|
||||
'tag' is a unique ID for the consumer on the channel
|
||||
|
||||
Other kombu options may be passed
|
||||
"""
|
||||
unique = uuid.uuid4().hex
|
||||
exchange_name = '%s_fanout' % topic
|
||||
queue_name = '%s_fanout_%s' % (topic, unique)
|
||||
|
||||
# Default options
|
||||
options = {'durable': False,
|
||||
'queue_arguments': _get_queue_arguments(conf),
|
||||
'auto_delete': True,
|
||||
'exclusive': True}
|
||||
options.update(kwargs)
|
||||
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
|
||||
durable=options['durable'],
|
||||
auto_delete=options['auto_delete'])
|
||||
super(FanoutConsumer, self).__init__(channel, callback, tag,
|
||||
name=queue_name,
|
||||
exchange=exchange,
|
||||
routing_key=topic,
|
||||
**options)
|
||||
|
||||
|
||||
class Publisher(object):
|
||||
"""Base Publisher class"""
|
||||
|
||||
def __init__(self, channel, exchange_name, routing_key, **kwargs):
|
||||
"""Init the Publisher class with the exchange_name, routing_key,
|
||||
and other options
|
||||
"""
|
||||
self.exchange_name = exchange_name
|
||||
self.routing_key = routing_key
|
||||
self.kwargs = kwargs
|
||||
self.reconnect(channel)
|
||||
|
||||
def reconnect(self, channel):
|
||||
"""Re-establish the Producer after a rabbit reconnection"""
|
||||
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
|
||||
**self.kwargs)
|
||||
self.producer = kombu.messaging.Producer(exchange=self.exchange,
|
||||
channel=channel,
|
||||
routing_key=self.routing_key)
|
||||
|
||||
def send(self, msg):
|
||||
"""Send a message"""
|
||||
self.producer.publish(msg)
|
||||
|
||||
|
||||
class DirectPublisher(Publisher):
|
||||
"""Publisher class for 'direct'"""
|
||||
def __init__(self, conf, channel, msg_id, **kwargs):
|
||||
"""init a 'direct' publisher.
|
||||
|
||||
Kombu options may be passed as keyword args to override defaults
|
||||
"""
|
||||
|
||||
options = {'durable': False,
|
||||
'auto_delete': True,
|
||||
'exclusive': True}
|
||||
options.update(kwargs)
|
||||
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
|
||||
type='direct', **options)
|
||||
|
||||
|
||||
class TopicPublisher(Publisher):
|
||||
"""Publisher class for 'topic'"""
|
||||
def __init__(self, conf, channel, topic, **kwargs):
|
||||
"""init a 'topic' publisher.
|
||||
|
||||
Kombu options may be passed as keyword args to override defaults
|
||||
"""
|
||||
options = {'durable': conf.rabbit_durable_queues,
|
||||
'auto_delete': False,
|
||||
'exclusive': False}
|
||||
options.update(kwargs)
|
||||
exchange_name = rpc_amqp.get_control_exchange(conf)
|
||||
super(TopicPublisher, self).__init__(channel,
|
||||
exchange_name,
|
||||
topic,
|
||||
type='topic',
|
||||
**options)
|
||||
|
||||
|
||||
class FanoutPublisher(Publisher):
|
||||
"""Publisher class for 'fanout'"""
|
||||
def __init__(self, conf, channel, topic, **kwargs):
|
||||
"""init a 'fanout' publisher.
|
||||
|
||||
Kombu options may be passed as keyword args to override defaults
|
||||
"""
|
||||
options = {'durable': False,
|
||||
'auto_delete': True,
|
||||
'exclusive': True}
|
||||
options.update(kwargs)
|
||||
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
|
||||
None, type='fanout', **options)
|
||||
|
||||
|
||||
class NotifyPublisher(TopicPublisher):
|
||||
"""Publisher class for 'notify'"""
|
||||
|
||||
def __init__(self, conf, channel, topic, **kwargs):
|
||||
self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
|
||||
self.queue_arguments = _get_queue_arguments(conf)
|
||||
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
|
||||
|
||||
def reconnect(self, channel):
|
||||
super(NotifyPublisher, self).reconnect(channel)
|
||||
|
||||
# NOTE(jerdfelt): Normally the consumer would create the queue, but
|
||||
# we do this to ensure that messages don't get dropped if the
|
||||
# consumer is started after we do
|
||||
queue = kombu.entity.Queue(channel=channel,
|
||||
exchange=self.exchange,
|
||||
durable=self.durable,
|
||||
name=self.routing_key,
|
||||
routing_key=self.routing_key,
|
||||
queue_arguments=self.queue_arguments)
|
||||
queue.declare()
|
||||
|
||||
|
||||
class Connection(object):
|
||||
"""Connection object."""
|
||||
|
||||
pool = None
|
||||
|
||||
def __init__(self, conf, server_params=None):
|
||||
self.consumers = []
|
||||
self.consumer_thread = None
|
||||
self.conf = conf
|
||||
self.max_retries = self.conf.rabbit_max_retries
|
||||
# Try forever?
|
||||
if self.max_retries <= 0:
|
||||
self.max_retries = None
|
||||
self.interval_start = self.conf.rabbit_retry_interval
|
||||
self.interval_stepping = self.conf.rabbit_retry_backoff
|
||||
# max retry-interval = 30 seconds
|
||||
self.interval_max = 30
|
||||
self.memory_transport = False
|
||||
|
||||
if server_params is None:
|
||||
server_params = {}
|
||||
# Keys to translate from server_params to kombu params
|
||||
server_params_to_kombu_params = {'username': 'userid'}
|
||||
|
||||
ssl_params = self._fetch_ssl_params()
|
||||
params_list = []
|
||||
for adr in self.conf.rabbit_hosts:
|
||||
hostname, port = network_utils.parse_host_port(
|
||||
adr, default_port=self.conf.rabbit_port)
|
||||
|
||||
params = {
|
||||
'hostname': hostname,
|
||||
'port': port,
|
||||
'userid': self.conf.rabbit_userid,
|
||||
'password': self.conf.rabbit_password,
|
||||
'virtual_host': self.conf.rabbit_virtual_host,
|
||||
}
|
||||
|
||||
for sp_key, value in server_params.iteritems():
|
||||
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
|
||||
params[p_key] = value
|
||||
|
||||
if self.conf.fake_rabbit:
|
||||
params['transport'] = 'memory'
|
||||
if self.conf.rabbit_use_ssl:
|
||||
params['ssl'] = ssl_params
|
||||
|
||||
params_list.append(params)
|
||||
|
||||
self.params_list = params_list
|
||||
|
||||
self.memory_transport = self.conf.fake_rabbit
|
||||
|
||||
self.connection = None
|
||||
self.reconnect()
|
||||
|
||||
def _fetch_ssl_params(self):
|
||||
"""Handles fetching what ssl params
|
||||
should be used for the connection (if any)"""
|
||||
ssl_params = dict()
|
||||
|
||||
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
|
||||
if self.conf.kombu_ssl_version:
|
||||
ssl_params['ssl_version'] = self.conf.kombu_ssl_version
|
||||
if self.conf.kombu_ssl_keyfile:
|
||||
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
|
||||
if self.conf.kombu_ssl_certfile:
|
||||
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
|
||||
if self.conf.kombu_ssl_ca_certs:
|
||||
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
|
||||
# We might want to allow variations in the
|
||||
# future with this?
|
||||
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
|
||||
|
||||
if not ssl_params:
|
||||
# Just have the default behavior
|
||||
return True
|
||||
else:
|
||||
# Return the extended behavior
|
||||
return ssl_params
|
||||
|
||||
def _connect(self, params):
|
||||
"""Connect to rabbit. Re-establish any queues that may have
|
||||
been declared before if we are reconnecting. Exceptions should
|
||||
be handled by the caller.
|
||||
"""
|
||||
if self.connection:
|
||||
LOG.info(_("Reconnecting to AMQP server on "
|
||||
"%(hostname)s:%(port)d") % params)
|
||||
try:
|
||||
self.connection.close()
|
||||
except self.connection_errors:
|
||||
pass
|
||||
# Setting this in case the next statement fails, though
|
||||
# it shouldn't be doing any network operations, yet.
|
||||
self.connection = None
|
||||
self.connection = kombu.connection.BrokerConnection(**params)
|
||||
self.connection_errors = self.connection.connection_errors
|
||||
if self.memory_transport:
|
||||
# Kludge to speed up tests.
|
||||
self.connection.transport.polling_interval = 0.0
|
||||
self.consumer_num = itertools.count(1)
|
||||
self.connection.connect()
|
||||
self.channel = self.connection.channel()
|
||||
# work around 'memory' transport bug in 1.1.3
|
||||
if self.memory_transport:
|
||||
self.channel._new_queue('ae.undeliver')
|
||||
for consumer in self.consumers:
|
||||
consumer.reconnect(self.channel)
|
||||
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
|
||||
params)
|
||||
|
||||
def reconnect(self):
|
||||
"""Handles reconnecting and re-establishing queues.
|
||||
Will retry up to self.max_retries number of times.
|
||||
self.max_retries = 0 means to retry forever.
|
||||
Sleep between tries, starting at self.interval_start
|
||||
seconds, backing off self.interval_stepping number of seconds
|
||||
each attempt.
|
||||
"""
|
||||
|
||||
attempt = 0
|
||||
while True:
|
||||
params = self.params_list[attempt % len(self.params_list)]
|
||||
attempt += 1
|
||||
try:
|
||||
self._connect(params)
|
||||
return
|
||||
except (IOError, self.connection_errors) as e:
|
||||
pass
|
||||
except Exception, e:
|
||||
# NOTE(comstud): Unfortunately it's possible for amqplib
|
||||
# to return an error not covered by its transport
|
||||
# connection_errors in the case of a timeout waiting for
|
||||
# a protocol response. (See paste link in LP888621)
|
||||
# So, we check all exceptions for 'timeout' in them
|
||||
# and try to reconnect in this case.
|
||||
if 'timeout' not in str(e):
|
||||
raise
|
||||
|
||||
log_info = {}
|
||||
log_info['err_str'] = str(e)
|
||||
log_info['max_retries'] = self.max_retries
|
||||
log_info.update(params)
|
||||
|
||||
if self.max_retries and attempt == self.max_retries:
|
||||
LOG.error(_('Unable to connect to AMQP server on '
|
||||
'%(hostname)s:%(port)d after %(max_retries)d '
|
||||
'tries: %(err_str)s') % log_info)
|
||||
# NOTE(comstud): Copied from original code. There's
|
||||
# really no better recourse because if this was a queue we
|
||||
# need to consume on, we have no way to consume anymore.
|
||||
sys.exit(1)
|
||||
|
||||
if attempt == 1:
|
||||
sleep_time = self.interval_start or 1
|
||||
elif attempt > 1:
|
||||
sleep_time += self.interval_stepping
|
||||
if self.interval_max:
|
||||
sleep_time = min(sleep_time, self.interval_max)
|
||||
|
||||
log_info['sleep_time'] = sleep_time
|
||||
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
|
||||
'unreachable: %(err_str)s. Trying again in '
|
||||
'%(sleep_time)d seconds.') % log_info)
|
||||
time.sleep(sleep_time)
|
||||
|
||||
def ensure(self, error_callback, method, *args, **kwargs):
|
||||
while True:
|
||||
try:
|
||||
return method(*args, **kwargs)
|
||||
except (self.connection_errors, socket.timeout, IOError), e:
|
||||
if error_callback:
|
||||
error_callback(e)
|
||||
except Exception, e:
|
||||
# NOTE(comstud): Unfortunately it's possible for amqplib
|
||||
# to return an error not covered by its transport
|
||||
# connection_errors in the case of a timeout waiting for
|
||||
# a protocol response. (See paste link in LP888621)
|
||||
# So, we check all exceptions for 'timeout' in them
|
||||
# and try to reconnect in this case.
|
||||
if 'timeout' not in str(e):
|
||||
raise
|
||||
if error_callback:
|
||||
error_callback(e)
|
||||
self.reconnect()
|
||||
|
||||
def get_channel(self):
|
||||
"""Convenience call for bin/clear_rabbit_queues"""
|
||||
return self.channel
|
||||
|
||||
def close(self):
|
||||
"""Close/release this connection"""
|
||||
self.cancel_consumer_thread()
|
||||
self.connection.release()
|
||||
self.connection = None
|
||||
|
||||
def reset(self):
|
||||
"""Reset a connection so it can be used again"""
|
||||
self.cancel_consumer_thread()
|
||||
self.channel.close()
|
||||
self.channel = self.connection.channel()
|
||||
# work around 'memory' transport bug in 1.1.3
|
||||
if self.memory_transport:
|
||||
self.channel._new_queue('ae.undeliver')
|
||||
self.consumers = []
|
||||
|
||||
def declare_consumer(self, consumer_cls, topic, callback):
|
||||
"""Create a Consumer using the class that was passed in and
|
||||
add it to our list of consumers
|
||||
"""
|
||||
|
||||
def _connect_error(exc):
|
||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
|
||||
"%(err_str)s") % log_info)
|
||||
|
||||
def _declare_consumer():
|
||||
consumer = consumer_cls(self.conf, self.channel, topic, callback,
|
||||
self.consumer_num.next())
|
||||
self.consumers.append(consumer)
|
||||
return consumer
|
||||
|
||||
return self.ensure(_connect_error, _declare_consumer)
|
||||
|
||||
def iterconsume(self, limit=None, timeout=None):
|
||||
"""Return an iterator that will consume from all queues/consumers"""
|
||||
|
||||
info = {'do_consume': True}
|
||||
|
||||
def _error_callback(exc):
|
||||
if isinstance(exc, socket.timeout):
|
||||
LOG.exception(_('Timed out waiting for RPC response: %s') %
|
||||
str(exc))
|
||||
raise rpc_common.Timeout()
|
||||
else:
|
||||
LOG.exception(_('Failed to consume message from queue: %s') %
|
||||
str(exc))
|
||||
info['do_consume'] = True
|
||||
|
||||
def _consume():
|
||||
if info['do_consume']:
|
||||
queues_head = self.consumers[:-1]
|
||||
queues_tail = self.consumers[-1]
|
||||
for queue in queues_head:
|
||||
queue.consume(nowait=True)
|
||||
queues_tail.consume(nowait=False)
|
||||
info['do_consume'] = False
|
||||
return self.connection.drain_events(timeout=timeout)
|
||||
|
||||
for iteration in itertools.count(0):
|
||||
if limit and iteration >= limit:
|
||||
raise StopIteration
|
||||
yield self.ensure(_error_callback, _consume)
|
||||
|
||||
def cancel_consumer_thread(self):
|
||||
"""Cancel a consumer thread"""
|
||||
if self.consumer_thread is not None:
|
||||
self.consumer_thread.kill()
|
||||
try:
|
||||
self.consumer_thread.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
self.consumer_thread = None
|
||||
|
||||
def publisher_send(self, cls, topic, msg, **kwargs):
|
||||
"""Send to a publisher based on the publisher class"""
|
||||
|
||||
def _error_callback(exc):
|
||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||
LOG.exception(_("Failed to publish message to topic "
|
||||
"'%(topic)s': %(err_str)s") % log_info)
|
||||
|
||||
def _publish():
|
||||
publisher = cls(self.conf, self.channel, topic, **kwargs)
|
||||
publisher.send(msg)
|
||||
|
||||
self.ensure(_error_callback, _publish)
|
||||
|
||||
def declare_direct_consumer(self, topic, callback):
|
||||
"""Create a 'direct' queue.
|
||||
In nova's use, this is generally a msg_id queue used for
|
||||
responses for call/multicall
|
||||
"""
|
||||
self.declare_consumer(DirectConsumer, topic, callback)
|
||||
|
||||
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
|
||||
exchange_name=None):
|
||||
"""Create a 'topic' consumer."""
|
||||
self.declare_consumer(functools.partial(TopicConsumer,
|
||||
name=queue_name,
|
||||
exchange_name=exchange_name,
|
||||
),
|
||||
topic, callback)
|
||||
|
||||
def declare_fanout_consumer(self, topic, callback):
|
||||
"""Create a 'fanout' consumer"""
|
||||
self.declare_consumer(FanoutConsumer, topic, callback)
|
||||
|
||||
def direct_send(self, msg_id, msg):
|
||||
"""Send a 'direct' message"""
|
||||
self.publisher_send(DirectPublisher, msg_id, msg)
|
||||
|
||||
def topic_send(self, topic, msg):
|
||||
"""Send a 'topic' message"""
|
||||
self.publisher_send(TopicPublisher, topic, msg)
|
||||
|
||||
def fanout_send(self, topic, msg):
|
||||
"""Send a 'fanout' message"""
|
||||
self.publisher_send(FanoutPublisher, topic, msg)
|
||||
|
||||
def notify_send(self, topic, msg, **kwargs):
|
||||
"""Send a notify message on a topic"""
|
||||
self.publisher_send(NotifyPublisher, topic, msg, **kwargs)
|
||||
|
||||
def consume(self, limit=None):
|
||||
"""Consume from all queues/consumers"""
|
||||
it = self.iterconsume(limit=limit)
|
||||
while True:
|
||||
try:
|
||||
it.next()
|
||||
except StopIteration:
|
||||
return
|
||||
|
||||
def consume_in_thread(self):
|
||||
"""Consumer from all queues/consumers in a greenthread"""
|
||||
def _consumer_thread():
|
||||
try:
|
||||
self.consume()
|
||||
except greenlet.GreenletExit:
|
||||
return
|
||||
if self.consumer_thread is None:
|
||||
self.consumer_thread = eventlet.spawn(_consumer_thread)
|
||||
return self.consumer_thread
|
||||
|
||||
def create_consumer(self, topic, proxy, fanout=False):
|
||||
"""Create a consumer that calls a method in a proxy object"""
|
||||
proxy_cb = rpc_amqp.ProxyCallback(
|
||||
self.conf, proxy,
|
||||
rpc_amqp.get_connection_pool(self.conf, Connection))
|
||||
|
||||
if fanout:
|
||||
self.declare_fanout_consumer(topic, proxy_cb)
|
||||
else:
|
||||
self.declare_topic_consumer(topic, proxy_cb)
|
||||
|
||||
def create_worker(self, topic, proxy, pool_name):
|
||||
"""Create a worker that calls a method in a proxy object"""
|
||||
proxy_cb = rpc_amqp.ProxyCallback(
|
||||
self.conf, proxy,
|
||||
rpc_amqp.get_connection_pool(self.conf, Connection))
|
||||
self.declare_topic_consumer(topic, proxy_cb, pool_name)
|
||||
|
||||
|
||||
def create_connection(conf, new=True):
|
||||
"""Create a connection"""
|
||||
return rpc_amqp.create_connection(
|
||||
conf, new,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def multicall(conf, context, topic, msg, timeout=None):
|
||||
"""Make a call that returns multiple times."""
|
||||
return rpc_amqp.multicall(
|
||||
conf, context, topic, msg, timeout,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def call(conf, context, topic, msg, timeout=None):
|
||||
"""Sends a message on a topic and wait for a response."""
|
||||
return rpc_amqp.call(
|
||||
conf, context, topic, msg, timeout,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def cast(conf, context, topic, msg):
|
||||
"""Sends a message on a topic without waiting for a response."""
|
||||
return rpc_amqp.cast(
|
||||
conf, context, topic, msg,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def fanout_cast(conf, context, topic, msg):
|
||||
"""Sends a message on a fanout exchange without waiting for a response."""
|
||||
return rpc_amqp.fanout_cast(
|
||||
conf, context, topic, msg,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def cast_to_server(conf, context, server_params, topic, msg):
|
||||
"""Sends a message on a topic to a specific server."""
|
||||
return rpc_amqp.cast_to_server(
|
||||
conf, context, server_params, topic, msg,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def fanout_cast_to_server(conf, context, server_params, topic, msg):
|
||||
"""Sends a message on a fanout exchange to a specific server."""
|
||||
return rpc_amqp.fanout_cast_to_server(
|
||||
conf, context, server_params, topic, msg,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def notify(conf, context, topic, msg):
|
||||
"""Sends a notification event on a topic."""
|
||||
return rpc_amqp.notify(
|
||||
conf, context, topic, msg,
|
||||
rpc_amqp.get_connection_pool(conf, Connection))
|
||||
|
||||
|
||||
def cleanup():
|
||||
return rpc_amqp.cleanup(Connection.pool)
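A hedged sketch of the module-level kombu API above; cfg and rpc_common are already imported by this module, the broker details come from the kombu_opts defaults (localhost:5672, guest/guest), and 'compute' is a placeholder topic:

conf = cfg.CONF  # kombu_opts were registered on cfg.CONF above
ctxt = rpc_common.CommonRpcContext(user='admin')

cast(conf, ctxt, 'compute', {'method': 'ping', 'args': {}})

# call() blocks for a reply; multicall() would yield each reply in turn.
reply = call(conf, ctxt, 'compute',
             {'method': 'get_host_uptime', 'args': {'host': 'node1'}},
             timeout=30)

cleanup()  # drain the shared Connection.pool on shutdown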
|
@ -1,585 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC
|
||||
# Copyright 2011 - 2012, Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
|
||||
import itertools
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import eventlet
|
||||
import greenlet
|
||||
import qpid.messaging
|
||||
import qpid.messaging.exceptions
|
||||
|
||||
from cloudbaseinit.openstack.common import cfg
|
||||
from cloudbaseinit.openstack.common.gettextutils import _
|
||||
from cloudbaseinit.openstack.common import jsonutils
|
||||
from cloudbaseinit.openstack.common import log as logging
|
||||
from cloudbaseinit.openstack.common.rpc import amqp as rpc_amqp
|
||||
from cloudbaseinit.openstack.common.rpc import common as rpc_common
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
qpid_opts = [
|
||||
cfg.StrOpt('qpid_hostname',
|
||||
default='localhost',
|
||||
help='Qpid broker hostname'),
|
||||
cfg.StrOpt('qpid_port',
|
||||
default='5672',
|
||||
help='Qpid broker port'),
|
||||
cfg.ListOpt('qpid_hosts',
|
||||
default=['$qpid_hostname:$qpid_port'],
|
||||
help='Qpid HA cluster host:port pairs'),
|
||||
cfg.StrOpt('qpid_username',
|
||||
default='',
|
||||
help='Username for qpid connection'),
|
||||
cfg.StrOpt('qpid_password',
|
||||
default='',
|
||||
help='Password for qpid connection'),
|
||||
cfg.StrOpt('qpid_sasl_mechanisms',
|
||||
default='',
|
||||
help='Space separated list of SASL mechanisms to use for auth'),
|
||||
cfg.IntOpt('qpid_heartbeat',
|
||||
default=60,
|
||||
help='Seconds between connection keepalive heartbeats'),
|
||||
cfg.StrOpt('qpid_protocol',
|
||||
default='tcp',
|
||||
help="Transport to use, either 'tcp' or 'ssl'"),
|
||||
cfg.BoolOpt('qpid_tcp_nodelay',
|
||||
default=True,
|
||||
help='Disable Nagle algorithm'),
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(qpid_opts)
|
||||
|
||||
|
||||
class ConsumerBase(object):
|
||||
"""Consumer base class."""
|
||||
|
||||
def __init__(self, session, callback, node_name, node_opts,
|
||||
link_name, link_opts):
|
||||
"""Declare a queue on an amqp session.
|
||||
|
||||
'session' is the amqp session to use
|
||||
'callback' is the callback to call when messages are received
|
||||
'node_name' is the first part of the Qpid address string, before ';'
|
||||
'node_opts' will be applied to the "x-declare" section of "node"
|
||||
in the address string.
|
||||
'link_name' goes into the "name" field of the "link" in the address
|
||||
string
|
||||
'link_opts' will be applied to the "x-declare" section of "link"
|
||||
in the address string.
|
||||
"""
|
||||
self.callback = callback
|
||||
self.receiver = None
|
||||
self.session = None
|
||||
|
||||
addr_opts = {
|
||||
"create": "always",
|
||||
"node": {
|
||||
"type": "topic",
|
||||
"x-declare": {
|
||||
"durable": True,
|
||||
"auto-delete": True,
|
||||
},
|
||||
},
|
||||
"link": {
|
||||
"name": link_name,
|
||||
"durable": True,
|
||||
"x-declare": {
|
||||
"durable": False,
|
||||
"auto-delete": True,
|
||||
"exclusive": False,
|
||||
},
|
||||
},
|
||||
}
|
||||
addr_opts["node"]["x-declare"].update(node_opts)
|
||||
addr_opts["link"]["x-declare"].update(link_opts)
|
||||
|
||||
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
|
||||
|
||||
self.reconnect(session)
|
||||
|
||||
def reconnect(self, session):
|
||||
"""Re-declare the receiver after a qpid reconnect"""
|
||||
self.session = session
|
||||
self.receiver = session.receiver(self.address)
|
||||
self.receiver.capacity = 1
|
||||
|
||||
def consume(self):
|
||||
"""Fetch the message and pass it to the callback object"""
|
||||
message = self.receiver.fetch()
|
||||
try:
|
||||
self.callback(message.content)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to process message... skipping it."))
|
||||
finally:
|
||||
self.session.acknowledge(message)
|
||||
|
||||
def get_receiver(self):
|
||||
return self.receiver
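To make the address-string format concrete, here is a hedged reconstruction of what a 'topic' consumer for exchange 'openstack' and topic 'compute' would build (TopicConsumer below passes empty node/link overrides, so only the defaults above apply):

import json

addr_opts = {
    "create": "always",
    "node": {"type": "topic",
             "x-declare": {"durable": True, "auto-delete": True}},
    "link": {"name": "compute", "durable": True,
             "x-declare": {"durable": False, "auto-delete": True,
                           "exclusive": False}},
}
print("%s ; %s" % ("openstack/compute", json.dumps(addr_opts)))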
|
||||
|
||||
|
||||
class DirectConsumer(ConsumerBase):
|
||||
"""Queue/consumer class for 'direct'"""
|
||||
|
||||
def __init__(self, conf, session, msg_id, callback):
|
||||
"""Init a 'direct' queue.
|
||||
|
||||
'session' is the amqp session to use
|
||||
'msg_id' is the msg_id to listen on
|
||||
'callback' is the callback to call when messages are received
|
||||
"""
|
||||
|
||||
super(DirectConsumer, self).__init__(session, callback,
|
||||
"%s/%s" % (msg_id, msg_id),
|
||||
{"type": "direct"},
|
||||
msg_id,
|
||||
{"exclusive": True})
|
||||
|
||||
|
||||
class TopicConsumer(ConsumerBase):
|
||||
"""Consumer class for 'topic'"""
|
||||
|
||||
def __init__(self, conf, session, topic, callback, name=None,
|
||||
exchange_name=None):
|
||||
"""Init a 'topic' queue.
|
||||
|
||||
:param session: the amqp session to use
|
||||
:param topic: is the topic to listen on
|
||||
:paramtype topic: str
|
||||
:param callback: the callback to call when messages are received
|
||||
:param name: optional queue name, defaults to topic
|
||||
"""
|
||||
|
||||
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
|
||||
super(TopicConsumer, self).__init__(session, callback,
|
||||
"%s/%s" % (exchange_name, topic),
|
||||
{}, name or topic, {})
|
||||
|
||||
|
||||
class FanoutConsumer(ConsumerBase):
|
||||
"""Consumer class for 'fanout'"""
|
||||
|
||||
def __init__(self, conf, session, topic, callback):
|
||||
"""Init a 'fanout' queue.
|
||||
|
||||
'session' is the amqp session to use
|
||||
'topic' is the topic to listen on
|
||||
'callback' is the callback to call when messages are received
|
||||
"""
|
||||
|
||||
super(FanoutConsumer, self).__init__(
|
||||
session, callback,
|
||||
"%s_fanout" % topic,
|
||||
{"durable": False, "type": "fanout"},
|
||||
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
|
||||
{"exclusive": True})
|
||||
|
||||
|
||||
class Publisher(object):
|
||||
"""Base Publisher class"""
|
||||
|
||||
def __init__(self, session, node_name, node_opts=None):
|
||||
"""Init the Publisher class with the exchange_name, routing_key,
|
||||
and other options
|
||||
"""
|
||||
self.sender = None
|
||||
self.session = session
|
||||
|
||||
addr_opts = {
|
||||
"create": "always",
|
||||
"node": {
|
||||
"type": "topic",
|
||||
"x-declare": {
|
||||
"durable": False,
|
||||
# auto-delete isn't implemented for exchanges in qpid,
# but include it here anyway
|
||||
"auto-delete": True,
|
||||
},
|
||||
},
|
||||
}
|
||||
if node_opts:
|
||||
addr_opts["node"]["x-declare"].update(node_opts)
|
||||
|
||||
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
|
||||
|
||||
self.reconnect(session)
|
||||
|
||||
def reconnect(self, session):
|
||||
"""Re-establish the Sender after a reconnection"""
|
||||
self.sender = session.sender(self.address)
|
||||
|
||||
def send(self, msg):
|
||||
"""Send a message"""
|
||||
self.sender.send(msg)
|
||||
|
||||
|
||||
class DirectPublisher(Publisher):
|
||||
"""Publisher class for 'direct'"""
|
||||
def __init__(self, conf, session, msg_id):
|
||||
"""Init a 'direct' publisher."""
|
||||
super(DirectPublisher, self).__init__(session, msg_id,
|
||||
{"type": "Direct"})
|
||||
|
||||
|
||||
class TopicPublisher(Publisher):
|
||||
"""Publisher class for 'topic'"""
|
||||
def __init__(self, conf, session, topic):
|
||||
"""init a 'topic' publisher.
|
||||
"""
|
||||
exchange_name = rpc_amqp.get_control_exchange(conf)
|
||||
super(TopicPublisher, self).__init__(session,
|
||||
"%s/%s" % (exchange_name, topic))
|
||||
|
||||
|
||||
class FanoutPublisher(Publisher):
|
||||
"""Publisher class for 'fanout'"""
|
||||
def __init__(self, conf, session, topic):
|
||||
"""init a 'fanout' publisher.
|
||||
"""
|
||||
super(FanoutPublisher, self).__init__(
|
||||
session,
|
||||
"%s_fanout" % topic, {"type": "fanout"})
|
||||
|
||||
|
||||
class NotifyPublisher(Publisher):
|
||||
"""Publisher class for notifications"""
|
||||
def __init__(self, conf, session, topic):
|
||||
"""init a 'topic' publisher.
|
||||
"""
|
||||
exchange_name = rpc_amqp.get_control_exchange(conf)
|
||||
super(NotifyPublisher, self).__init__(session,
|
||||
"%s/%s" % (exchange_name, topic),
|
||||
{"durable": True})
|
||||
|
||||
|
||||
class Connection(object):
|
||||
"""Connection object."""
|
||||
|
||||
pool = None
|
||||
|
||||
def __init__(self, conf, server_params=None):
|
||||
self.session = None
|
||||
self.consumers = {}
|
||||
self.consumer_thread = None
|
||||
self.conf = conf
|
||||
|
||||
params = {
|
||||
'qpid_hosts': self.conf.qpid_hosts,
|
||||
'username': self.conf.qpid_username,
|
||||
'password': self.conf.qpid_password,
|
||||
}
|
||||
params.update(server_params or {})
|
||||
|
||||
self.brokers = params['qpid_hosts']
|
||||
self.username = params['username']
|
||||
self.password = params['password']
|
||||
self.connection_create(self.brokers[0])
|
||||
self.reconnect()
|
||||
|
||||
def connection_create(self, broker):
|
||||
# Create the connection - this does not open the connection
|
||||
self.connection = qpid.messaging.Connection(broker)
|
||||
|
||||
# Check if flags are set and if so set them for the connection
|
||||
# before we call open
|
||||
self.connection.username = self.username
|
||||
self.connection.password = self.password
|
||||
|
||||
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
|
||||
# Reconnection is done by self.reconnect()
|
||||
self.connection.reconnect = False
|
||||
self.connection.heartbeat = self.conf.qpid_heartbeat
|
||||
self.connection.protocol = self.conf.qpid_protocol
|
||||
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
|
||||
|
||||
def _register_consumer(self, consumer):
|
||||
self.consumers[str(consumer.get_receiver())] = consumer
|
||||
|
||||
def _lookup_consumer(self, receiver):
|
||||
return self.consumers[str(receiver)]
|
||||
|
||||
def reconnect(self):
|
||||
"""Handles reconnecting and re-establishing sessions and queues"""
|
||||
if self.connection.opened():
|
||||
try:
|
||||
self.connection.close()
|
||||
except qpid.messaging.exceptions.ConnectionError:
|
||||
pass
|
||||
|
||||
attempt = 0
|
||||
delay = 1
|
||||
while True:
|
||||
broker = self.brokers[attempt % len(self.brokers)]
|
||||
attempt += 1
|
||||
|
||||
try:
|
||||
self.connection_create(broker)
|
||||
self.connection.open()
|
||||
except qpid.messaging.exceptions.ConnectionError, e:
|
||||
msg_dict = dict(e=e, delay=delay)
|
||||
msg = _("Unable to connect to AMQP server: %(e)s. "
|
||||
"Sleeping %(delay)s seconds") % msg_dict
|
||||
LOG.error(msg)
|
||||
time.sleep(delay)
|
||||
delay = min(2 * delay, 60)
|
||||
else:
|
||||
LOG.info(_('Connected to AMQP server on %s'), broker)
|
||||
break
|
||||
|
||||
self.session = self.connection.session()
|
||||
|
||||
if self.consumers:
|
||||
consumers = self.consumers
|
||||
self.consumers = {}
|
||||
|
||||
for consumer in consumers.itervalues():
|
||||
consumer.reconnect(self.session)
|
||||
self._register_consumer(consumer)
|
||||
|
||||
LOG.debug(_("Re-established AMQP queues"))
|
||||
|
||||
def ensure(self, error_callback, method, *args, **kwargs):
|
||||
while True:
|
||||
try:
|
||||
return method(*args, **kwargs)
|
||||
except (qpid.messaging.exceptions.Empty,
|
||||
qpid.messaging.exceptions.ConnectionError), e:
|
||||
if error_callback:
|
||||
error_callback(e)
|
||||
self.reconnect()
|
||||
|
||||
def close(self):
|
||||
"""Close/release this connection"""
|
||||
self.cancel_consumer_thread()
|
||||
self.connection.close()
|
||||
self.connection = None
|
||||
|
||||
def reset(self):
|
||||
"""Reset a connection so it can be used again"""
|
||||
self.cancel_consumer_thread()
|
||||
self.session.close()
|
||||
self.session = self.connection.session()
|
||||
self.consumers = {}
|
||||
|
||||
def declare_consumer(self, consumer_cls, topic, callback):
|
||||
"""Create a Consumer using the class that was passed in and
|
||||
add it to our list of consumers
|
||||
"""
|
||||
def _connect_error(exc):
|
||||
log_info = {'topic': topic, 'err_str': str(exc)}
|
||||
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
|
||||
"%(err_str)s") % log_info)
|
||||
|
||||
def _declare_consumer():
|
||||
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers"""

        def _error_callback(exc):
            if isinstance(exc, qpid.messaging.exceptions.Empty):
                LOG.exception(_('Timed out waiting for RPC response: %s') %
                              str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message. Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread"""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class"""

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer"""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message"""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg):
        """Send a 'topic' message"""
        self.publisher_send(TopicPublisher, topic, msg)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message"""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic"""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers"""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consume from all queues/consumers in a greenthread"""
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object"""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object"""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer


def create_connection(conf, new=True):
    """Create a connection"""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    return rpc_amqp.multicall(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and waits for a response."""
    return rpc_amqp.call(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    return rpc_amqp.cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    return rpc_amqp.fanout_cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    return rpc_amqp.cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    return rpc_amqp.fanout_cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg):
    """Sends a notification event on a topic."""
    return rpc_amqp.notify(conf, context, topic, msg,
                           rpc_amqp.get_connection_pool(conf, Connection))


def cleanup():
    return rpc_amqp.cleanup(Connection.pool)
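
A minimal usage sketch of the module-level qpid driver API above (not part of the commit); the module path, topic name and message payload are illustrative assumptions:

# Hypothetical example; assumes a configured `conf` and a request context `ctxt`.
from cloudbaseinit.openstack.common.rpc import impl_qpid

def ping_worker(conf, ctxt):
    # call() blocks until a single reply arrives; cast() is fire-and-forget.
    reply = impl_qpid.call(conf, ctxt, 'worker_topic',
                           {'method': 'ping', 'args': {}})
    impl_qpid.cast(conf, ctxt, 'worker_topic',
                   {'method': 'reset_stats', 'args': {}})
    return reply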
@ -1,718 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pprint
import socket
import string
import sys
import types
import uuid

import eventlet
from eventlet.green import zmq
import greenlet

from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import importutils
from cloudbaseinit.openstack.common import jsonutils
from cloudbaseinit.openstack.common.rpc import common as rpc_common


# Aliased here for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException

zmq_opts = [
    cfg.StrOpt('rpc_zmq_bind_address', default='*',
               help='ZeroMQ bind address. Should be a wildcard (*), '
                    'an ethernet interface, or IP. '
                    'The "host" option should point or resolve to this '
                    'address.'),

    # The module.Class to use for matchmaking.
    cfg.StrOpt(
        'rpc_zmq_matchmaker',
        default=('cloudbaseinit.openstack.common.rpc.'
                 'matchmaker.MatchMakerLocalhost'),
        help='MatchMaker driver',
    ),

    # The following port is unassigned by IANA as of 2012-05-21
    cfg.IntOpt('rpc_zmq_port', default=9501,
               help='ZeroMQ receiver listening port'),

    cfg.IntOpt('rpc_zmq_contexts', default=1,
               help='Number of ZeroMQ contexts, defaults to 1'),

    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
               help='Directory for holding IPC sockets'),

    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
               help='Name of this node. Must be a valid hostname, FQDN, or '
                    'IP address. Must match "host" option, if running Nova.')
]


# These globals are defined in register_opts(conf),
# a mandatory initialization call
CONF = None
ZMQ_CTX = None  # ZeroMQ Context, must be global.
matchmaker = None  # memoized matchmaker object


def _serialize(data):
    """
    Serialization wrapper
    We prefer using JSON, but it cannot encode all types.
    Raises an error if a developer passes us bad data.
    """
    try:
        return str(jsonutils.dumps(data, ensure_ascii=True))
    except TypeError:
        LOG.error(_("JSON serialization failed."))
        raise


def _deserialize(data):
    """
    Deserialization wrapper
    """
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
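
As a quick illustration of the two wrappers above (a sketch, not in the original file): JSON-safe structures round-trip unchanged, while unencodable types raise TypeError from _serialize().

# Illustrative only: round-trip a JSON-safe dict through the wrappers.
payload = _serialize({'method': 'echo', 'args': {'value': 42}})
assert _deserialize(payload) == {'method': 'echo', 'args': {'value': 42}}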

class ZmqSocket(object):
    """
    A tiny wrapper around ZeroMQ to simplify the send/recv protocol
    and connection management.

    Can be used as a Context (supports the 'with' statement).
    """

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        self.sock = ZMQ_CTX.socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]

    def subscribe(self, msg_filter):
        """Subscribe."""
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Unsubscribe."""
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if len(self.subscriptions) > 0:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        # Linger -1 prevents lost/dropped messages
        try:
            self.sock.close(linger=-1)
        except Exception:
            pass
        self.sock = None

    def recv(self):
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart()

    def send(self, data):
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data)


class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
        self.outq = ZmqSocket(addr, socket_type, bind=bind)

    def cast(self, msg_id, topic, data):
        self.outq.send([str(msg_id), str(topic), str('cast'),
                        _serialize(data)])

    def close(self):
        self.outq.close()


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""
    def __init__(self, **kwargs):
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['replies'] = self.replies
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            return
        self.replies.append(reply)

    @classmethod
    def marshal(self, ctx):
        ctx_data = ctx.to_dict()
        return _serialize(ctx_data)

    @classmethod
    def unmarshal(self, data):
        return RpcContext.from_dict(_deserialize(data))


class InternalContext(object):
    """Used by ConsumerBase as a private context for '-' prefixed methods."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', [])

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'], **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except Exception:
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # Our real method is curried into msg['args']

        child_ctx = RpcContext.unmarshal(msg[0])
        response = ConsumerBase.normalize_reply(
            self._get_response(child_ctx, proxy, topic, msg[1]),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        cast(CONF, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,
                'response': response
            }
        })


class ConsumerBase(object):
    """Base Consumer."""

    def __init__(self):
        self.private_ctx = InternalContext(None)

    @classmethod
    def normalize_reply(self, result, replies):
        #TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, style, target, proxy, ctx, data):
        # Method names starting with '-' are
        # processed internally ('-' is not valid in a method name).
        method = data['method']

        # Internal method
        # uses internal context for safety.
        if data['method'][0] == '-':
            # For reply / process_reply
            method = method[1:]
            if method == 'reply':
                self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        data.setdefault('version', None)
        data.setdefault('args', [])
        proxy.dispatch(ctx, data['version'],
                       data['method'], **data['args'])


class ZmqBaseReactor(ConsumerBase):
    """
    A consumer class implementing a
    centralized casting broker (PULL-PUSH)
    for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        self.mapping = {}
        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
                 zmq_type_out=None, in_bind=True, out_bind=True,
                 subscribe=None):

        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

        if not out_addr:
            return

        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
            raise RPCException("Bad output socktype")

        # Items push out.
        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

        self.mapping[inq] = outq
        self.mapping[outq] = inq
        self.sockets.append(outq)

        LOG.info(_("Out reactor registered"))

    def consume_in_thread(self):
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        for t in self.threads:
            t.wait()

    def close(self):
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()


class ZmqProxy(ZmqBaseReactor):
    """
    A consumer class implementing a
    topic-based proxy, forwarding to
    IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)

        self.topic_proxy = {}
        ipc_dir = CONF.rpc_zmq_ipc_dir

        self.topic_proxy['zmq_replies'] = \
            ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
                      zmq.PUB, bind=True)
        self.sockets.append(self.topic_proxy['zmq_replies'])

    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        msg_id, topic, style, in_msg = data
        topic = topic.split('.', 1)[0]

        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

        # Handle zmq_replies magic
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
            inside = _deserialize(in_msg)
            msg_id = inside[-1]['args']['msg_id']
            response = inside[-1]['args']['response']
            LOG.debug(_("->response->%s"), response)
            data = [str(msg_id), _serialize(response)]
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
                             sock_type, bind=True)
            self.topic_proxy[topic] = outq
            self.sockets.append(outq)
            LOG.info(_("Created topic proxy: %s"), topic)

            # It takes some time for a pub socket to open,
            # before we can have any faith in doing a send() to it.
            if sock_type == zmq.PUB:
                eventlet.sleep(.5)

        LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
        self.topic_proxy[topic].send(data)
        LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})


class ZmqReactor(ZmqBaseReactor):
    """
    A consumer class implementing a
    consumer for messages. Can also be
    used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return

        msg_id, topic, style, in_msg = data

        ctx, request = _deserialize(in_msg)
        ctx = RpcContext.unmarshal(ctx)

        proxy = self.proxies[sock]

        self.pool.spawn_n(self.process, style, topic,
                          proxy, ctx, request)


class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        # Only consume on the base topic name.
        topic = topic.split('.', 1)[0]

        LOG.info(_("Create Consumer for topic (%(topic)s)") %
                 {'topic': topic})

        # Subscription scenarios
        if fanout:
            subscribe = ('', fanout)[type(fanout) == str]
            sock_type = zmq.SUB
            topic = 'fanout~' + topic
        else:
            sock_type = zmq.PULL
            subscribe = None

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)

    def close(self):
        self.reactor.close()

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        self.reactor.consume_in_thread()


def _cast(addr, context, msg_id, topic, msg, timeout=None):
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(msg_id, topic, payload)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            if 'conn' in vars():
                conn.close()


def _call(addr, context, msg_id, topic, msg, timeout=None):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'context': mcontext,
            'topic': reply_topic,
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, msg_id, topic, payload)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))
            responses = _deserialize(msg[-1])
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]


def _multi_send(method, context, topic, msg, timeout=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = matchmaker.queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout("No match from matchmaker.")

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, _topic, msg, timeout)
            return
        return method(_addr, context, _topic, _topic, msg, timeout)


def create_connection(conf, new=True):
    return Connection(conf)


def multicall(conf, *args, **kwargs):
    """Multiple calls."""
    return _multi_send(_call, *args, **kwargs)


def call(conf, *args, **kwargs):
    """Send a message, expect a response."""
    data = _multi_send(_call, *args, **kwargs)
    return data[-1]


def cast(conf, *args, **kwargs):
    """Send a message expecting no reply."""
    _multi_send(_cast, *args, **kwargs)


def fanout_cast(conf, context, topic, msg, **kwargs):
    """Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on .
    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)


def notify(conf, context, topic, msg, **kwargs):
    """
    Send notification event.
    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    topic = topic.replace('.', '-')
    cast(conf, context, topic, msg, **kwargs)


def cleanup():
    """Clean up resources in use by implementation."""
    global ZMQ_CTX
    global matchmaker
    matchmaker = None
    ZMQ_CTX.term()
    ZMQ_CTX = None


def register_opts(conf):
    """Registration of options for this driver."""
    #NOTE(ewindisch): ZMQ_CTX and matchmaker
    # are initialized here as this is as good
    # an initialization method as any.

    # We memoize through these globals
    global ZMQ_CTX
    global matchmaker
    global CONF

    if not CONF:
        conf.register_opts(zmq_opts)
        CONF = conf
    # Don't re-set, if this method is called twice.
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
    if not matchmaker:
        # rpc_zmq_matchmaker should be set to a 'module.Class'
        mm_path = conf.rpc_zmq_matchmaker.split('.')
        mm_module = '.'.join(mm_path[:-1])
        mm_class = mm_path[-1]

        # Only initialize a class.
        if mm_path[-1][0] not in string.ascii_uppercase:
            LOG.error(_("Matchmaker could not be loaded.\n"
                        "rpc_zmq_matchmaker is not a class."))
            raise RPCException(_("Error loading Matchmaker."))

        mm_impl = importutils.import_module(mm_module)
        mm_constructor = getattr(mm_impl, mm_class)
        matchmaker = mm_constructor()


register_opts(cfg.CONF)
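
For reference, the matchmaker loading performed inside register_opts() reduces to the following sketch (the driver path shown is the default from zmq_opts; any other 'module.Class' string is resolved the same way):

# Sketch of the dynamic class loading done in register_opts().
mm_path = ('cloudbaseinit.openstack.common.rpc.'
           'matchmaker.MatchMakerLocalhost')
module_name, class_name = mm_path.rsplit('.', 1)
mm_module = importutils.import_module(module_name)
matchmaker = getattr(mm_module, class_name)()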
@ -1,258 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import contextlib
import itertools
import json

from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('matchmaker_ringfile',
               default='/etc/nova/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager


class MatchMakerException(Exception):
    """Signifies a match could not be found."""
    message = _("Match not found by MatchMaker.")


class Exchange(object):
    """
    Implements lookups.
    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
        pass

    def run(self, key):
        raise NotImplementedError()


class Binding(object):
    """
    A binding on which to perform a lookup.
    """
    def __init__(self):
        pass

    def test(self, key):
        raise NotImplementedError()


class MatchMakerBase(object):
    """Match Maker Base Class."""

    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

    def add_binding(self, binding, rule, last=True):
        self.bindings.append((binding, rule, False, last))

    #NOTE(ewindisch): kept the following method in case we implement the
    #                 underlying support.
    #def add_negate_binding(self, binding, rule, last=True):
    #    self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        workers = []

        # bit is for negate bindings - if we choose to implement it.
        # last stops processing rules if this matches.
        for (binding, exchange, bit, last) in self.bindings:
            if binding.test(key):
                workers.extend(exchange.run(key))

                # Support last.
                if last:
                    return workers
        return workers


class DirectBinding(Binding):
    """
    Specifies a host in the key via a '.' character
    Although dots are used in the key, the behavior here is
    that it maps directly to a host, thus direct.
    """
    def test(self, key):
        if '.' in key:
            return True
        return False


class TopicBinding(Binding):
    """
    Matches a 'bare' key without dots.
    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
    matches that of a direct exchange).
    """
    def test(self, key):
        if '.' not in key:
            return True
        return False


class FanoutBinding(Binding):
    """Match on fanout keys, where key starts with the 'fanout~' string."""
    def test(self, key):
        if key.startswith('fanout~'):
            return True
        return False


class StubExchange(Exchange):
    """Exchange that does nothing."""
    def run(self, key):
        return [(key, None)]


class RingExchange(Exchange):
    """
    Match Maker where hosts are loaded from a static file containing
    a hashmap (JSON formatted).

    __init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            fh = open(CONF.matchmaker_ringfile, 'r')
            self.ring = json.load(fh)
            fh.close()

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        if key in self.ring0:
            return True
        return False


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self):
        super(LocalhostExchange, self).__init__()

    def run(self, key):
        return [(key.split('.')[0] + '.localhost', 'localhost')]


class DirectExchange(Exchange):
    """
    Exchange where all topic keys are split, sending to second half.
    i.e. "compute.host" sends a message to "compute" running on "host"
    """
    def __init__(self):
        super(DirectExchange, self).__init__()

    def run(self, key):
        b, e = key.split('.', 1)
        return [(b, e)]


class MatchMakerRing(MatchMakerBase):
    """
    Match Maker where hosts are loaded from a static hashmap.
    """
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))


class MatchMakerLocalhost(MatchMakerBase):
    """
    Match Maker where all bare topics resolve to localhost.
    Useful for testing.
    """
    def __init__(self):
        super(MatchMakerLocalhost, self).__init__()
        self.add_binding(FanoutBinding(), LocalhostExchange())
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), LocalhostExchange())


class MatchMakerStub(MatchMakerBase):
    """
    Match Maker where topics are untouched.
    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is known (i.e. zeromq)
    """
    def __init__(self):
        super(MatchMakerStub, self).__init__()

        self.add_binding(FanoutBinding(), StubExchange())
        self.add_binding(DirectBinding(), StubExchange())
        self.add_binding(TopicBinding(), StubExchange())
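
A short sketch of how the bindings above compose, assuming an in-memory ring dict instead of the JSON ringfile (hostnames are made up):

# Illustrative resolution of topics to queues with MatchMakerRing.
ring = {'scheduler': ['host1', 'host2'], 'compute': ['host1']}
mm = MatchMakerRing(ring)
mm.queues('scheduler')         # [('scheduler.host1', 'host1')], round-robin
mm.queues('fanout~scheduler')  # one ('fanout~scheduler.<host>', <host>) per host
mm.queues('compute.host1')     # [('compute', 'host1')], direct binding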
@ -1,165 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A helper class for proxy objects to remote APIs.

For more information about rpc API version numbers, see:
    rpc/dispatcher.py
"""


from cloudbaseinit.openstack.common import rpc


class RpcProxy(object):
    """A helper class for rpc clients.

    This class is a wrapper around the RPC client API. It allows you to
    specify the topic and API version in a single place. This is intended to
    be used as a base class for a class that implements the client side of an
    rpc API.
    """

    def __init__(self, topic, default_version):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in all
               outgoing messages. This can be overridden on a per-message
               basis.
        """
        self.topic = topic
        self.default_version = default_version
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Helper method to set the version in a message.

        :param msg: The message having a version added to it.
        :param vers: The version number to add to the message.
        """
        msg['version'] = vers if vers else self.default_version

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    @staticmethod
    def make_msg(method, **kwargs):
        return {'method': method, 'args': kwargs}

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        return rpc.call(context, self._get_topic(topic), msg, timeout)

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: An iterator that lets you process each of the returned values
                  from the remote method as they arrive.
        """
        self._set_version(msg, version)
        return rpc.multicall(context, self._get_topic(topic), msg, timeout)

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.cast() does not wait on any return value from the
                  remote method.
        """
        self._set_version(msg, version)
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """rpc.fanout_cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.fanout_cast() does not wait on any return value
                  from the remote method.
        """
        self._set_version(msg, version)
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """rpc.cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters. See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """rpc.fanout_cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters. See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.fanout_cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
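
A hypothetical client built on RpcProxy; the topic, version and method name below are examples, not part of this code:

# Sketch: the intended subclassing pattern for RpcProxy.
class SchedulerAPI(RpcProxy):
    def __init__(self):
        super(SchedulerAPI, self).__init__(topic='scheduler',
                                           default_version='1.0')

    def get_host_list(self, ctxt):
        # Blocks for the remote return value via rpc.call().
        return self.call(ctxt, self.make_msg('get_host_list'))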
@ -1,75 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common import rpc
from cloudbaseinit.openstack.common.rpc import dispatcher as rpc_dispatcher
from cloudbaseinit.openstack.common import service


LOG = logging.getLogger(__name__)


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host."""
    def __init__(self, host, topic, manager=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        if manager is None:
            self.manager = self
        else:
            self.manager = manager

    def start(self):
        super(Service, self).start()

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, dispatcher, fanout=True)

        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        if callable(getattr(self.manager, 'initialize_service_hook', None)):
            self.manager.initialize_service_hook(self)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()
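
A sketch of a manager consumed by the Service above, assuming the rpc layer is already configured; the hook name is the one probed for in start():

# Hypothetical manager; its methods become remotely callable via the dispatcher.
class PingManager(object):
    def ping(self, context):
        return 'pong'

    def initialize_service_hook(self, service):
        # Optionally add more consumers on service.conn before
        # consume_in_thread() starts draining them.
        pass

svc = Service('myhost', 'mytopic', manager=PingManager())
svc.start()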
@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# Copyright 2011 OpenStack Foundation.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -19,7 +20,7 @@
Utilities with minimum-depends for use in setup.py
"""

import datetime
import email
import os
import re
import subprocess
@ -33,20 +34,26 @@ def parse_mailmap(mailmap='.mailmap'):
    if os.path.exists(mailmap):
        with open(mailmap, 'r') as fp:
            for l in fp:
                l = l.strip()
                if not l.startswith('#') and ' ' in l:
                    canonical_email, alias = [x for x in l.split(' ')
                                              if x.startswith('<')]
                    mapping[alias] = canonical_email
                try:
                    canonical_email, alias = re.match(
                        r'[^#]*?(<.+>).*(<.+>).*', l).groups()
                except AttributeError:
                    continue
                mapping[alias] = canonical_email
    return mapping


def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
    mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
    return parse_mailmap(mailmap)


def canonicalize_emails(changelog, mapping):
    """Takes in a string and an email alias mapping and replaces all
    instances of the aliases in the string with their real email.
    """
    for alias, email in mapping.iteritems():
        changelog = changelog.replace(alias, email)
    for alias, email_address in mapping.iteritems():
        changelog = changelog.replace(alias, email_address)
    return changelog


@ -106,24 +113,18 @@ def parse_dependency_links(requirements_files=['requirements.txt',
    return dependency_links


def write_requirements():
    venv = os.environ.get('VIRTUAL_ENV', None)
    if venv is not None:
        with open("requirements.txt", "w") as req_file:
            output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
                                      stdout=subprocess.PIPE)
            requirements = output.communicate()[0].strip()
            req_file.write(requirements)


def _run_shell_command(cmd):
def _run_shell_command(cmd, throw_on_error=False):
    if os.name == 'nt':
        output = subprocess.Popen(["cmd.exe", "/C", cmd],
                                  stdout=subprocess.PIPE)
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    else:
        output = subprocess.Popen(["/bin/sh", "-c", cmd],
                                  stdout=subprocess.PIPE)
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    out = output.communicate()
    if output.returncode and throw_on_error:
        raise Exception("%s returned %d" % (cmd, output.returncode))
    if len(out) == 0:
        return None
    if len(out[0].strip()) == 0:
@ -131,65 +132,26 @@ def _run_shell_command(cmd):
    return out[0].strip()


def _get_git_next_version_suffix(branch_name):
    datestamp = datetime.datetime.now().strftime('%Y%m%d')
    if branch_name == 'milestone-proposed':
        revno_prefix = "r"
    else:
        revno_prefix = ""
    _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
    milestone_cmd = "git show meta/openstack/release:%s" % branch_name
    milestonever = _run_shell_command(milestone_cmd)
    if milestonever:
        first_half = "%s~%s" % (milestonever, datestamp)
    else:
        first_half = datestamp

    post_version = _get_git_post_version()
    # post version should look like:
    # 0.1.1.4.gcc9e28a
    # where the bit after the last . is the short sha, and the bit between
    # the last and second to last is the revno count
    (revno, sha) = post_version.split(".")[-2:]
    second_half = "%s%s.%s" % (revno_prefix, revno, sha)
    return ".".join((first_half, second_half))


def _get_git_current_tag():
    return _run_shell_command("git tag --contains HEAD")


def _get_git_tag_info():
    return _run_shell_command("git describe --tags")


def _get_git_post_version():
    current_tag = _get_git_current_tag()
    if current_tag is not None:
        return current_tag
    else:
        tag_info = _get_git_tag_info()
        if tag_info is None:
            base_version = "0.0"
            cmd = "git --no-pager log --oneline"
            out = _run_shell_command(cmd)
            revno = len(out.split("\n"))
            sha = _run_shell_command("git describe --always")
        else:
            tag_infos = tag_info.split("-")
            base_version = "-".join(tag_infos[:-2])
            (revno, sha) = tag_infos[-2:]
        return "%s.%s.%s" % (base_version, revno, sha)
def _get_git_directory():
    parent_dir = os.path.dirname(__file__)
    while True:
        git_dir = os.path.join(parent_dir, '.git')
        if os.path.exists(git_dir):
            return git_dir
        parent_dir, child = os.path.split(parent_dir)
        if not child:  # reached root dir
            return None


def write_git_changelog():
    """Write a changelog based on the git changelog."""
    new_changelog = 'ChangeLog'
    git_dir = _get_git_directory()
    if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
        if os.path.isdir('.git'):
            git_log_cmd = 'git log --stat'
        if git_dir:
            git_log_cmd = 'git --git-dir=%s log' % git_dir
            changelog = _run_shell_command(git_log_cmd)
            mailmap = parse_mailmap()
            mailmap = _parse_git_mailmap(git_dir)
            with open(new_changelog, "w") as changelog_file:
                changelog_file.write(canonicalize_emails(changelog, mailmap))
    else:
@ -201,13 +163,23 @@ def generate_authors():
    jenkins_email = 'jenkins@review.(openstack|stackforge).org'
    old_authors = 'AUTHORS.in'
    new_authors = 'AUTHORS'
    git_dir = _get_git_directory()
    if not os.getenv('SKIP_GENERATE_AUTHORS'):
        if os.path.isdir('.git'):
        if git_dir:
            # don't include jenkins email address in AUTHORS file
            git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
            git_log_cmd = ("git --git-dir=" + git_dir +
                           " log --format='%aN <%aE>' | sort -u | "
                           "egrep -v '" + jenkins_email + "'")
            changelog = _run_shell_command(git_log_cmd)
            mailmap = parse_mailmap()
            signed_cmd = ("git log --git-dir=" + git_dir +
                          " | grep -i Co-authored-by: | sort -u")
            signed_entries = _run_shell_command(signed_cmd)
            if signed_entries:
                new_entries = "\n".join(
                    [signed.split(":", 1)[1].strip()
                     for signed in signed_entries.split("\n") if signed])
                changelog = "\n".join((changelog, new_entries))
            mailmap = _parse_git_mailmap(git_dir)
            with open(new_authors, 'w') as new_authors_fh:
                new_authors_fh.write(canonicalize_emails(changelog, mailmap))
            if os.path.exists(old_authors):
@ -227,26 +199,6 @@ _rst_template = """%(heading)s
"""


def read_versioninfo(project):
    """Read the versioninfo file. If it doesn't exist, we're in a github
       zipball, and there's really no way to know what version we really
       are, but that should be ok, because the utility of that should be
       just about nil if this code path is in use in the first place."""
    versioninfo_path = os.path.join(project, 'versioninfo')
    if os.path.exists(versioninfo_path):
        with open(versioninfo_path, 'r') as vinfo:
            version = vinfo.read().strip()
    else:
        version = "0.0.0"
    return version


def write_versioninfo(project, version):
    """Write a simple file containing the version of the package."""
    with open(os.path.join(project, 'versioninfo'), 'w') as fil:
        fil.write("%s\n" % version)


def get_cmdclass():
    """Return dict of commands to run from setup.py."""

@ -276,6 +228,9 @@ def get_cmdclass():
        from sphinx.setup_command import BuildDoc

        class LocalBuildDoc(BuildDoc):

            builders = ['html', 'man']

            def generate_autoindex(self):
                print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
                modules = {}
@ -311,56 +266,102 @@ def get_cmdclass():
                if not os.getenv('SPHINX_DEBUG'):
                    self.generate_autoindex()

                for builder in ['html', 'man']:
                for builder in self.builders:
                    self.builder = builder
                    self.finalize_options()
                    self.project = self.distribution.get_name()
                    self.version = self.distribution.get_version()
                    self.release = self.distribution.get_version()
                    BuildDoc.run(self)

        class LocalBuildLatex(LocalBuildDoc):
            builders = ['latex']

        cmdclass['build_sphinx'] = LocalBuildDoc
        cmdclass['build_sphinx_latex'] = LocalBuildLatex
    except ImportError:
        pass

    return cmdclass


def get_git_branchname():
    for branch in _run_shell_command("git branch --color=never").split("\n"):
        if branch.startswith('*'):
            _branch_name = branch.split()[1].strip()
    if _branch_name == "(no":
        _branch_name = "no-branch"
    return _branch_name
def _get_revno(git_dir):
    """Return the number of commits since the most recent tag.

    We use git-describe to find this out, but if there are no
    tags then we fall back to counting commits since the beginning
    of time.
    """
    describe = _run_shell_command(
        "git --git-dir=%s describe --always" % git_dir)
    if "-" in describe:
        return describe.rsplit("-", 2)[-2]

    # no tags found
    revlist = _run_shell_command(
        "git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
    return len(revlist.splitlines())


def get_pre_version(projectname, base_version):
    """Return a version which is leading up to a version that will
       be released in the future."""
    if os.path.isdir('.git'):
        current_tag = _get_git_current_tag()
        if current_tag is not None:
            version = current_tag
        else:
            branch_name = os.getenv('BRANCHNAME',
                                    os.getenv('GERRIT_REFNAME',
                                              get_git_branchname()))
            version_suffix = _get_git_next_version_suffix(branch_name)
            version = "%s~%s" % (base_version, version_suffix)
        write_versioninfo(projectname, version)
        return version
    else:
        version = read_versioninfo(projectname)
    return version


def get_post_version(projectname):
def _get_version_from_git(pre_version):
    """Return a version which is equal to the tag that's on the current
    revision if there is one, or tag plus number of additional revisions
    if the current revision has no tag."""

    if os.path.isdir('.git'):
        version = _get_git_post_version()
        write_versioninfo(projectname, version)
    git_dir = _get_git_directory()
    if git_dir:
        if pre_version:
            try:
                return _run_shell_command(
                    "git --git-dir=" + git_dir + " describe --exact-match",
                    throw_on_error=True).replace('-', '.')
            except Exception:
                sha = _run_shell_command(
                    "git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
                return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
        else:
            return _run_shell_command(
                "git --git-dir=" + git_dir + " describe --always").replace(
                    '-', '.')
    return None


def _get_version_from_pkg_info(package_name):
    """Get the version from PKG-INFO file if we can."""
    try:
        pkg_info_file = open('PKG-INFO', 'r')
    except (IOError, OSError):
        return None
    try:
        pkg_info = email.message_from_file(pkg_info_file)
    except email.MessageError:
        return None
    # Check to make sure we're in our own dir
    if pkg_info.get('Name', None) != package_name:
        return None
    return pkg_info.get('Version', None)


def get_version(package_name, pre_version=None):
    """Get the version of the project. First, try getting it from PKG-INFO, if
    it exists. If it does, that means we're in a distribution tarball or that
    install has happened. Otherwise, if there is no PKG-INFO file, pull the
    version from git.

    We do not support setup.py version sanity in git archive tarballs, nor do
    we support packagers directly sucking our git repo into theirs. We expect
    that a source tarball be made from our git repo - or that if someone wants
    to make a source tarball from a fork of our repo with additional tags in it
    that they understand and desire the results of doing that.
    """
    version = os.environ.get("OSLO_PACKAGE_VERSION", None)
    if version:
        return version
        return read_versioninfo(projectname)
    version = _get_version_from_pkg_info(package_name)
    if version:
        return version
    version = _get_version_from_git(pre_version)
    if version:
        return version
    raise Exception("Versioning for this project requires either an sdist"
                    " tarball, or access to an upstream git repository.")
|
||||
|
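
As the new code reads, the lookup order is: OSLO_PACKAGE_VERSION in the
environment, then PKG-INFO, then git. A usage sketch (the version value
is made up):

    import os

    os.environ["OSLO_PACKAGE_VERSION"] = "2013.1.1"   # hypothetical override
    get_version("cloudbase-init")                     # returns "2013.1.1"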
@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -25,18 +25,22 @@ import datetime
import iso8601


TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None):
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format"""
    if not at:
        at = utcnow()
    str = at.strftime(TIME_FORMAT)
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    str += ('Z' if tz == 'UTC' else tz)
    return str
    st += ('Z' if tz == 'UTC' else tz)
    return st


def parse_isotime(timestr):
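
Effect of the new subsecond flag on a fixed naive datetime
(illustrative values):

    import datetime

    at = datetime.datetime(2013, 3, 1, 12, 0, 0, 123456)
    isotime(at)                  # '2013-03-01T12:00:00Z'
    isotime(at, subsecond=True)  # '2013-03-01T12:00:00.123456Z'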
@ -71,11 +75,15 @@ def normalize_time(timestamp):
def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, basestring):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, basestring):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
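
Both helpers compare against the module's current UTC time; a short
sketch of the expected behaviour:

    import datetime

    past = utcnow() - datetime.timedelta(minutes=10)
    is_older_than(past, 300)     # True: past lies more than 300s back

    future = utcnow() + datetime.timedelta(minutes=10)
    is_newer_than(future, 300)   # True: future lies more than 300s ahead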
@ -94,6 +102,11 @@ def utcnow():
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formated date from timestamp"""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None
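
For instance, the Unix epoch:

    iso8601_from_timestamp(0)   # '1970-01-01T00:00:00Z'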
@ -158,3 +171,16 @@ def delta_seconds(before, after):
    except AttributeError:
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """
    Determines if time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
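
A typical use of is_soon is expiry checking; with assumed values:

    import datetime

    expires = utcnow() + datetime.timedelta(seconds=30)
    is_soon(expires, 60)   # True: falls inside the next 60s window
    is_soon(expires, 10)   # False: more than 10s remain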
@ -1,39 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
UUID related utilities and helper functions.
"""

import uuid


def generate_uuid():
    return str(uuid.uuid4())


def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa

    """
    try:
        return str(uuid.UUID(val)) == val
    except (TypeError, ValueError, AttributeError):
        return False
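
Only the canonical lowercase, hyphenated form passes, because the
round-trip through uuid.UUID must reproduce the input exactly:

    is_uuid_like('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')   # True
    is_uuid_like('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA')   # False: not canonical
    is_uuid_like(42)                                       # False: TypeError caught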
@ -64,5 +64,6 @@ def setup(product_name):
    if CONF.log_format:
        serialportlog.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                     datefmt=datefmt))
        serialportlog.setFormatter(
            openstack_logging.LegacyFormatter(datefmt=datefmt))
    else:
        serialportlog.setFormatter(
            openstack_logging.LegacyFormatter(datefmt=datefmt))
@ -1,7 +1,7 @@
[DEFAULT]

# The list of modules to copy from openstack-common
modules=cfg,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
modules=log,gettextutils,setup,jsonutils,timeutils,local,notifier

# The base module to hold the copy of openstack.common
base=cloudbaseinit
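
For reference, this trimmed list is what the oslo-incubator update
script copies into cloudbaseinit/openstack/common when run against the
project directory (the exact invocation is an assumption about the
maintainers' workflow, not something recorded in this commit).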