From 06822e61310d167f82ef0cef7ace69ccce4fb0b9 Mon Sep 17 00:00:00 2001 From: Deklan Dieterly Date: Tue, 16 Sep 2014 16:15:49 -0600 Subject: [PATCH] Initial Python Persister Alarms and metrics persisted. Simple first-pass approach. Change-Id: Iea3e6b5506563a2954989cd3170212ad7b296c69 --- .gitignore | 1 + monasca_persister/__init__.py | 1 + monasca_persister/openstack/__init__.py | 0 monasca_persister/openstack/__init__.pyc | Bin 0 -> 199 bytes .../openstack/common/__init__.py | 17 + .../openstack/common/__init__.pyc | Bin 0 -> 311 bytes .../openstack/common/eventlet_backdoor.py | 145 ++++ .../openstack/common/excutils.py | 113 +++ .../openstack/common/fileutils.py | 146 ++++ .../openstack/common/fixture/__init__.py | 0 .../openstack/common/fixture/config.py | 85 +++ .../openstack/common/fixture/lockutils.py | 51 ++ .../openstack/common/fixture/logging.py | 34 + .../openstack/common/fixture/mockpatch.py | 62 ++ .../openstack/common/fixture/moxstubout.py | 43 ++ .../openstack/common/gettextutils.py | 479 ++++++++++++ .../openstack/common/gettextutils.pyc | Bin 0 -> 17888 bytes .../openstack/common/importutils.py | 73 ++ .../openstack/common/importutils.pyc | Bin 0 -> 2915 bytes .../openstack/common/jsonutils.py | 202 +++++ .../openstack/common/jsonutils.pyc | Bin 0 -> 5736 bytes monasca_persister/openstack/common/local.py | 45 ++ monasca_persister/openstack/common/local.pyc | Bin 0 -> 1303 bytes .../openstack/common/lockutils.py | 322 ++++++++ monasca_persister/openstack/common/log.py | 713 ++++++++++++++++++ monasca_persister/openstack/common/log.pyc | Bin 0 -> 27386 bytes .../openstack/common/loopingcall.py | 147 ++++ monasca_persister/openstack/common/service.py | 512 +++++++++++++ .../openstack/common/strutils.py | 311 ++++++++ .../openstack/common/strutils.pyc | Bin 0 -> 10179 bytes monasca_persister/openstack/common/systemd.py | 106 +++ .../openstack/common/threadgroup.py | 147 ++++ .../openstack/common/timeutils.py | 210 ++++++ 
.../openstack/common/timeutils.pyc | Bin 0 -> 8667 bytes monasca_persister/persister.conf | 24 + monasca_persister/persister.py | 280 +++++++ monasca_persister/service.py | 34 + monasca_persister/test/__init__.py | 1 + 38 files changed, 4304 insertions(+) create mode 100644 monasca_persister/__init__.py create mode 100644 monasca_persister/openstack/__init__.py create mode 100644 monasca_persister/openstack/__init__.pyc create mode 100644 monasca_persister/openstack/common/__init__.py create mode 100644 monasca_persister/openstack/common/__init__.pyc create mode 100644 monasca_persister/openstack/common/eventlet_backdoor.py create mode 100644 monasca_persister/openstack/common/excutils.py create mode 100644 monasca_persister/openstack/common/fileutils.py create mode 100644 monasca_persister/openstack/common/fixture/__init__.py create mode 100644 monasca_persister/openstack/common/fixture/config.py create mode 100644 monasca_persister/openstack/common/fixture/lockutils.py create mode 100644 monasca_persister/openstack/common/fixture/logging.py create mode 100644 monasca_persister/openstack/common/fixture/mockpatch.py create mode 100644 monasca_persister/openstack/common/fixture/moxstubout.py create mode 100644 monasca_persister/openstack/common/gettextutils.py create mode 100644 monasca_persister/openstack/common/gettextutils.pyc create mode 100644 monasca_persister/openstack/common/importutils.py create mode 100644 monasca_persister/openstack/common/importutils.pyc create mode 100644 monasca_persister/openstack/common/jsonutils.py create mode 100644 monasca_persister/openstack/common/jsonutils.pyc create mode 100644 monasca_persister/openstack/common/local.py create mode 100644 monasca_persister/openstack/common/local.pyc create mode 100644 monasca_persister/openstack/common/lockutils.py create mode 100644 monasca_persister/openstack/common/log.py create mode 100644 monasca_persister/openstack/common/log.pyc create mode 100644 
monasca_persister/openstack/common/loopingcall.py create mode 100644 monasca_persister/openstack/common/service.py create mode 100644 monasca_persister/openstack/common/strutils.py create mode 100644 monasca_persister/openstack/common/strutils.pyc create mode 100644 monasca_persister/openstack/common/systemd.py create mode 100644 monasca_persister/openstack/common/threadgroup.py create mode 100644 monasca_persister/openstack/common/timeutils.py create mode 100644 monasca_persister/openstack/common/timeutils.pyc create mode 100644 monasca_persister/persister.conf create mode 100644 monasca_persister/persister.py create mode 100644 monasca_persister/service.py create mode 100644 monasca_persister/test/__init__.py diff --git a/.gitignore b/.gitignore index 0d252954..8d642998 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ target/ *.settings/ debs/ logs/ +.idea/ diff --git a/monasca_persister/__init__.py b/monasca_persister/__init__.py new file mode 100644 index 00000000..58cbdea8 --- /dev/null +++ b/monasca_persister/__init__.py @@ -0,0 +1 @@ +__author__ = 'dieterlyd' diff --git a/monasca_persister/openstack/__init__.py b/monasca_persister/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/monasca_persister/openstack/__init__.pyc b/monasca_persister/openstack/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fbb220b987e2e9fa0255b5abd9d3b8700ba029e GIT binary patch literal 199 zcmZ9Gu?hk)42CN@h~NYG2DjYh1wLNa9j*GzMD|v~Gl5g;P>p6^3Dh4riGvekW^f)0=5BDXBijdzg&|`iu3KNO{kgFe xfQzxW3*{CeP&8~!ouuqps$Zjjh7!hi1+@0j7pLD{uSm~bwiuXUFxG}HBx~z_lXG6i O9P8=~IdlRzDSrVz)Khl= literal 0 HcmV?d00001 diff --git a/monasca_persister/openstack/common/eventlet_backdoor.py b/monasca_persister/openstack/common/eventlet_backdoor.py new file mode 100644 index 00000000..31c604b9 --- /dev/null +++ b/monasca_persister/openstack/common/eventlet_backdoor.py @@ -0,0 +1,145 @@ +# Copyright (c) 2012 OpenStack Foundation. 
+# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import errno +import gc +import os +import pprint +import socket +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet +from oslo.config import cfg + +from openstack.common.gettextutils import _LI +from openstack.common import log as logging + +help_for_backdoor_port = ( + "Acceptable values are 0, , and :, where 0 results " + "in listening on a random tcp port number; results in listening " + "on the specified port number (and not enabling backdoor if that port " + "is in use); and : results in listening on the smallest " + "unused port number within the specified range of port numbers. The " + "chosen port is displayed in the service's log file.") +eventlet_backdoor_opts = [ + cfg.StrOpt('backdoor_port', + help="Enable eventlet backdoor. %s" % help_for_backdoor_port) +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) +LOG = logging.getLogger(__name__) + + +class EventletBackdoorConfigValueError(Exception): + def __init__(self, port_range, help_msg, ex): + msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' + '%(help)s' % + {'range': port_range, 'ex': ex, 'help': help_msg}) + super(EventletBackdoorConfigValueError, self).__init__(msg) + self.port_range = port_range + + +def _dont_use_this(): + print("Don't use this, just disconnect instead") + + +def _find_objects(t): + return [o for o in gc.get_objects() if isinstance(o, t)] + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print(i, gt) + traceback.print_stack(gt.gr_frame) + print() + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print(threadId) + traceback.print_stack(stack) + print() + + +def _parse_port_range(port_range): + if ':' not in port_range: + start, end = port_range, port_range + else: + start, end = port_range.split(':', 1) + try: + start, end = int(start), int(end) + if end < start: + raise ValueError + return start, end + except ValueError as ex: + raise EventletBackdoorConfigValueError(port_range, ex, + help_for_backdoor_port) + + +def _listen(host, start_port, end_port, listen_func): + try_port = start_port + while True: + try: + return listen_func((host, try_port)) + except socket.error as exc: + if (exc.errno != errno.EADDRINUSE or + try_port >= end_port): + raise + try_port += 1 + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. 
+ def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = _listen('localhost', start_port, end_port, eventlet.listen) + + # In the case of backdoor port being zero, a port number is assigned by + # listen(). In any case, pull the port number out here. + port = sock.getsockname()[1] + LOG.info( + _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()} + ) + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/monasca_persister/openstack/common/excutils.py b/monasca_persister/openstack/common/excutils.py new file mode 100644 index 00000000..07a02beb --- /dev/null +++ b/monasca_persister/openstack/common/excutils.py @@ -0,0 +1,113 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import logging +import sys +import time +import traceback + +import six + +from openstack.common.gettextutils import _LE + + +class save_and_reraise_exception(object): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. 
In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + + In some cases the caller may not want to re-raise the exception, and + for those circumstances this context provides a reraise flag that + can be used to suppress the exception. For example:: + + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False + + If another exception occurs and reraise flag is False, + the saved exception will not be logged. + + If the caller wants to raise new exception during exception handling + he/she sets reraise to False initially with an ability to set it back to + True if needed:: + + except Exception: + with save_and_reraise_exception(reraise=False) as ctxt: + [if statements to determine whether to raise a new exception] + # Not raising a new exception, so reraise + ctxt.reraise = True + """ + def __init__(self, reraise=True): + self.reraise = reraise + + def __enter__(self): + self.type_, self.value, self.tb, = sys.exc_info() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + if self.reraise: + logging.error(_LE('Original exception being dropped: %s'), + traceback.format_exception(self.type_, + self.value, + self.tb)) + return False + if self.reraise: + six.reraise(self.type_, self.value, self.tb) + + +def forever_retry_uncaught_exceptions(infunc): + def inner_func(*args, **kwargs): + last_log_time = 0 + last_exc_message = None + exc_count = 0 + while True: + try: + return infunc(*args, **kwargs) + except Exception as exc: + this_exc_message = six.u(str(exc)) + if this_exc_message == last_exc_message: + exc_count += 1 + else: + exc_count = 1 + # Do not log any more frequently than once a minute unless + # the exception message changes + 
cur_time = int(time.time()) + if (cur_time - last_log_time > 60 or + this_exc_message != last_exc_message): + logging.exception( + _LE('Unexpected exception occurred %d time(s)... ' + 'retrying.') % exc_count) + last_log_time = cur_time + last_exc_message = this_exc_message + exc_count = 0 + # This should be a very rare event. In case it isn't, do + # a sleep. + time.sleep(1) + return inner_func diff --git a/monasca_persister/openstack/common/fileutils.py b/monasca_persister/openstack/common/fileutils.py new file mode 100644 index 00000000..bf7df55d --- /dev/null +++ b/monasca_persister/openstack/common/fileutils.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import errno +import os +import tempfile + +from openstack.common import excutils +from openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +_FILE_CACHE = {} + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise + + +def read_cached_file(filename, force_reload=False): + """Read from a file if it has been modified. + + :param force_reload: Whether to reload the file. + :returns: A tuple with a boolean specifying if the data is fresh + or not. 
+ """ + global _FILE_CACHE + + if force_reload: + delete_cached_file(filename) + + reloaded = False + mtime = os.path.getmtime(filename) + cache_info = _FILE_CACHE.setdefault(filename, {}) + + if not cache_info or mtime > cache_info.get('mtime', 0): + LOG.debug("Reloading cached file %s" % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + reloaded = True + return (reloaded, cache_info['data']) + + +def delete_cached_file(filename): + """Delete cached file if present. + + :param filename: filename to delete + """ + global _FILE_CACHE + + if filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + +def delete_if_exists(path, remove=os.unlink): + """Delete a file, but ignore file not found error. + + :param path: File to delete + :param remove: Optional function to remove passed path + """ + + try: + remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +@contextlib.contextmanager +def remove_path_on_error(path, remove=delete_if_exists): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + + :param path: File to work with + :param remove: Optional function to remove passed path + """ + + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + remove(path) + + +def file_open(*args, **kwargs): + """Open file + + see built-in open() documentation for more details + + Note: The reason this is kept in a separate module is to easily + be able to provide a stub module that doesn't alter system + state at all (for unit tests) + """ + return open(*args, **kwargs) + + +def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): + """Create temporary file or use existing file. + + This util is needed for creating temporary file with + specified content, suffix and prefix. If path is not None, + it will be used for writing content. If the path doesn't + exist it'll be created. 
+ + :param content: content for temporary file. + :param path: same as parameter 'dir' for mkstemp + :param suffix: same as parameter 'suffix' for mkstemp + :param prefix: same as parameter 'prefix' for mkstemp + + For example: it can be used in database tests for creating + configuration files. + """ + if path: + ensure_tree(path) + + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) + try: + os.write(fd, content) + finally: + os.close(fd) + return path diff --git a/monasca_persister/openstack/common/fixture/__init__.py b/monasca_persister/openstack/common/fixture/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/monasca_persister/openstack/common/fixture/config.py b/monasca_persister/openstack/common/fixture/config.py new file mode 100644 index 00000000..9489b85a --- /dev/null +++ b/monasca_persister/openstack/common/fixture/config.py @@ -0,0 +1,85 @@ +# +# Copyright 2013 Mirantis, Inc. +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +from oslo.config import cfg +import six + + +class Config(fixtures.Fixture): + """Allows overriding configuration settings for the test. + + `conf` will be reset on cleanup. 
+ + """ + + def __init__(self, conf=cfg.CONF): + self.conf = conf + + def setUp(self): + super(Config, self).setUp() + # NOTE(morganfainberg): unregister must be added to cleanup before + # reset is because cleanup works in reverse order of registered items, + # and a reset must occur before unregistering options can occur. + self.addCleanup(self._unregister_config_opts) + self.addCleanup(self.conf.reset) + self._registered_config_opts = {} + + def config(self, **kw): + """Override configuration values. + + The keyword arguments are the names of configuration options to + override and their values. + + If a `group` argument is supplied, the overrides are applied to + the specified configuration option group, otherwise the overrides + are applied to the ``default`` group. + + """ + + group = kw.pop('group', None) + for k, v in six.iteritems(kw): + self.conf.set_override(k, v, group) + + def _unregister_config_opts(self): + for group in self._registered_config_opts: + self.conf.unregister_opts(self._registered_config_opts[group], + group=group) + + def register_opt(self, opt, group=None): + """Register a single option for the test run. + + Options registered in this manner will automatically be unregistered + during cleanup. + + If a `group` argument is supplied, it will register the new option + to that group, otherwise the option is registered to the ``default`` + group. + """ + self.conf.register_opt(opt, group=group) + self._registered_config_opts.setdefault(group, set()).add(opt) + + def register_opts(self, opts, group=None): + """Register multiple options for the test run. + + This works in the same manner as register_opt() but takes a list of + options as the first argument. All arguments will be registered to the + same group if the ``group`` argument is supplied, otherwise all options + will be registered to the ``default`` group. 
+ """ + for opt in opts: + self.register_opt(opt, group=group) diff --git a/monasca_persister/openstack/common/fixture/lockutils.py b/monasca_persister/openstack/common/fixture/lockutils.py new file mode 100644 index 00000000..6d89db61 --- /dev/null +++ b/monasca_persister/openstack/common/fixture/lockutils.py @@ -0,0 +1,51 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures + +from openstack.common import lockutils + + +class LockFixture(fixtures.Fixture): + """External locking fixture. + + This fixture is basically an alternative to the synchronized decorator with + the external flag so that tearDowns and addCleanups will be included in + the lock context for locking between tests. The fixture is recommended to + be the first line in a test method, like so:: + + def test_method(self): + self.useFixture(LockFixture) + ... + + or the first line in setUp if all the test methods in the class are + required to be serialized. Something like:: + + class TestCase(testtools.testcase): + def setUp(self): + self.useFixture(LockFixture) + super(TestCase, self).setUp() + ... + + This is because addCleanups are put on a LIFO queue that gets run after the + test method exits. 
(either by completing or raising an exception) + """ + def __init__(self, name, lock_file_prefix=None): + self.mgr = lockutils.lock(name, lock_file_prefix, True) + + def setUp(self): + super(LockFixture, self).setUp() + self.addCleanup(self.mgr.__exit__, None, None, None) + self.lock = self.mgr.__enter__() diff --git a/monasca_persister/openstack/common/fixture/logging.py b/monasca_persister/openstack/common/fixture/logging.py new file mode 100644 index 00000000..3823a035 --- /dev/null +++ b/monasca_persister/openstack/common/fixture/logging.py @@ -0,0 +1,34 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures + + +def get_logging_handle_error_fixture(): + """returns a fixture to make logging raise formatting exceptions. + + Usage: + self.useFixture(logging.get_logging_handle_error_fixture()) + """ + return fixtures.MonkeyPatch('logging.Handler.handleError', + _handleError) + + +def _handleError(self, record): + """Monkey patch for logging.Handler.handleError. + + The default handleError just logs the error to stderr but we want + the option of actually raising an exception. 
+ """ + raise diff --git a/monasca_persister/openstack/common/fixture/mockpatch.py b/monasca_persister/openstack/common/fixture/mockpatch.py new file mode 100644 index 00000000..ab2dc021 --- /dev/null +++ b/monasca_persister/openstack/common/fixture/mockpatch.py @@ -0,0 +1,62 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +############################################################################## +############################################################################## +# +# DO NOT MODIFY THIS FILE +# +# This file is being graduated to the oslotest library. Please make all +# changes there, and only backport critical fixes here. 
- dhellmann +# +############################################################################## +############################################################################## + +import fixtures +import mock + + +class PatchObject(fixtures.Fixture): + """Deal with code around mock.""" + + def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs): + self.obj = obj + self.attr = attr + self.kwargs = kwargs + self.new = new + + def setUp(self): + super(PatchObject, self).setUp() + _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) + + +class Patch(fixtures.Fixture): + + """Deal with code around mock.patch.""" + + def __init__(self, obj, new=mock.DEFAULT, **kwargs): + self.obj = obj + self.kwargs = kwargs + self.new = new + + def setUp(self): + super(Patch, self).setUp() + _p = mock.patch(self.obj, self.new, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) diff --git a/monasca_persister/openstack/common/fixture/moxstubout.py b/monasca_persister/openstack/common/fixture/moxstubout.py new file mode 100644 index 00000000..d92c9626 --- /dev/null +++ b/monasca_persister/openstack/common/fixture/moxstubout.py @@ -0,0 +1,43 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +############################################################################## +############################################################################## +# +# DO NOT MODIFY THIS FILE +# +# This file is being graduated to the oslotest library. Please make all +# changes there, and only backport critical fixes here. - dhellmann +# +############################################################################## +############################################################################## + +import fixtures +from six.moves import mox + + +class MoxStubout(fixtures.Fixture): + """Deal with code around mox and stubout as a fixture.""" + + def setUp(self): + super(MoxStubout, self).setUp() + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = self.mox.stubs + self.addCleanup(self.mox.UnsetStubs) + self.addCleanup(self.mox.VerifyAll) diff --git a/monasca_persister/openstack/common/gettextutils.py b/monasca_persister/openstack/common/gettextutils.py new file mode 100644 index 00000000..d720602c --- /dev/null +++ b/monasca_persister/openstack/common/gettextutils.py @@ -0,0 +1,479 @@ +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. 
+ +Usual usage in an openstack.common module: + + from openstack.common.gettextutils import _ +""" + +import copy +import gettext +import locale +from logging import handlers +import os + +from babel import localedata +import six + +_AVAILABLE_LANGUAGES = {} + +# FIXME(dhellmann): Remove this when moving to oslo.i18n. +USE_LAZY = False + + +class TranslatorFactory(object): + """Create translator functions + """ + + def __init__(self, domain, localedir=None): + """Establish a set of translation functions for the domain. + + :param domain: Name of translation domain, + specifying a message catalog. + :type domain: str + :param lazy: Delays translation until a message is emitted. + Defaults to False. + :type lazy: Boolean + :param localedir: Directory with translation catalogs. + :type localedir: str + """ + self.domain = domain + if localedir is None: + localedir = os.environ.get(domain.upper() + '_LOCALEDIR') + self.localedir = localedir + + def _make_translation_func(self, domain=None): + """Return a new translation function ready for use. + + Takes into account whether or not lazy translation is being + done. + + The domain can be specified to override the default from the + factory, but the localedir from the factory is always used + because we assume the log-level translation catalogs are + installed in the same directory as the main application + catalog. + + """ + if domain is None: + domain = self.domain + t = gettext.translation(domain, + localedir=self.localedir, + fallback=True) + # Use the appropriate method of the translation object based + # on the python version. + m = t.gettext if six.PY3 else t.ugettext + + def f(msg): + """oslo.i18n.gettextutils translation function.""" + if USE_LAZY: + return Message(msg, domain=domain) + return m(msg) + return f + + @property + def primary(self): + "The default translation function." 
+ return self._make_translation_func() + + def _make_log_translation_func(self, level): + return self._make_translation_func(self.domain + '-log-' + level) + + @property + def log_info(self): + "Translate info-level log messages." + return self._make_log_translation_func('info') + + @property + def log_warning(self): + "Translate warning-level log messages." + return self._make_log_translation_func('warning') + + @property + def log_error(self): + "Translate error-level log messages." + return self._make_log_translation_func('error') + + @property + def log_critical(self): + "Translate critical-level log messages." + return self._make_log_translation_func('critical') + + +# NOTE(dhellmann): When this module moves out of the incubator into +# oslo.i18n, these global variables can be moved to an integration +# module within each application. + +# Create the global translation functions. +_translators = TranslatorFactory('monasca') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + +# NOTE(dhellmann): End of globals that will move to the application's +# integration module. + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True + + +def install(domain): + """Install a _() function using the given translation domain. 
+ + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + Note that to enable lazy translation, enable_lazy must be + called. + + :param domain: the translation domain + """ + from six import moves + tf = TranslatorFactory(domain) + moves.builtins.__dict__['_'] = tf.primary + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, + domain='monasca', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. 
+ + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. 
+ + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + # Merge the dictionaries + # Copy each item in case one does not support deep copy. + params = {} + if isinstance(self.params, dict): + for key, val in self.params.items(): + params[key] = self._copy_param(val) + for key, val in other.items(): + params[key] = self._copy_param(val) + else: + params = self._copy_param(other) + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except Exception: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + if six.PY2: + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. 
+ + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale_, alias) in six.iteritems(aliases): + if locale_ in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. 
+ If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. 
This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. 
+ handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/monasca_persister/openstack/common/gettextutils.pyc b/monasca_persister/openstack/common/gettextutils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e58b46576d6e521f4b6561f9837420c2d93336da GIT binary patch literal 17888 zcmd5^&2!vFc5lp(Lk=|(C9N#$%WLe1Eh-U*UfWS^6v?|X^;wZ=2c)buH_8+^zzjjn z3`PJ((rD!^$MVM7RHbstB{^)4d(1JZ`~#^{aLd)YaCmzW;Sc`Y{vBFOn=^N&KJ+>X0vW0^sr#U1IBNdVAg~OO$R;H z%ooNyGv<@cLnb_Af;saHy)T+@(d-;Hn@6N@)NFo33dhXmD^fUaHeZ#(3A1_91oLcV z^OOndQaEh_^i(%HXQYn{QLp44>}IQrIw} zSJCCC6*G{kdWrg77ew^KRHUel+3dyLX7SR+Y&^`n zzv!k}Qa(`%dj6B~MfYmh^T+AbAVvcS)I`%!>W2LYavhYd7sRk|HSGAKUWQg;_qyLp z!?K=IpAAFbk93I9RzFw@3YOkfke0IE>TfF2*?8qQ0+) znSjmchAS`>e%khz(QO)~cs0M%D(@7QWkrp}%A_S|4#%SRtyVOMvR3OnXh~@ZOr!2J zoY$Q;hfJ!;4vsbL;`AJ>nGxMML2cP_%+C!KLG^@BMT?*?&RnNxwLfTcTgCb>z~ z+JWNKq%d!O?wBu)`O+~Rs9e?jnK7SM&C@DWiA)t$^1^JC!1lR=@X^#%=PK<&*@LlY z(uguu%>0IbKZL;>K-~OxJBGZvkG4Z-8*Fak4&uzEz)iIY8Quz^F-02z=6B79vyBb!{CN1o^#3R6UF> zD4N&`+dgi%k3!c^Q;3Ugr@Pz>ABMfDG36@Z`bkMcM3>Wpb*En;wJ8luP$-rUEyZ0L zCD$Jgdl6cJ_~A(WLqSvgNC=@&)W1YJSarp;C#9`WyU1OO(s$TS%JhnbDZ_( 
zat*V(zC^K0DUEU3i<{B+zCW1uV7XPFG^9;YFL_l+xU+t(b^FTZC!(((sO?k7MnUeU zU8yQBffC$V!pZEpK9ZEqIw_&fnM%z$?5r)(s(91~rI#kiDspxJFC9+M~ zgu>p74JBfjM14OQBMdhwHON%OezQoCm+_xKLC3F_n~@B{8b&j!c55lcY#9NIdsW%dyVNT-SiXn#RO5YkzuIE7Gfv_@|`KR!1u?=OPNBIk*E8dB^tkWTS9B^w_wPdQw(V*QHH>w_fnov zB=ZXUoGlHIN=eDdeNb{{ZHe~72GRq>P_35c6fMo-aY>D6wSu_aYN;1zbK*?owPAuV zJ;}!2Y2Ky<@m}YNj>)5dc~rjA)=FuwBNdMZgOsF<7;2STrE#S3+Tv`zx;V>!GH9Ek zv{6HTXlTUb(hnmH^Z^s(o+n+!)75zJFdRhTpdA*zSK|71Jm^H-QNmodwRDc5Hltq& z-)%gjVgD*ZPXw-vnl-dWlnb;2>38@zYy$y7@{O0n0kU(AnWs{smPE5fV)2r03cCDW zl#QG22K&M<$mxuFZq$($#_=d|F|j*HQ~6Xu7opz9*u61)3eq+|AaSFV&+;5$iX
mB_L{NLvPIxBGlXW4KVCv=YE#F5hdkZVB**ndXo_hqxb1c$ z$lFx1G=;<5kjMT@Dx8Tb~Fekxgl#Dwi!wRBKCrniC3E|oIo8_Aj zqHG&7&4jPliyw)Y*>uy&PLXhy>Eu$_>^9w%QJSoz+erUb^wBvLIB8>Qbjv#YWt-Ht zp(p}%5RPyk`bi{eBdyBIYaf1mr9@B8*|dT)|Qw|dCVZZ^E}aZiM18`yETe>85uo~ z+pyD78(@fFHxl^DD}w%Hol=?h+Wv}twbJo7pj-3B!-tXnc#F7h(F!gLj8;K+K=O*k0++P_I_fKc-XJ`` zbI2q=C6fLHI$b0}1b~*FFa1xPuDCWScjK+xNc!$*5Vd2VC-$C*kcCcUCsqRH1u%>+gL9s?rChOCRxijV5FG%dann(ITWz%WJK#W@F(E^PDvldC(mOc8q@}0n z8-c44j-D~kDi&#m4B|eGpPD_X%%{N8P_t%Qz^L(y!Ym;)7VxMBgGk3K>ogV@S1c_b zg>riW15cFzQw+zBL;06+5wnEn8*2)=ZdG$ z=1AMAV#=K6b}#jQ4}Uc}QUbgSxS_95SN)`$dhB)o*c$I$kn{IAQU(;JQFC6Yo+iqI ze}XZPZXT;R(lwl{{6dPwTdR|PRu!L(n=mzwAjnP1`#P{;=HIfyLV1Sh;h`NdHt0+gp0 zg&7kqfYO!IFU|udm@&U>5TqlLI)_JC3u)oCM@SIXEUY*W&bVP zzpNon+_uVrx+?2??oyfj7WDlKE)-Q?YFJm zKg$;}Amu01HyFP{k{`THndU8A4A9ey>xxxfSDdk{C9vvnz%c1>y=cFS%NjGyXHNCg zS%bBvoduJ8X`W)exkfKAs7v{Pcm&ZYhRJj0=^R`DE!A3t6W3(8gw?vU?$IVGWGsol zKj@CI(wursX~kE|N|;KTm_o#*R{r#+Cy^D~_Q(#8&3Humy<GsEBi;(r4p*M!g42; zDRtfd7qWgOvjdNn{F2*KbiOzRGC{mQGLPSpNs$c%unSl3y>9+kTpVXeW{V*n!Z-1F zmmYj*?!q=a{sH^K{Qf4de2!NZ^Tu+?L3`G3{xwFsZs%D%gAJ}_nUh;h=%REPoj&yg z^{ErybJ@KT1c+XTvTuY@7++$}d6o;~XI%tqxk@rG>^m5TFN zkd4~gd9>^U(ZSj_f;ZW#f#nPYlUP+9^u|R~h}sc0-VeDMK@#G4)I&(w&ahJkfp{Kc za@lf{tm6G;cYWXw(`~H&(yzXR8_exN5qe>T!h32iPe+}PHVjY|DfzD0mbVb%VyRhI zRxL~AW?MF)QX#!tsH3%M z*uf%M_e{T62y%XPi^cN#|5$Q5BInI>)?lHjU#r7_5yUfhqndfYeLgvnC7@Zjxz|Wqg;+oJYYpRp*FvP?!BE0A!)C?+gkE$zjJ7H{;s7 zxRTFd+{h=^TuV8yTvk;rm`lq(iCp`AC_Y)$RK_f`Ve)VTyt>BK55-N$FqOvrmC4wA z4Wqzz7xIm!-sHBQTqtdQr?Im-BGwjMUG>Vh-9DT%~smLn^x=HLfWd0>e>?3 z!sA+*ca|p#k1h?p%hG!|WdfC9=VjDZq)PL%n|w|i>QVPa5WP=WqB*tuJl^(xjEhnr zrL@=Cm4uXk2f9)&)z@lRGIAQPG)`BY#pCrx{X~7fK2xs(P@sa*q|o@&iV1G@TnyO# znn_-xlmZ-sJ;TkDFlg(rYbXM0cxKG;sSRZn*-!=}S7r9Z01crT@+tftGQcB-o%{}> zWTkk}kdJJQy4N-i^?#>`=V`Kon5u z1%RKxR4-uDF!l;xK!*1C8lj(^d7^S??drXEB^sPFpW+r0HFz@&@hkHbmndVy9Lm}g z(>~)o_{!k-F7E00FDWx{O-3v+Q%@h`bQ?PY6=d@t`cY3%d*)A)-W7XJzh?C#hfQQ> zs-g;tw=*U~CWv5dqD2X?QcCR`X}KiO=5+F4qR7NhybOrP6k^gabwwWhTucvh8EkK2 
zxuW}N*Ca#reu$Sf)?qW^&pXi|*p-3m0suRFtOlAEUK&_JVefb9%!7zaJ-@#d`0rf@ znc{&O2!FgE;E9Y1yM{A?P;CAS3<(=;I}vQAr0!3)TdQl}nlVqd^BXt+P%>jYy6F$H z7A~nixZJoaR>(e(yIHUG(M>TGov5dq%QPFj^6`~hx39c^n>**$Zrr(YkthMA;Ml@~Glzc*2!~HN$ABKRInlI(JXUd}wEj>LS~C;`B!KNo&st5wBiI5w zCB`&SAz2kuq9`kj1?t&_fnR~98=_6#ZZyE>A$b+udQj2;W@FPcl4DK-^5T+x(xn>H+mI&3e+8%G8kEEzx_O;K zhvq#&>E>`@vtd;`T<^I>6=JbBz=-SN;>2PV`cyrrnnSlk1H^%(&vBwkhB zQi)`_ZE6)yx0w{OAUyyf7|BL?fE`>E74axLP>3cPw0k40&DzSe0;3_mQldB(cumBw zmMDQQH3h!D!Z-Z#9c;j-G}~q1jNeLtbZMoOSgM;EkNh4fpSC;yceaB|9BMpRnpzbS zx_%4OVH^7Yvm3gmE8SRLT2QYPC)iD9WU{1ft$1!kWLOfc(ce&Gh`9)k4Vk@njC*gW z5xQT$Vm@{r0C&z^h<-(;c7yLrbkclFocKct$;5())!1wAoH9hY)C~vNA&|WHSF{`? zxn^{i2<@SUNtABy16=CP5x7%p8EI3z-{(opB7^Q+D8oSZkI2C9dt*LfL_G2Mp_)!~ ztFP}hJG_ga_z}-WoWyJSh*pk4H zb70Nrpu6yWvSY4aSiTS-vwopi(&=XM4G@53`Tk3Ga`?Cjlo|Ppw$jWyRlQNm6EjP~ z&fvhk9d|wH-P$Oz2Yv@ZZb4u{if#woRvt-1LCD}3R&tO->qRq)z>^38k`?Kol~2O% z2tYVL#zV{e4wNQ!16Ixs4RPzC{9(P@IVs0Hv?Z={Q z^vAZv^q%y=TLP_LF2)uHXaz95Wo?|4-&f@?my5%!Tebv>($u}2Q=K;N`dNEB46aAL z(DJ@ok*_kd{3(UlgFCM=LYC7*jkh=$G`2q{<`f2g{GdN+-prtjG}Z8PhP4TWHG3 z=K6v(1`6|fd0450{$TE<3ndFtR8K-cLk>jn46d0 zXS|1FpR7ZZ&VQgyrz1CwGv$&BU7d)=F%&TOz_)d$8A4uX!k@FAj7o9HFuR^BT#0)t zhS1OU(ZD|k+5g0WlYT(n{6iiTD@puV3{sC_BSU5ZsdCjkIU>A}J7mONZ~`iV=lmEP z>Vkq}CfAgxW&$@$AdsEJWoZRlu>aV|&~2&Ln%FEu&D5L1*F5^-zA`~aiW@X1`-iWB zOYz(8e1QcuH`o92bpa*rGw$XDmI(sgz9bXS~F*PxSE%Q1TU7$N4wTiP;mCHQfmFCa!m{ z0f?2MB+IBgzFdw)N_!qO{uDI*f&=23b?rp0vbYHFzOnROQcV&SI3!Xl_ELa6Yv2hf z8i8#k&ffC3!k!|Ef!&bf-|aI7#)>8Lmdkr!!)hTDNb> zCp7lru6#;kFRsZaHTGiF`(3{HJ)Q*qU_kGUc%s=*!4q-&Q(kiD`)oSIqg}$r3jD7j y?-6`Vn>gW<9IR|M@C9)LIAfzytJbQ=HyYLYobE%a*RbyL%E`Z|ztuR=IQ8EsYM?Rz literal 0 HcmV?d00001 diff --git a/monasca_persister/openstack/common/importutils.py b/monasca_persister/openstack/common/importutils.py new file mode 100644 index 00000000..807c2f5c --- /dev/null +++ b/monasca_persister/openstack/common/importutils.py @@ -0,0 +1,73 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. +""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) + try: + return getattr(sys.modules[mod_str], class_str) + except AttributeError: + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """Tries to import object from default namespace. + + Imports a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. 
+ """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def import_versioned_module(version, submodule=None): + module = 'monasca.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return import_module(module) + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/monasca_persister/openstack/common/importutils.pyc b/monasca_persister/openstack/common/importutils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7b43e894116a2e14fadac749c2fec39cc5260fe GIT binary patch literal 2915 zcmd5;&u`;I6n=Knv`yQ` z(Ftroc}P`2CvdzU(n&~phhE@;4&_~Ox|FYovqE`KoE`_}tMme~YvQko-)H}D)aQLZ zh8cW2U)07$Mpda(dDOVFDqX2;lQ`JJ5sAzQN%39laQ2vA8K^7pii?)Kck7l34 zToTO$^L0Q!cIYagtB|fb4 z@G!J7i2V#^qFiG%phsDiS{oIno}*)(DfMYo z>a1#V_U3ioR7y@L55_=96x;V;_a$;@qb$|BcF|NtMcsgl(a=6R7!B}KV8C>$#`9XIHcKBPQEX`udNY#T zjnoUJC1i|X1dKgSPBt4amTn!}x6lRM4*HglP2o;32s*(|uo3j|FNw#ba{C_5z769f z_mCCHsmqXlCnN>msNI9{ydn)`Ww~@Cc>)(X0a!XyQC&o(izRCECZa~X!G!Nc$V<(s zMN$%-KWF#;Ft-6rEpzpBrZV>-1}v8n3N!v=g>nqzGNr`#onPVv>OqBf-=E!i_aCPMIL>`Y9VXlg_sa&Mfxv~o+c03qF z`1#AZQ!E#q|GJ&Ms3?tvBA1afOXyfuzdvCFh*q$NeQm8+!YtD|wDHg$jF@T3n+dCb#51BtK5rrimRy=} zPEX$A*_TrOBCQ&Ae^QDuzLHXjwvRAox$M#E1ITb2aug}D%J}U2X!aeLTPX%eo*B}K zXtxz3uj}s{_v4FU>}4kGagCZVabAQhz5Bz-OEUAwKLtF?kSQDlA~e(H^9PDGka*cjz7p8wZw;-d5H4VrVDml4O{aqFG()$rcasZc^qi zxb)&hByaU*>Jdu0^6vfNaT}1^+-gCqJnI1G)p!V<>#2*S@_6Y)x4C=*gQM$=Ic@IGBCA5x`U~j5UtM#~*H1q5wEjcRh z16FwZ*!>|^zDvxq{R(*sh@Q)^6Kq1P{=0>dCrMssNiq?+G2OmwJ!FpsEU6`lc2H|I rd6$RyyXfwy;{D%W42=A9pEzGKcKKT9h3mn3r@!7`>2LOX>#P3)DNY0E literal 0 HcmV?d00001 diff --git 
a/monasca_persister/openstack/common/jsonutils.py b/monasca_persister/openstack/common/jsonutils.py new file mode 100644 index 00000000..6d13d018 --- /dev/null +++ b/monasca_persister/openstack/common/jsonutils.py @@ -0,0 +1,202 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. +''' + + +import codecs +import datetime +import functools +import inspect +import itertools +import sys + +is_simplejson = False +if sys.version_info < (2, 7): + # On Python <= 2.6, json module is not C boosted, so try to use + # simplejson module if available + try: + import simplejson as json + # NOTE(mriedem): Make sure we have a new enough version of simplejson + # to support the namedobject_as_tuple argument. This can be removed + # in the Kilo release when python 2.6 support is dropped. 
+ if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args: + is_simplejson = True + else: + import json + except ImportError: + import json +else: + import json + +import six +import six.moves.xmlrpc_client as xmlrpclib + +from openstack.common import gettextutils +from openstack.common import importutils +from openstack.common import strutils +from openstack.common import timeutils + +netaddr = importutils.try_import("netaddr") + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (six.string_types + six.integer_types + + (type(None), bool, float)) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + # handle obvious types first - order of basic types determined by running + # full tests on nova project, resulting in the following counts: + # 572754 + # 460353 + # 379632 + # 274610 + # 199918 + # 114200 + # 51817 + # 26164 + # 6491 + # 283 + # 19 + if isinstance(value, _simple_types): + return value + + if isinstance(value, datetime.datetime): + if convert_datetime: + return timeutils.strtime(value) + else: + return value + + # value of itertools.count doesn't get caught by nasty_type_tests + # and results in infinite loop when list(value) is called. 
+ if type(value) == itertools.count: + return six.text_type(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > max_depth: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in six.iteritems(value)) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if convert_datetime and isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif isinstance(value, gettextutils.Message): + return value.data + elif hasattr(value, 'iteritems'): + return recursive(dict(value.iteritems()), level=level + 1) + elif hasattr(value, '__iter__'): + return recursive(list(value)) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. 
+ return recursive(value.__dict__, level=level + 1) + elif netaddr and isinstance(value, netaddr.IPAddress): + return six.text_type(value) + else: + if any(test(value) for test in _nasty_type_tests): + return six.text_type(value) + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return six.text_type(value) + + +def dumps(value, default=to_primitive, **kwargs): + if is_simplejson: + kwargs['namedtuple_as_object'] = False + return json.dumps(value, default=default, **kwargs) + + +def dump(obj, fp, *args, **kwargs): + if is_simplejson: + kwargs['namedtuple_as_object'] = False + return json.dump(obj, fp, *args, **kwargs) + + +def loads(s, encoding='utf-8', **kwargs): + return json.loads(strutils.safe_decode(s, encoding), **kwargs) + + +def load(fp, encoding='utf-8', **kwargs): + return json.load(codecs.getreader(encoding)(fp), **kwargs) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/monasca_persister/openstack/common/jsonutils.pyc b/monasca_persister/openstack/common/jsonutils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..032023b419ff4f7cb074bea7e22b39356fe0e740 GIT binary patch literal 5736 zcmd5=&vP6{6@Ih3T1o4b^~07WTmI3p4PJq~vg0^$5+||2P#6lC8Y!`Bi!e2s>6KO=h#tpE7w->9NzXs1TmpUH8Y!a;e6XHQ7S+6*}Oj~{d3ehM~+^k&Ju++_PcdnR&qhwi_%__R!RGo zw3ns5BJEXazai~6rM)I?gSP6_S|&Y9oo`9|ZCcPPyeF7OH%YHjyG#qM8;mee3IQWf znxYrjj+>-!Qh1VHz@sS&r|AXu^lb`fD4Z4HExDhQ`*&yndA`NBsdbwkC$l4X=SzS$ zxj|3#ZR)&Bo%cxJq3{&x_bEJ0`U48*sa<8C9M<@dA9DwOt~VAqq<-TN)qc4CXia50 z_N@+8-$rp{BW;?s+QxQdR5uO#u~xk--Hk$Rl&{))PucA#*)kv0Y6^chm(_i>?I+MQR!w!2UdmYUZQNOOxo3A0)~960%&Y%u_ewJoke~e{alAl zrPj5^5U-a-T{yO@8_RHwr+s@_J<|EE~O)fnFY1Hkd*-_yHgvMq^)F#hqOI{AhyM&6fYM5$^qj&ntv@T%vkL80D0@j^h1LgubpQhx%ARD-dWJz& 
zo;(~UsD=})ec)8S9JxI$4*rwuG<`wThW;e|0#D(h`w|Pcra4dc@;rO zj$&6lK0}=a@&Bd6_7>seS;6XrNdM@-AuNoa0lIvy9EdtC95`?=R|lbE*1wDTY_V!y zG2qL_ONG)&Q5rA7CL26A#B^wCiL#Ya;n)&&&Wk!E$F}kUec=eo(}MCvD!|P?J+6ou z)*mNJRI!&NhwL%Q9W(U=(Xif_$m5I=2Cd650Ud(jR&kPu9 z#DXho5BAc29D*Jp4+cTZL?b3b-};JAVqp83@GZNykqVPy#-Xx$Me)$4&HP{o4rt}W zh94ITH)4J_G7%Gcl)yD`)QV!ClN3P3BTew8#(ZX5SVuj?BDEj#44uBQV&`y@Q1|S% zO4~AHLFj;`2Vw_UK!8a%O|DtBm4XFR6>8lpPzMEP8aY9)01w$OZ-nSa8(AL!8_=aO z1J!Ickqnf-2j7a(-~l4f%Ttl(;Xn{|(|uc!|G^F@i0Bi`A@M)f?aUD=pwvutpdO-F z@9Nkx`*r<&PgWPOA-2wzZ{af6&^#mh`~v;7M9&?1USewD<1Wb_Z19=$6#PO6Nglk` zKtm0-ffG;EydlozF7AqoOgRb`(Kq{0UHue!&#Z=#wmORk;p&4l=y!Eu&1$B5sky!t z+3o&j6JfrJU<`JE=aydWB2Z1>U*|j+VgItB#52mSW*(K_z2&|J*rth%LkjB$-yK4Y9S~jk6x;Yg5+n00(J<4-!u! zzH?dPN_6mCKi0+?7i zjOcCvobP2>nl(5y-80-U<}Q~yE>}S=V=gBz-j#T1NLVp7i27?iY3^0H@d%f3(a`y6 zr&5}8s?JntyfW`hjW42KcPf>#Q-_p)lg_kr#hG_5jLkWhON-8wGv|y$rf|(T^W_Bx zveJ3TC!DErFg$t;?vHq$Wi-glsLnt`Tn#yNmrE}cBdDLSH$rE4@DeqFu-2NmCi@lY zw%?C!gEvuPIWk2wGkr*WeBY0eTUcQv%Ij_>fP85EBTTSvGLm)xj0BeohMPIl$uM{|XNQ3_m7eP91UdbIP&3qLIumHz525DX z5sIR+ok|wYvo4cT9%`H=I7i}$p07MZ-{ z6@VfW#6GfQ@c`dX(y%BvhF@LmDH+72T;|&}8_9WXkr~Nltw`~Bf6$w#3Jd$~3?CRI z<}eqcPEu}UHv3VG?>I#dA6n4S#mxSuSh4O6UIHgz^l?Uua)*;KM(Qn8ic$c%To1Z$ zu`&a{iQ+_XY$M#QyT}i(a>LAX7&PKv@v$VYH*;K!ILn`ak}#K}Z0Kr4(M^mO7eC{| zQ8rXQp`c_AG~*m=uB_#)?~rZf?zN^RS;gIabssZzm<=@5avc>x@herG=`t#W%91l# rnnBt+RjQ+En8UsNO4Fz*(8sS{zEXy^SFcW1>y^E<`;qPZBzy6$ksc5%C>>nWZ3aTbrKr5;P>J%+tjCMeafa;J~ z(K@6Rf)V{BdLX)5M8uI;i=({}@A4#gEbp49NAiicP@i>Gc8PV`lnp0(mE4x5EtehJ zWUy`DB;UEbNer*pa9^=)|6*Ob2}-}=#Ftla{8G>#A@(&C<7)s$0!+{`LcMyBr)B#h4i3#B#TCV#ujC%x&4F2#{A6I4JvS+1RXZ*|F*F>gSm=)!`VjnrLD?yK(Vbw={`~%B6`ccFUe4hf~=D<%(i#Au<{aO zpFoj%rQ&F+sy$D#U&yJT1jaSE0wk2){O>@_V%Nn;akHwQo`X6STn*Z~0f!X&f(=Fsa mvzsE%I-~n$3_O;$oizDw9LsyTs8*21!BkDdv(rgDQU3t(U`8VV literal 0 HcmV?d00001 diff --git a/monasca_persister/openstack/common/lockutils.py b/monasca_persister/openstack/common/lockutils.py new file mode 100644 index 00000000..b970b21d --- /dev/null +++ 
b/monasca_persister/openstack/common/lockutils.py @@ -0,0 +1,322 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import errno +import functools +import logging +import os +import shutil +import subprocess +import sys +import tempfile +import threading +import time +import weakref + +from oslo.config import cfg + +from openstack.common import fileutils +from openstack.common.gettextutils import _, _LE, _LI + + +LOG = logging.getLogger(__name__) + + +util_opts = [ + cfg.BoolOpt('disable_process_locking', default=False, + help='Enables or disables inter-process locks.'), + cfg.StrOpt('lock_path', + default=os.environ.get("MONASCA_LOCK_PATH"), + help='Directory to use for lock files.') +] + + +CONF = cfg.CONF +CONF.register_opts(util_opts) + + +def set_defaults(lock_path): + cfg.set_defaults(util_opts, lock_path=lock_path) + + +class _FileLock(object): + """Lock implementation which allows multiple locks, working around + issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does + not require any cleanup. Since the lock is always held on a file + descriptor rather than outside of the process, the lock gets dropped + automatically if the process crashes, even if __exit__ is not executed. + + There are no guarantees regarding usage by multiple green threads in a + single process here. This lock works only between processes. 
Exclusive + access between local threads should be achieved using the semaphores + in the @synchronized decorator. + + Note these locks are released when the descriptor is closed, so it's not + safe to close the file descriptor while another green thread holds the + lock. Just opening and closing the lock file can break synchronisation, + so lock files must be accessed only using this abstraction. + """ + + def __init__(self, name): + self.lockfile = None + self.fname = name + + def acquire(self): + basedir = os.path.dirname(self.fname) + + if not os.path.exists(basedir): + fileutils.ensure_tree(basedir) + LOG.info(_LI('Created lock path: %s'), basedir) + + self.lockfile = open(self.fname, 'w') + + while True: + try: + # Using non-blocking locks since green threads are not + # patched to deal with blocking locking calls. + # Also upon reading the MSDN docs for locking(), it seems + # to have a laughable 10 attempts "blocking" mechanism. + self.trylock() + LOG.debug('Got file lock "%s"', self.fname) + return True + except IOError as e: + if e.errno in (errno.EACCES, errno.EAGAIN): + # external locks synchronise things like iptables + # updates - give it some time to prevent busy spinning + time.sleep(0.01) + else: + raise threading.ThreadError(_("Unable to acquire lock on" + " `%(filename)s` due to" + " %(exception)s") % + {'filename': self.fname, + 'exception': e}) + + def __enter__(self): + self.acquire() + return self + + def release(self): + try: + self.unlock() + self.lockfile.close() + LOG.debug('Released file lock "%s"', self.fname) + except IOError: + LOG.exception(_LE("Could not release the acquired lock `%s`"), + self.fname) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.release() + + def exists(self): + return os.path.exists(self.fname) + + def trylock(self): + raise NotImplementedError() + + def unlock(self): + raise NotImplementedError() + + +class _WindowsLock(_FileLock): + def trylock(self): + msvcrt.locking(self.lockfile.fileno(), 
msvcrt.LK_NBLCK, 1) + + def unlock(self): + msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) + + +class _FcntlLock(_FileLock): + def trylock(self): + fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) + + def unlock(self): + fcntl.lockf(self.lockfile, fcntl.LOCK_UN) + + +if os.name == 'nt': + import msvcrt + InterProcessLock = _WindowsLock +else: + import fcntl + InterProcessLock = _FcntlLock + +_semaphores = weakref.WeakValueDictionary() +_semaphores_lock = threading.Lock() + + +def _get_lock_path(name, lock_file_prefix, lock_path=None): + # NOTE(mikal): the lock name cannot contain directory + # separators + name = name.replace(os.sep, '_') + if lock_file_prefix: + sep = '' if lock_file_prefix.endswith('-') else '-' + name = '%s%s%s' % (lock_file_prefix, sep, name) + + local_lock_path = lock_path or CONF.lock_path + + if not local_lock_path: + raise cfg.RequiredOptError('lock_path') + + return os.path.join(local_lock_path, name) + + +def external_lock(name, lock_file_prefix=None, lock_path=None): + LOG.debug('Attempting to grab external lock "%(lock)s"', + {'lock': name}) + + lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) + + return InterProcessLock(lock_file_path) + + +def remove_external_lock_file(name, lock_file_prefix=None): + """Remove an external lock file when it's not used anymore + This will be helpful when we have a lot of lock files + """ + with internal_lock(name): + lock_file_path = _get_lock_path(name, lock_file_prefix) + try: + os.remove(lock_file_path) + except OSError: + LOG.info(_LI('Failed to remove file %(file)s'), + {'file': lock_file_path}) + + +def internal_lock(name): + with _semaphores_lock: + try: + sem = _semaphores[name] + except KeyError: + sem = threading.Semaphore() + _semaphores[name] = sem + + LOG.debug('Got semaphore "%(lock)s"', {'lock': name}) + return sem + + +@contextlib.contextmanager +def lock(name, lock_file_prefix=None, external=False, lock_path=None): + """Context based lock + + This 
function yields a `threading.Semaphore` instance (if we don't use + eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is + True, in which case, it'll yield an InterProcessLock instance. + + :param lock_file_prefix: The lock_file_prefix argument is used to provide + lock files on disk with a meaningful prefix. + + :param external: The external keyword argument denotes whether this lock + should work across multiple processes. This means that if two different + workers both run a method decorated with @synchronized('mylock', + external=True), only one of them will execute at a time. + """ + int_lock = internal_lock(name) + with int_lock: + if external and not CONF.disable_process_locking: + ext_lock = external_lock(name, lock_file_prefix, lock_path) + with ext_lock: + yield ext_lock + else: + yield int_lock + LOG.debug('Released semaphore "%(lock)s"', {'lock': name}) + + +def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): + """Synchronization decorator. + + Decorating a method like so:: + + @synchronized('mylock') + def foo(self, *args): + ... + + ensures that only one thread will execute the foo method at a time. + + Different methods can share the same lock:: + + @synchronized('mylock') + def foo(self, *args): + ... + + @synchronized('mylock') + def bar(self, *args): + ... + + This way only one of either foo or bar can be executing at a time. + """ + + def wrap(f): + @functools.wraps(f) + def inner(*args, **kwargs): + try: + with lock(name, lock_file_prefix, external, lock_path): + LOG.debug('Got semaphore / lock "%(function)s"', + {'function': f.__name__}) + return f(*args, **kwargs) + finally: + LOG.debug('Semaphore / lock released "%(function)s"', + {'function': f.__name__}) + return inner + return wrap + + +def synchronized_with_prefix(lock_file_prefix): + """Partial object generator for the synchronization decorator. 
+ + Redefine @synchronized in each project like so:: + + (in nova/utils.py) + from nova.openstack.common import lockutils + + synchronized = lockutils.synchronized_with_prefix('nova-') + + + (in nova/foo.py) + from nova import utils + + @utils.synchronized('mylock') + def bar(self, *args): + ... + + The lock_file_prefix argument is used to provide lock files on disk with a + meaningful prefix. + """ + + return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) + + +def main(argv): + """Create a dir for locks and pass it to command from arguments + + If you run this: + python -m openstack.common.lockutils python setup.py testr + + a temporary directory will be created for all your locks and passed to all + your tests in an environment variable. The temporary dir will be deleted + afterwards and the return value will be preserved. + """ + + lock_dir = tempfile.mkdtemp() + os.environ["MONASCA_LOCK_PATH"] = lock_dir + try: + ret_val = subprocess.call(argv[1:]) + finally: + shutil.rmtree(lock_dir, ignore_errors=True) + return ret_val + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/monasca_persister/openstack/common/log.py b/monasca_persister/openstack/common/log.py new file mode 100644 index 00000000..0f85ee9e --- /dev/null +++ b/monasca_persister/openstack/common/log.py @@ -0,0 +1,713 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""OpenStack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import socket +import sys +import traceback + +from oslo.config import cfg +import six +from six import moves + +_PY26 = sys.version_info[0:2] == (2, 6) + +from openstack.common.gettextutils import _ +from openstack.common import importutils +from openstack.common import jsonutils +from openstack.common import local +# NOTE(flaper87): Pls, remove when graduating this module +# from the incubator. +from openstack.common.strutils import mask_password # noqa + + +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config-append', + metavar='PATH', + deprecated_name='log-config', + help='The name of a logging configuration file. This file ' + 'is appended to any existing logging configuration ' + 'files. For details about logging configuration files, ' + 'see the Python logging module documentation.'), + cfg.StrOpt('log-format', + metavar='FORMAT', + help='DEPRECATED. 
' + 'A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s .'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths.'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and will change in J to honor RFC5424.'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Enables or disables syslog rfc5424 format ' + 'for logging. If enabled, prefixes the MSG part of the ' + 'syslog message with APP-NAME (RFC5424). 
The ' + 'format without the APP-NAME is deprecated in I, ' + 'and will be removed in J.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='Syslog facility to receive log lines.') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error.') +] + +DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', + 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', + 'oslo.messaging=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN', + 'urllib3.connectionpool=WARN', 'websocket=WARN', + "keystonemiddleware=WARN", "routes.middleware=WARN", + "stevedore=WARN"] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', + help='Format string to use for log messages with context.'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='Format string to use for log messages without context.'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='Data to append to log format when level is DEBUG.'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='Prefix each line of exception output with this format.'), + cfg.ListOpt('default_log_levels', + default=DEFAULT_LOG_LEVELS, + help='List of logger=LEVEL pairs.'), + cfg.BoolOpt('publish_errors', + default=False, + help='Enables or disables publication of error events.'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='Enables or disables fatal status of deprecations.'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. 
+ cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='The format for an instance that is passed with the log ' + 'message.'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='The format for an instance UUID that is passed with the ' + 'log message.'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + return None + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def isEnabledFor(self, level): + if _PY26: + # This method was added in python 2.7 (and it does the exact + # same logic, so we need to do the exact same logic so that + # python 2.6 has this capability as well). 
+ return self.logger.isEnabledFor(level) + else: + return super(BaseLoggerAdapter, self).isEnabledFor(level) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + if six.PY3: + # In Python 3, the code fails because the 'manager' attribute + # cannot be found when using a LoggerAdapter as the + # underlying logger. Work around this issue. + self._logger.manager = self._logger.logger.manager + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + self._deprecated_messages_sent = dict() + + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + """Call this method when a deprecated feature is used. + + If the system is configured for fatal deprecations then the message + is logged at the 'critical' level and :class:`DeprecatedConfig` will + be raised. + + Otherwise, the message will be logged (once) at the 'warn' level. + + :raises: :class:`DeprecatedConfig` if the system is configured for + fatal deprecations. + + """ + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + + # Using a list because a tuple with dict can't be stored in a set. + sent_args = self._deprecated_messages_sent.setdefault(msg, list()) + + if args in sent_args: + # Already logged this message, so don't log it again. 
+ return + + sent_args.append(args) + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(msg, six.text_type): + msg = six.text_type(msg) + + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid') or + kwargs.pop('instance_uuid', None)) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra + + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [moves.filter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(exc_type, value, tb): + extra = {'exc_info': (exc_type, value, tb)} + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) + return logging_excepthook + + +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except (moves.configparser.Error, KeyError) as exc: + raise LogConfigError(log_config_append, six.text_type(exc)) + + +def 
setup(product_name, version='unknown'): + """Setup logging.""" + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) + else: + _setup_logging_from_conf(product_name, version) + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string=None, + default_log_levels=None): + # Just in case the caller is not setting the + # default_log_level. This is insurance because + # we introduced the default_log_level parameter + # later in a backwards in-compatible change + if default_log_levels is not None: + cfg.set_defaults( + log_opts, + default_log_levels=default_log_levels) + if logging_context_format_string is not None: + cfg.set_defaults( + log_opts, + logging_context_format_string=logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). 
+ # Use old style calls, if the type is 'classobj' (Python 2.6) + logging.handlers.SysLogHandler.__init__(self, *args, **kwargs) + + def format(self, record): + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + msg = logging.handlers.SysLogHandler.format(self, record) + msg = self.binary_name + ' ' + msg + return msg + + +def _setup_logging_from_conf(project, version): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not logpath: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + try: + handler = importutils.import_object( + "monasca.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + except ImportError: + handler = importutils.import_object( + "oslo.messaging.notify.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + + datefmt = CONF.log_date_format + for handler in log_root.handlers: + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. 
+ if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') + else: + handler.setFormatter(ContextFormatter(project=project, + version=version, + datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + logger = logging.getLogger(mod) + # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name + # to integer code. + if sys.version_info < (2, 7): + level = logging.getLevelName(level_name) + logger.setLevel(level) + else: + logger.setLevel(level_name) + + if CONF.use_syslog: + try: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(facility=facility) + else: + syslog = logging.handlers.SysLogHandler(facility=facility) + log_root.addHandler(syslog) + except socket.error: + log_root.error('Unable to add syslog handler. Verify that syslog' + 'is running.') + + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. 
+ """ + return LazyAdapter(name, version) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg.rstrip()) + + +class ContextFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. + + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + If available, uses the context value stored in TLS - local.store.context + + """ + + def __init__(self, *args, **kwargs): + """Initialize ContextFormatter instance + + Takes additional keyword arguments which can be used in the message + format string. 
+ + :keyword project: project name + :type project: string + :keyword version: project version + :type version: string + + """ + + self.project = kwargs.pop('project', 'unknown') + self.version = kwargs.pop('version', 'unknown') + + logging.Formatter.__init__(self, *args, **kwargs) + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(record.msg, six.text_type): + record.msg = six.text_type(record.msg) + + # store project info + record.project = self.project + record.version = self.version + + # store request info + context = getattr(local.store, 'context', None) + if context: + d = _dictify_context(context) + for k, v in d.items(): + setattr(record, k, v) + + # NOTE(sdague): default the fancier formatting params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color', 'user_identity'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id'): + fmt = CONF.logging_context_format_string + else: + fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + fmt += " " + CONF.logging_debug_format_suffix + + if sys.version_info < (3, 2): + self._fmt = fmt + else: + self._style = logging.PercentStyle(fmt) + self._fmt = self._style._fmt + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = moves.StringIO() + 
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/monasca_persister/openstack/common/log.pyc b/monasca_persister/openstack/common/log.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d82e4958a6e87757dda1e23be79c4830356c0a8a GIT binary patch literal 27386 zcmd6QZERdudfvG+Bt=rBNPSVC_FhY-J@kqr$@0n@S@Mb^DVtu3;$BkLnqKXAIQNnq za=vu$9ZTFuKe}?;G);m4O@B0Sfi|0@pFfIpkt~X$Em8zXfucpz7H#vViv}pR+ZG7W zBtVM->GM42-aA7{-rcr8#e1E*>{W|Y^&T;;Kj1K?GpPXE+nB>m^{Cge0=w81u zzAyhLnq$@wu;H9p%cbGGS<9#4zOHb8S9rjz4WwoEnYDdsc+jj3rr{y8Hl%Rg z)P_xczggRFoTR@Ivo@lW9tRVH+P-{D@gQV&nT2KWf&F+W3g6O_=&Ivvv$|bavbX`_1+V+S8bvq{=TH#GhMaLcD4DkawofTUr}GLD!q`li2+P?jq|mx^e=X$pG(s* zhH0kzMYVID>6=)+7gESvP%S(1i^>JQe8Kpy8~>8=-!T4{jDOkqZyNv0#{VhfUork$ z#=mO(YsP=u_+K%b2h5tALg~8tno(ajy2dc8^f{%^cctG{`a4R0w<~=?>F+81t7$p_ zxXAPXP{j(~O49+8C8gVyz1@|*+?D>dG#xPU)c%UvS?x-{)0O@{(>HvA 
z&YJC)O>hcXz+)SEA&X3!fQxT)5aC)Qk~iB}%e&?nLZw#?;{|=OwqG|-a>m&mReZ{< zO_{rm*GymVn%NmP;eW|Hj^lrz%rBY^(D0i1#F!_>e1MV-$9&!YkO^z&jQjl7}aL6!l$*kEK0*s*DX zHJ0KTS3FE zlxwxL&d24j+H6N`->nDnR@0A)uDh_I2KQDT#;6+2IBC2Z_~%?d*eJJavAfX>>*Y9R zqbSsl0>9|aU|@EnwcT@Wx#3nDQCx0R0=M0+`or}yh99`Ca)i=uIYKDJFm1stFarDqiPTiV*r3e9bGVUquTH>8`Y28FsPMdTdSN7 zzLJdw@Lq`BaxH4&Q)@nm+$gX!!C1QI)vyskcq?qSH@67f4b-pxDgG=Ku&O4`<5St# zv=N9?Q*VA8M8G~{syTiJ@UgipU$Vjd%&E7UVcd?Zwa6w8A#pouHoB77V6BPuvn56m zt(T*n5~28@8T!#dq)dJ=Jz1Zg^xesKuT0*$GPx47njfsUHx-Ju&`-?X{dk{pw1{yZ z!dpSD72QE#IRvz^zNjSZY_{W8J9Y~IPKJxM;Nzf1c+bt>xbu!68kBvmVTu=bXS}6_ zrFUGFoGPljkArZ%iDo!}kE6R;U-f1fxJjn_BfDEzx_SF2=uN-?CQWmhs+-g07HG~+ z8JJ!M=4;B!Gpp~$V;F`Xw8Ef5R4Fyez@A~Gb*e|di|8t-jCq`6IfH|3za5rEejC+V zP;^DZ3=(l|&j|`PcHQ7%H4?F)c|rHt7C?nJfzv()3WRXW>ww6Q=rB45v<*bG%e$cB zMpicABYv~et_O`+V=QKKOAwq$dGoe+Yi2e2A%b)B%ijF#%~z8&zc_6U~e6e2Vy;OrR7=q=Ry<-xew@P-yseHWdiRO^{NHeShx+=NrVp$R%M_BO6Xef=X;GMQ2Vq8#TZks3s&H zSj0y4A=;q8y|wbr9GI>mY1iOW^T>9g=D-_MDif*h-FCHY%VJbcyth- zv+jlkgk~5}s`!FOF*=M%+Meywlq*##2QiUt@%B5VJ1g^EG>OQH7RcR53J{W5=YXq< zxw_B^K!GvY9-s+Y@`EspUSKQ8&Cm}tfORQ@Zjs##ixK5ixqiQOjodyONn*9?x=mpA zdNXczV)t8B-^NIN(fwMvR@n;byPXnI+mEi1&1-T^;8oEA6J)en1ZItL@F~!M3 zJqjw()P+ls?1fg?tU$q;Vp!6ZVhKEEvXBHmoGc{ZC{=xBU{OPaAq0ROQ_($?O;xg~ z2(4RvX)1~?q36B)4w!=4DJ`eGKe8B}@ZjQ}IX#!{M(_WB&T0Cy2YvbfYS1LPY}`bI zbS|=XE(otJ&c8pu2yO_mQjCf4t@e7Y z8f}%tC8Fp8lAgzZRSa4e^=giZJjDg6G(g7ceggoll~R6)Vv*qdytY)7{3*tc290%B zDoV#o5k6UAiH;A`L`rM!O-LI8d>5alK1Hb4jJ^{AEFb5d! 
zO|40f0oe0$O6fD+NPe|&cQvJw_cHQ4DmNad(V2=uJ;FTiB@E>s;TKWY9&m>8L-|U& zT%3`XYaRit6;^C#(1frrgRrnEMpB%kWp5;$p*ev?w46`!1}TVc_tA>?*qDYISP=;> zX*&@&OMbNy3vAvi=s@7AMky20>a&DOZ(?(at(W&j6to5f?h_KR*a2mapcs#1Fq#f5 zS*ecn&yc3gt8qr0KIe=x=2VcRKY4rw*%uLr0Vl_7=LkXzFhKDHSlkKNRV{y8lHEou z)Z!Tlr5OoEl<&ej?uRJo1u%dShPBvQQOKk^K1EELN%6mO6yWOld zJgPN9a!D((dpm!RJ0g$5x3fOILSiaVB`!@mE7)r?rnt#YhZr~T4#3%R+Sq>3SY?ibmaLILU!-+GX*h(6Jo9@RLCYUJ4_7V0t$nb5bcrL1toSMZctkO z;PuOzJKzO%IJWy!tEBojVQ^*9s12>fyM~Zf$3l=9QmcCoFYhLX@?SYl$}Qsz^o{52 zQcof3y~YnArgELpD%2zT5-|TW#w6WzAUBkk4!l_Y*6!1E!}S*3P%Z{3-C!}nvFK6C z(VJs+V65_5rUnc9+l`$@^FgD+M))HF5fnmlv;iD}(p9wqVG2_qy%9#BuUl>Af?MHC zeoV$dgkdTynJ^-gxH6FggLs5JkLXeO;@x8PC44$7&?Dd{idnT$jZ39J!H8tv6iWkJ ztE3ta;U!otA^>QB_eAq#KMV^#+j&wL0VP=gArsCgCASC6_P!)s+Gp<1Ct430N%h-7 zH0ym8pO~g5@LJ0^JAe|CimDIw@A3ziym=NT@ayFU%tYbmNEg=F{x^nZwVd3LbKDu~ zJLOoFu0w}p=7K*AL0~HJt}1p=qNhZ;fWiGS=Rj13P#cgzVAk5|pQcNalIZen1T0+! zV7F4+)^O0n?o$U$uuqfh52GMop5ef~mC3NHvltX)4W*K2n^y?V#Ed0{22Ei(49rV_U zSkA0la0F~%@7#v_6lGwi_2H9tlRt?kAO3+js52kiJ2*p8*!p#>$*kK@Mppz55x^gj zV06|NYD-wrW2XYIRzkS=E9Kg&*261@&6P?G-smeI&1LRX5I(y6NUof$F}g9z)xCqg zjb6hCNIciwzv}U9!}MsHTW#hWFt*M3C9U6 zkr~)&iR|hyqRq22XV?-W?>dr}3ZxfL^zkM`wET;6r@+(pty9xN%cEmlyLb5Z5(}&} z1K3n!DVm8po9Igr6o|`KL%7b+3_myUQDHc9sHIatl0B6^ANn~VlcHYWA0=^ezhp+o zp+6jU4&{&MMsj`F9!c*aW8oylgg!uF5Gx5y8aoILcKU2j1H!`276uYfGw5t*p9!x? zJ;MPa$6Tf5M4=OprK3UGfRuepub}Zh6Fs0}57elQ zhGSc0TlkyNKFSRnUxix#l-;s`Bn_Fv01q=hhK2*3=RX-JuiGOGPavuBf_Vg%@ux~1 zH84~ligS;bm5FhnmioHNvEY~@;2-pv6nFw95h$SYsM8BLIkxAl1Z@@0EHqF zTXh~&;=(L>)YrW8_y}Dy8xrk|X+&0StOLpr5ozO&nZL)^tA04Ikm7 z{P*#0E0B8DYPO^sdqozaQfzll@J0v1*?eY`L5bB~{_PYtZn zZ0*X_&_y96j?NrCfXsN%Ka6B^HbLp+J(tbWRuiA!MbZgPA*Sz;a|nimQD-=J$Qgj9 zIN(emH0Cfi;Ed~g5V@y8L!*cn_H&XQEt1|Pt(R4q6V*(ek*9FK{}F3YK6OmL zep>i+;G$5k6qh>~${?Xayd7{xazg`SeM6@;v-ehDS4vN=aNznw%;EY+#6_!gl4$W4 z>LxaWc@uy{u`z_M2_4kxngf}N$?E)(tIy&W5$8cXR>edcsak|V*_{TFcB3A5_Hn{? 
zqV7FFV;%Mha*_35OV2n1!3(a0N)g{Na{LA z_ps*>6py@GE7#Zk^4qWlm`Ed}#I`xFd$ zoT+7%fEJE)=zxaz&9ucT%i%ObBo2uqGS!CNC7xjkLNTa{m#LXjBY2<&RN996P==x2 z18)o?u`roWJ>VY_h9gKfU}ytQK6eoeuEJ@N@;G$iCAS#WBUA*ofk+bgmnr6?!ZRRe z3Mlw8i-^V*0(tk@G|+dy4O#KZa*&|=Y#Mf&gEoY%=8z3Ro)6p5e&g@AA>36Mv7u3e z`vd9&GWmcF;ReB&4Z+bfZbLX(f6#^w@i4h+9OmtU%U~e>5px+P`S>Vj<{#xoBK)74 zCCrpiRgPLE217k&!c&s+$CZ6dyBCx=q7vAqZl7cg(AY|00#LJ_qC~eBt4~fBScA!R z6p`{Mtj&&=sGyBCt=%Cx0ig)Z^)gveKeN6$VK9*kM}`NH%HtE6Rm(d2e4(OFwTLKm zP#33>N@+G*i?`qy^kvnvLA!q@VkGiV2@*%%L!?Ti7kENJlmq?XmOa^`s710>oZ1uP zeQM6$pJM}mo}ZuL=P&T{v;6!;e6o2g!6D@RJd=Nc$wc0C?6Y!X`ra=xgW|{znl&_s zWXx)T60~Co+$u-%#x1fEAqM79s^ZkK2dp+85nk}}+x1q|kt6a@d!Hb;_f*XFvH*qz zNOTlv9TU#*kN+<>=8VJIbt1P^pk=O0NqL1)$|q@Zt6@8^5cw2IojEJiE9psIe}_{h z1Yq#$9~qJWBezO9mX5LkwRjc7F+enIP(a(Myi1uH1@2+|U?K%$LlFp4tC&l!UZ(Uk z=4T`{!Q{Z$GAE}k3Qhr9hhY7A7#F}v5@)CP%V^Krz{d)SWkx^b=L$ch959=7{W_AV z)NIstWsJy>Qp!5V`Ej|{4kT&fbxU@DfsyY6c<#3=v1qfGOQR!1G#o&eO(wO~Z0 zOU+yp@FX91EFy8OykFtxJNVc%PDyD!Laz8E29U5sIScmA1Z!<|BZ$d+#ZRt+n*i-ioT5UY6=FJvM!q>@Jwt; zI5mX?dgx*0nS#k>vVc=uCDO{ApSnOGkx ziMh+>>~%7Pm1mIK^SNtrN;G1ZO25EKoNj!5DkjO^B38+H!KQkzw2 zpnO?^mws>)cbNjapP-Ut3*vGW&fh6tq%2UmVW=Q9*j1QDAW}#Qkbi_g z#&IAeaOw{jnWX?J!EOMhC&&_L2~Zp-TqC~H5rrsh$ZHJHT&+Z4vo($*brkUj4d#-u>Hlp7aLyebg{)vZ^zrK@z{569!;!67NXokFnV)!IV<~dCM@%HS@;)T7@i+iK5?~PvC8+~JM^h3md&OM%b8z=GMAH(rp zoO-9{NmjgbZUu@XBtq*xXD`#7jX=FFA=Q` zmjUcWtSYW7B_bVnmwFCdE3tTmIvwC{DU(?LfUtfUbD~j>Olv52$~g)j1pi~o2z(Et z`u9jqQ^l>fJu4s{rv~u3f!|Mbj**C*$B*!*_83vE1RDgu0y@LoY7Ngg>B|$U!fV*P z=%ojerzT?pS*J#2iRn5k+rb#`U$6{&=-Ow@FpWd)`Pyh?|C(z+K;R5?pJXo~!{01^ z(KJ4pfPl!b(10$(6TXlF@@pIebdE8B-aex25;i5qxYjn15s(GPr^xaC13q0aIm@)` zOvBH9dZoQ{c)w4WaD7eRap$08X|J>O(1wHMI-^ynS3~~y7|QRZ%LpRcr)8u{KxU$4 z{D)W-EF;htf)N`7Q!c&40fY0QKJ&Mp+{ zaXOZvDR2OXs7AwRb+_>fc1VY{EBa#t1u!WE<-4mCH$=%0?6i_W9-)u?VI!M=g-vqw zN1@y}0KxWz+QCyhX*zTlc*wD>=1kZo-qEoWzkedFf#+YOBhhsZd6?A5vjz&?Wd|os z_^GO&QU`pw2Y@-J&R;O$_qy_4w0Ur_qc>aJzUlHXQgIgxw`Cp!ym-avB`Q>C>l@JA zhRow(^B5`;AOYS5H4HGe@IFH=YX|n_=^S&drlHpwu87v 
z|1w6H+kMZ(lUm;c8pkUpY~LJf4TbvEJ-R56(noSI&7psdLSm z$DyEY#7NU41YO4q$TzRqw>@OI{x^t6Z{c%CcgA%W)c4ct@ac5j_k$4gz&R(JrLqbg z1{1a$bh;PCk~?h{c!JU6Jgh1*&k#NFQgG}r6OegVnI|*BA%yOhV_dEFZ^A^76p%uR zM`P0RRD3+{H>6+BHfzl=ElH>LN(_faJuUoQmLr?Rc_Lh)^-d$`<(bH`_63uYeQE~0 z2xddiyX{HZF6ea=YkL?&4i|ww@nj`5s?Ne_EZEfDOda=9vN|zJKe|SwVZipmGbCBA z$7PVtg%%e1Mv2Cv+qNDz5z^v#yIi}3T|w{Pv2|`1I%y@mERamnBiolqwJ_Aha*ZvZrM5u+--TF%h!P}WvuW>ZZP{C$ z8Z|O>M~QQ#_YH9k9%eYK7h=MF&6Ia?rMkm0rv#WItXEn7JnKFaO1;Yg5P$9XqzQK< zX0L?h-^YOdfWi{IQF`aNa}bt?0ce`^7|{kXkhkGHQuFXQjVAvxe>~4K)Wi6fr(zBu zfuDqQD_k3&#`J;M)!{Z9Bct_o~%flg;hI|2=0e3 zZ5#5>Ak0H%D2mXDQ~@R03Nd90!pVN`4pOZ4N$R%Nid#%uVidpLs#YA-8&E_Ns~S~& zf>+uKwMLTuEPguwVkb)wP^lzajD8y*54Num?=sbJCquR}y>Z1umDX2^$8Gcq)HFP$ zdQ}O?i!=F_Gx|+DwZTWHtVIf7f9#HJu;=PjYr0)dQTnPRV)zRq-$P#d6q6*WPk&JbMZh5 zzSwf<7|Ii{jJ-GR*od7mFG6VA-cKWBaqbM$t}}{XPnm)QmR9%{=Z+ud{*=#NBC1rv zqf@w|t=;5*M=9-Iy*xNWXA%naO0wiIl$)FcRT>yzb7Otp4-tHpOwJ)SNbih2_t6BJ zO7l7|sAGcEexf|uo}rhN`bp8VuhS&zid)7B%k&u_oa9QJsXCaSa>9*T8PsDB^RI&Q zDem%wKUo;GoAg}~p73gHg2jb_^jmqmlYSHBBe`WFhDN?f21-wI`nrcbw~>^>V_P3I z+u)WB`!Zhoo*=n)ry|~l%No~9DbjWI%9@J_hvj5=_Fcfv%YITjz^yeFJ-Cw05h|QZ zJ&R}K_#TLT_3QyvWWG~qA8aDjI4&?d?kVaC~n2|+S#nS1@;1-cS|2EJ4b-q_ns1nVlX)OfOkf4CRvX#tuC&( z(*O*lOHrwOZ-{GdTdPnOl6#WG$uy!le7JFv3lXMvumUg#n6Z*X*kiLqY>=@ldyZkN z4^mcW=A!)tT*9lus`#zIO%Uv6syhRue6pz$bZ&*L;`KyeOZGye3!5h24+H{j>X|q^ z?W0#0i2>LN3FW}@DK($I6=FdLXb=UvE<{{O+r&{2UY)vvx3d(`3wl~1qSFoY6xLUXok0wY*f5J<^p}AVRL3A{QXLg22Gd}ouPU7?t3#xNqDs*O z>j$FvA`Bxc!nyzfUJi?p_50AxB%@*YDNnZnThRzCF%PF{hT4`9Y3|->^9`7M`grpR zTPQRHBNA;+c^C+GVF)rT597n!y-AspbYBWCA!^m%p^*kv5QP!y>izE+{Jm=vc*jA8 zsqlw9bcdS^yd^QD@IhUoK)pjW<8L3~%h2H&hs9tbI^)0;1S5})9($AZ03kS{U@#hy zedQX9kO+^P#!G7BgzQHW+a(;K1XNDah-AxfM5j#nZJoS4tr5c*Bi$F@(hF)I19{PG ze}PqO)=GisFM50o?vhG%YB=&g@|aQMx@4r);rIHSwlotyWpazy}xmt zAztFpnSsu>wR3U9gV`!ESX@9zV})K=kE9)7ZanU2w?lg71FAxuHDr-E#t@x=_B5t+ zi2ueYJy_m%`T0Y9?0y!<=(Sg0#xn`H-Lw+#;(0~Nbcizq?a{XD29v4kJXmBGLmI_GdEgRslL$ZO_Ih28;&hb2)FZrX`;*K~c 
zoMZi?*sC4EWe^-@!FLE@xMz4d1RLE`_CghBNGQ$X7g0e2J;Or?#*v+HNzm3xAV82- zSL;}YxfBnoARm%S|K#z!3QU_Y+xElW3T^=NCR&2aSa+Xjl}6$)6X_1dJtumBWswC{ z@8ETtPeo)0I1EtCHf)aKHKlP9(> zWq!`$W36x+&^q)xJYDf;n94z=`Vx-c;D~qReVZ9TN7Jh5pN(4YNrZs?Ya|ig3^RCK ziW^bKol{8RohVq7oM+A)_YpW$0>mGz)s^jaW~ETCHZI(xe2r5h$_$L+ z-HSs*Mejc%Qo&Du^7!7sFIvGT(AEWSPx}aiUh=qOpyQtq`jLU3+p4^?&Na3m4wH^m@H3ayg4!eCyJl$ffR_i*>0k#2XHG zmMs#!N&OO;;6vms_L6?L4h+ta8r3)sVX9srEY03tyzQ-6snO3>l<4?A3Rzaq&Mcmv zMX0l$y;@2#@_)jmhy=6V!s-H!qIf4cXExfI znnJyrACdAmIlplPXr1lc1sKhH-#}soKmFm-+`vzd6j_u75&~txzq?N;CyyKHsKia( z$);J4o6pW=;$%C`!^rj^G8^ZQ%u&H6H5*6M_g7hwgvzs3VYQ-TP&`MeYZTpGi`lNIS3U&GUcZNW(mV8kSr}~U!OpDl#8;W5 zvQe+Y&_+-f=7`L(rY96-z@AAThqt~%`nytZq6*2Mk60 z{3<^$kVF0&L%+e#Z}Rh7{Lp(Rx2k+})>}vClgEpw*8S=rBq&-Zr$-B-Oora~S&+<5 zJni#AK9USq?R~trPh)J+|H20c+;u>!>kC)d2%KOO_*ehs$MXYy_Dm{E^K9x!e-gLp z$;s6;H~H6=&5z_!L*++QdIWAYI_&74<8zp32_^7_3YN6KH_ZN6*7m7ZNBV}KuIa!2 zap!P8ukZ`|hX#lGMh>1Bn>aji>qK#AXkv6|aAYX&jEoH(KYeg$X&^r~I5s@7|3q+N G?Ee6N(xQI= literal 0 HcmV?d00001 diff --git a/monasca_persister/openstack/common/loopingcall.py b/monasca_persister/openstack/common/loopingcall.py new file mode 100644 index 00000000..862f8b94 --- /dev/null +++ b/monasca_persister/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys +import time + +from eventlet import event +from eventlet import greenthread + +from openstack.common.gettextutils import _LE, _LW +from openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +# NOTE(zyluo): This lambda function was declared to avoid mocking collisions +# with time.time() called in the standard logging module +# during unittests. +_ts = lambda: time.time() + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCallBase. + + The poll-function passed to LoopingCallBase can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCallBase.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCallBase.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = _ts() + self.f(*self.args, **self.kw) + end = _ts() + if not self._running: + break + delay = end - start - interval + if delay > 0: + LOG.warn(_LW('task %(func_name)s run outlasted ' + 'interval by %(delay).2f sec'), + {'func_name': repr(self.f), 'delay': delay}) + greenthread.sleep(-delay if delay < 0 else 0) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in fixed duration looping call')) + 
done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug('Dynamic looping call %(func_name)s sleeping ' + 'for %(idle).02f seconds', + {'func_name': repr(self.f), 'idle': idle}) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/monasca_persister/openstack/common/service.py b/monasca_persister/openstack/common/service.py new file mode 100644 index 00000000..6227b6d9 --- /dev/null +++ b/monasca_persister/openstack/common/service.py @@ -0,0 +1,512 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo.config import cfg + +from openstack.common import eventlet_backdoor +from openstack.common.gettextutils import _LE, _LI, _LW +from openstack.common import importutils +from openstack.common import log as logging +from openstack.common import systemd +from openstack.common import threadgroup + + +rpc = importutils.try_import('monasca.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. 
+ is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. 
+ + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. + LOG.exception(_LE('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + systemd.notify_once() + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self, wait_interval=0.01): + """Constructor. + + :param wait_interval: The interval to sleep for between checks + of child process exit. 
+ """ + self.children = {} + self.sigcaught = None + self.running = True + self.wait_interval = wait_interval + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_LI('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
+ try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Child caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_LE('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.launch_service(service) + return launcher + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. 
+ if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_LI('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_LI('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_LI('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_LI('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_LW('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(self.wait_interval) + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + 
systemd.notify_once() + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + while True: + self.handle_signal() + self._respawn_children() + # No signal means that stop was called. Don't clean up here. + if not self.sigcaught: + return + + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + except eventlet.greenlet.GreenletExit: + LOG.info(_LI("Wait called after thread killed. Cleaning up.")) + + self.stop() + + def stop(self): + """Terminate child processes and wait on each.""" + self.running = False + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + + def start(self): + pass + + def stop(self): + self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + 
service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=1): + if workers is None or workers == 1: + launcher = ServiceLauncher() + launcher.launch_service(service) + else: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + + return launcher diff --git a/monasca_persister/openstack/common/strutils.py b/monasca_persister/openstack/common/strutils.py new file mode 100644 index 00000000..12caaf4c --- /dev/null +++ b/monasca_persister/openstack/common/strutils.py @@ -0,0 +1,311 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import math +import re +import sys +import unicodedata + +import six + +from openstack.common.gettextutils import _ + + +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), +} + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +# NOTE(flaper87): The following globals are used by `mask_password` +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS_2 = [] +_SANITIZE_PATTERNS_1 = [] + +# NOTE(amrith): Some regular expressions have only one parameter, some +# have two parameters. Use different lists of patterns here. +_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+'] +_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(%(key)s\s+[\"\']).*?([\"\'])', + r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?' 
+ '[\'"]).*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS_2: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_2.append(reg_ex) + + for pattern in _FORMAT_PATTERNS_1: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_1.append(reg_ex) + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False, default=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else returns the value specified by 'default'. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, six.string_types): + subject = six.text_type(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return default + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming text/bytes string using `incoming` if they're not + already unicode. 
+ + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, six.text_type): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming text/bytes string using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. 
+ :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, six.text_type): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + else: + return text + + +def string_to_bytes(text, unit_system='IEC', return_int=False): + """Converts a string into an float representation of bytes. + + The units supported for IEC :: + + Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) + KB, KiB, MB, MiB, GB, GiB, TB, TiB + + The units supported for SI :: + + kb(it), Mb(it), Gb(it), Tb(it) + kB, MB, GB, TB + + Note that the SI unit system does not support capital letter 'K' + + :param text: String input for bytes size conversion. + :param unit_system: Unit system for byte size conversion. + :param return_int: If True, returns integer representation of text + in bytes. (default: decimal) + :returns: Numerical representation of text in bytes. + :raises ValueError: If text has an invalid value. + + """ + try: + base, reg_ex = UNIT_SYSTEM_INFO[unit_system] + except KeyError: + msg = _('Invalid unit system: "%s"') % unit_system + raise ValueError(msg) + match = reg_ex.match(text) + if match: + magnitude = float(match.group(1)) + unit_prefix = match.group(2) + if match.group(3) in ['b', 'bit']: + magnitude /= 8 + else: + msg = _('Invalid string format: %s') % text + raise ValueError(msg) + if not unit_prefix: + res = magnitude + else: + res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) + if return_int: + return int(math.ceil(res)) + return res + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. 
+ + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + substitute = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS_2: + message = re.sub(pattern, substitute, message) + + substitute = r'\g<1>' + secret + for pattern in _SANITIZE_PATTERNS_1: + message = re.sub(pattern, substitute, message) + + return message diff --git a/monasca_persister/openstack/common/strutils.pyc b/monasca_persister/openstack/common/strutils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2c8b3735a8b3a559b2e42d35877c7d6421c8c7a GIT binary patch literal 10179 zcmdT~TXP#ncJ2Ww-Xxh4b&GtRrA-4A2~v{1S}!b1rbSZbQleA?$g(L?;1DwahXiKe z>47C|$d|Qq+dq)XQ>wP|mVHZ}_c2e&56E*WRe4WU9#WM`zVGx5KuMI6+Em2~W%Tsr zboc4*bG~yj#+3J&TF00-)cT2x;(b!A zpUjFMND(tlDept&<&{@Z4J>C|HLwY%Bt5OXY1Kfl3FTc<-i-2Ql~+{Wobu+CcUgH? zRAWS~pHWYLt^9GdxcMu!epWsGRQYF6_(-jvOK0=XslZbHS>;_-*Uobx=hgSv(+kQ! zrFQ>7`4`BI_3a(&A$w2!k9(g!-22zo!*6d|5514n4_LxQ<-f`QypPonnCdN_srOUm zU6bNFQv8|Je4>89nZC;%CH>EZ;Ju8#Bt=_!pDORV`T@J~6E>9#Hz_r9NXE)to%pR{ z)BnzI+MOh521(#++YLQ?$8Wa%*sgcNS`tK|o*N7P6Mt(33mp?CRqRZf-II#d1d^y$ z1DtwVe^+Ym2Ejm9b6aX|2f<)gQ<0iV5GYjX4^drs{HQpyd2_?Nv@wTtX1efb_3rJ; zW^i-*W?_4OJ4p7wnEoK)m@7-Sf`36qe{giaJ@$L17kMkM2)|b(O%h=x?)bt;6iRJZ zhVS}XN|z%$^`UeOBTbcj^myaRhAwXEAr^`odUGZjMb`CNL3rQQ zIvLAK)i#Ul#LSZ@_L4WAX=9as7gdF+!k*uq)*JfLqt7;xPGcJVcq9LKV{&swzkd|T zqm9Xp{O0uBrJIE`tKWHsZsrJzJSiR(H~;YJL2t?#PR5xYe}+jnXE2=Jyfm%fB<=N- z?1oHqeZE)JMHxz%NGr{J;J(ClyvQC!5`*U_H^FqT4~l7*&P{1dmxTIh{rM}p%%a;y zzF55ZXrcJa&6$ln%B;gM>?<21gV-D-oe%B(9@yuN@=O7B({kWAgBoR?KT6*%{PcMw ziBkKDNDu>XEY&_|6pb|cpOLJDi66IPKe1g~CvgxqY!_-~Z%0wncSBJ)w07dq_Jd>x zdUwT+V*B#k*qDvKg|M4-`_654d^-qBriHDAd93Y+Rf%fa2rd`MMzbM~ZD|&t{@<6YXPQ5OS)}pW;H0*lN^zF7AYph1N6gUX5 zT4gZe6mnc(wH`;Us#&&a8AgllZ14KDq#*Q6^b;iWm_F9?Uf_e%X4jivjB1^hA0~P} z_S=yzHn2UN?YUain%9Y2+XJ5sf4&ukuCBR7AeIgQX>nFl?bk$YKa?)>=!_2N9~1KQ zT)UfGz}(m!!>Tat>XhZL$Qw~Z9k1~~sn^7TTlnd}MPd#gNU)ouZ2%N8Kafhqfl{GJ 
z$L^r|Ua8$Bcal?8CNYkh6s81C~2$5e7s?PE$y{mN3|pxOui!a4iabEF@?)?br>#tSbxpib}l{$Jl`K-Tl8w#Rx!+>K1S0BE7N7{D^b9UzYltf%^s{!2Xh->{ z@N51qB7vlBj;jMWnaEq}{8!eu;#-yrB-J#j`6-=WGy2UsF_Lq}lIqt(?KKVa^@bn% zPuuYgEXgSPkkVA{2QQqZou4Y;P^T%aZms6G6L%XRXwI4YTe7DL{f22RRs+q(=J$H6 z6=>(BG5~x6mBN~FDRl4@`V2ng2&+lA4G)A|CjQezisFN?xh=V7^u&*)OT~GxR4rGW zm9^VtacL-$P=BSC7w(k%O~ft{2qD^y0DFWq9mR?7Imj8t`Z8HyvqLw;5Q_2vvVb*4 zl_W2WV1V~YM^S0%#_Ms)4&5GzsS;O8ot&^Hauc}|)}Y1T5dOVmjap;Yacey1TmhZH zJeRn+-HcS@h?b0XxDoL{oN4;Mr~j6 zV`3P9(n@vDl5Y1nH4BK1p5=$(yJ`^Uyf>!ezf_iG83I^_l;CR^fpUR*r~d*PyC`Ox zCLh+0&_e;5-|i-G?~QQT9a?UCE2G%5gE~R8n+E`gQ6d-s7;u}h?|NN8TR=}Bg}xFj zzuW2!U$QG0KdD+S*=65H*n!#U zd_ZdgFXJIov7IDom*(fas3ul>Ckp3a|L3pHH-qiijk_@I@K^NQPSR?cnNw>}lJ(QX zxY@nnY|LVL4N6 zcEqL&NBRtoXbKzUhWW(a_U+Wc($vdNQZIh8R-n>4A269>@*xr!e|S?th(#J0ASm=E zHz5pIap=H{`e}rA98xDZ1_?5nuud5> z6CObZbH1;K(#w8nx9(TXlKzNWL04TJf6Ka6+<Qr8WMQlC;Pg7BV%>A5W_!{O>j_UD9!U~W zpk)x0p1@TXkd)DLpGkhy0Fm%3aAJ7=EsFvtm)~Ep>4t#}E*~+rzL3vkv8kS-(ZI4lfz51RMyE zN&JRLu@}Et>T85{2qdBNAUPOP`-mTZBc2CfW{80z>e)2RCe}!3crcO`5ro2LAC*A! 
zcWQrFdBds%>TiFi!nf(8<9-(unBe)@_eYfe2lhA^RpA9@;K7)v6-^fbS7*@ztcw4G z0W7>*Ixv7cVW^B;xxg*a{{%?76@}lyZ{;0G>W#oiMYNgqW`y|hMKKmNNMWgh&<^eh zy6aB6Esm>bG;Ra!(xID?)$Kx%OwZb@L7KUneY1+&X~pfVqLNlrOhr$>R=?mdNbd5- z%x(UdsqhzkVQyJo##(vh8P4`HMzZ;{Y319Sbq_;xLe5o5XIP68-vrr=a4{}-!lt?R zTecSg=jnZ<8v{3^9VBkkZu$wXMeO`)-td_MDlxERmkqoGVY`#aMy7ruqF9FFI4KSH zCv|)bIC9denY3iXRp@a*-;LdE{fgekE34wn#cg7wGTRH>QR_Fr>x(i#ZhL!k81;}x zG;Bn~E~MtAMAs^4AzqU{rJgIeh8rbZAYfOHo}xci+L58Uw)+uL-`CsWH69&3aaWC- znhL3e$t!8Yp))GklT&)qT$V8|?%$1Xi{pmt?O=p@!Z17QpDGFOSzPSdl$J2tStK3p zJUSH^HzvGvd$6`rsg}PfSC;NpSJsyAiKn*ecg2a52+z4pT7r|@;z&*uPjZbo>a>NI zcJw4Un~iwiSz2ECy1MlB{d;RmYZW2DS&2i4VRj@_^Mj_SxnQ|?VTVLTLf-!2OOI__ zEQ$LV`;Dsq)cF|mIbV`Ly0|SDmnlid^ZP@=C9fk)=Ca|Ozb5u!9Nd^gswPoYB=PTA zMc-Xr5GNDX;gmIwaPu6}bFdPp;D8QUqqz%6&se>vlg_8uj0Q5sN2I?8XcvFaTr|Q+ z&?K+`b&P6V7t**Yq%CSv8A5Y4gvM;NI!idd7n zYnGEra|+mW@g&mcbs$x?72zr{jKU({D(u=0qC&WWF>by1h-AP6*wA)sz7}o506X0_ zZnf~DBo!;6ZU?;S!{yxKt{XNYc!*oN*=Ypz?iR0^FRF*ilO0n3ixZbCcdR#}vo zahsjjehpHQr!8#xTDy(sZhq2AXd%3i)I6n^*K*wrgNY$JagcN|hA~{vI}v7%k};bS zFQ)h3P->XwXV?N<(Bk*4cp znJsG)EKLealX&Oj20hn?VJ7o0#Xk%KB1ub?KY3U~RwZ z{z9d)4wpI3kA-g$^y706g9( z4J~g^9bV$fvB*=J5a9m?ad;yjJ;SIliM;GlwTk=dYSmd|#~mheDYD7j6p7wJ(tDkr zGq2OU3uCw=FVx0oze8G@B2cvP`0gwt?_6Rs%cRQ0Wm0F-U=kpa+uZ8%J?HL1rP>pO zD@Rvf6*Y01?8l>DyjNMcbH^!XWw(uIbkBJYWv9r5%p2DNZ<}(JkU|K&z=Cm~g~bsAsWIyW%-#`sUh^8?oSd*gHC7yk>B6kyK) literal 0 HcmV?d00001 diff --git a/monasca_persister/openstack/common/systemd.py b/monasca_persister/openstack/common/systemd.py new file mode 100644 index 00000000..bbb47dfe --- /dev/null +++ b/monasca_persister/openstack/common/systemd.py @@ -0,0 +1,106 @@ +# Copyright 2012-2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper module for systemd service readiness notification. +""" + +import os +import socket +import sys + +from openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def _abstractify(socket_name): + if socket_name.startswith('@'): + # abstract namespace socket + socket_name = '\0%s' % socket_name[1:] + return socket_name + + +def _sd_notify(unset_env, msg): + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + sock.connect(_abstractify(notify_socket)) + sock.sendall(msg) + if unset_env: + del os.environ['NOTIFY_SOCKET'] + except EnvironmentError: + LOG.debug("Systemd notification failed", exc_info=True) + finally: + sock.close() + + +def notify(): + """Send notification to Systemd that service is ready. + + For details see + http://www.freedesktop.org/software/systemd/man/sd_notify.html + """ + _sd_notify(False, 'READY=1') + + +def notify_once(): + """Send notification once to Systemd that service is ready. + + Systemd sets NOTIFY_SOCKET environment variable with the name of the + socket listening for notifications from services. + This method removes the NOTIFY_SOCKET environment variable to ensure + notification is sent only once. + """ + _sd_notify(True, 'READY=1') + + +def onready(notify_socket, timeout): + """Wait for systemd style notification on the socket. 
+ + :param notify_socket: local socket address + :type notify_socket: string + :param timeout: socket timeout + :type timeout: float + :returns: 0 service ready + 1 service not ready + 2 timeout occurred + """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.settimeout(timeout) + sock.bind(_abstractify(notify_socket)) + try: + msg = sock.recv(512) + except socket.timeout: + return 2 + finally: + sock.close() + if 'READY=1' in msg: + return 0 + else: + return 1 + + +if __name__ == '__main__': + # simple CLI for testing + if len(sys.argv) == 1: + notify() + elif len(sys.argv) >= 2: + timeout = float(sys.argv[1]) + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + retval = onready(notify_socket, timeout) + sys.exit(retval) diff --git a/monasca_persister/openstack/common/threadgroup.py b/monasca_persister/openstack/common/threadgroup.py new file mode 100644 index 00000000..83df8b38 --- /dev/null +++ b/monasca_persister/openstack/common/threadgroup.py @@ -0,0 +1,147 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import threading + +import eventlet +from eventlet import greenpool + +from openstack.common import log as logging +from openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify if. 
+ + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when + it has done so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + def link(self, func, *args, **kwargs): + self.thread.link(func, *args, **kwargs) + + +class ThreadGroup(object): + """The point of the ThreadGroup class is to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. + """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + return th + + def thread_done(self, thread): + self.threads.remove(thread) + + def _stop_threads(self): + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + # don't kill the current thread. 
+ continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + def stop_timers(self): + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def stop(self, graceful=False): + """stop function has the option of graceful=True/False. + + * In case of graceful=True, wait for all threads to be finished. + Never kill threads. + * In case of graceful=False, kill threads immediately. + """ + self.stop_timers() + if graceful: + # In case of graceful=True, wait for all threads to be + # finished, never kill threads + self.wait() + else: + # In case of graceful=False(Default), kill threads + # immediately + self._stop_threads() + + def wait(self): + for x in self.timers: + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + continue + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/monasca_persister/openstack/common/timeutils.py b/monasca_persister/openstack/common/timeutils.py new file mode 100644 index 00000000..c48da95f --- /dev/null +++ b/monasca_persister/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. +""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, six.string_types): + after = 
parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns an iso8601 formatted date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. + """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert utcnow.override_time is not None + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. 
+ """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. 
+ + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/monasca_persister/openstack/common/timeutils.pyc b/monasca_persister/openstack/common/timeutils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ef1e167d273d251208b99fa48190e1878c4c3d7 GIT binary patch literal 8667 zcmd5>O>^7E8D5YQCDD>4%TjE`ZUQ?|!=x6Kq)F_mX&OhC8+U5i1In?ZY0E+YC_y3t z2D^++PkgB}({nGm^au3bLr?t!o&J$dXL{1KTkJeFPNwpod55dYMwO=aavf3{f@|4=2D&%Rk zKdnNHR#b@o85LF3{w$Y@sw$jTUt*C{Dy*n4@hkJvkI@onlJEoSsA6CD)mGubfM?iQC~Ppzp6}0C1qvG>Zzlilw@d1Wgq9lu^0$Lm;=i` zDXT9qN92=|$^t$khR}1BH}iq{`}#7PhRNdOFmCl-!zYgu_g3SMd+oh9-*Q`N*6~dp zzpB)|y=?I7G(8g}=|eLMQJbvA`wdmm#0p%Xxp(XKPV?p+@AmaQ^U^a0 znvHuOHFmb|?B0+y&Eq&}rMZ~+Sdly3O_NBr3_U328#9MLx_6+XAWcH))JFc8$2ANo zY4jsVoA-2->CG^XOq8|z;pUAr=yjsR=*=wZrh4NrHb=dKdXRQDwef=!JoRC;*+~;$ z2mS^Ou49c`i=O7NC+$Xw+_H(AalwR2S@{23I6w_E zD>5zmu4#^X07q%m_}kh=bJx#wWL=<@rJZj$L2mZ^p=e!G#cf^Q)(jCWaw)6qWLcWY z%GQtY`fc--8ogbO{7MwQCW z8E4r!?<_hJ+|(++V$qi&d`EE8z%{yoUPI$Wrk5o;4?PP(46?c?$K#?NRcS971rnXD zj+qoS(L-oEnD81!0)=(^fok{wqMEZ1=+}j_(~}8KqK+S6a1Y9G{Sj?&S@!@@3ZOD6 zhQ#&IX%!?`RCE=<)a6Z9VXIivBi2ufwuVlX>+Gl4oTiyoDJZ~UpszR4&8%RPgyaSw z`^l`zF0#MDu_B*8RSwj7zER`d`bTJX3FqzjaUSMCdOtgeP3C77`yT^jT__b$?k5|sgRFRRd;8!dLxO-U->A2+{M94^xw41g=&<}IuC$3F=+L}fz zkoWA?1QdwPv5uLZk|6Syxp_hrybRf1z*l=0`D4t>Vn+)uFLI_V0kwot+xQli=>m2q z-eriBVoJtOtec{lCN=X5bmW#?b}p2bO9RyTe_E1WAjW*QB?(_&v!vewkDwuw=;5~- zQdDa#=)J=GX*cgxHqWw^w~kS3B60}%iAl7OJhE8m6Kr$bLg3Z9(D$CV|v~82yc;KdA?+n5pAEm zY4mS!k>};b@=__lrF;e?bO)DK|JgOwU&&>gTB!W{pIrmZEDOziM>&pTCpR9TYOz+b zDJtiebl^Kyt^Pqf8nuXJ%XVk=n7gQP7Z!`&fF$&$n|2LB`?>*q9rTRliypCR2t?0c z%d%@cdBFjg*Egw({P~*PszjdU4j36FLt`2w zB0_6jDVm{m=<%s2v?&xQ_q~5$R_=SK;>X;rT6OWc?VljI8kGEG-mdSWqGZ-Iwirq? 
zbhS|dBJW07l(w$8q)EO(H+5RKt8AeV@;mMrcx(;GDafi##4eT_U99fjYPm^jWGCFs zH$R6UV#yUZj9PxLZ8QlQcH#!TEJG11WX2y;rW5*bkN_AjeP7xJMSzgkkLjKQg})>TNKT~_z+CoZsR zO0eqF9IK{mZGE-WM8!N-3jYwlb3Oc=q^U4w7$w#fdf-!sFndv7)Twu&NJt{%sAcnp zK$gbw15b;j#PwFNzAZ)8`9#ck;lz|~WE;MHcxxJYxZj~SFLPG`XiF$`7lwc~U{yjK zCH`!lfC)g60cegKu$8&WD`=jj-Glys)Jh)6U70nc^=B%~y7D9uP#SbhXE_i5J^Bmi zf+(K>zac`yj{*K;Sakv2UerNANQmcwQTQL6j7l8g>f<5({PUk-Js-3qKO2wiuh3oS zf|1SdBEH+~e}D$Q^0l*1s0Io`Kb^^*cj7oxiY-=JQ!<28Pn>>X4o;>kBFymMnLV?} z1v3qycDjdO5R^wA&AP#c_FK^)h~0-c{&PEVkflXUWf6ThHPM!9k0Z(1SvhsPQFz4# z0U>51V|><&Go)hFXZFOHHM1nzx&XRxm(cHnt15-_L31P&vJVA`a0EUo^sy5sAT=V| zO_)=>W4I|P>MTcPz=d&OydOjG-e>a@HXpFz>4Enlo9k#KT4WPoypw|ctU^0DH9Yd$ z?I!m12VCna8a2&x(Ki4u0{?Up@)vQEyATNf5;m|ed}GbJ1==<17iib4W1wBLo`H7F zx(3=c>ln>8w*7{F73d7&8DWqMJ>p=kekI7QnlMAxHf)M)MMvlq8H>El?z;&V$H2jF>kc zf~~}G@5B5o3GYlKG`7=D7cMV2H*z1&r1B^nTY?$)ibNLJS0)P-Nw`?ot@(IK;c!Z)=wjy zyk^`ZABc;B{3s1!^ASt(J=Wbm`jfZo?_F_GtOq^5(j(LF_(>1o)3>e=^EfzOLNvrg zp;*p3;}1VtR5gm;&)B?;MgkjXuH|?|d_%lV&ecY+T3eFxF>aX<2#{y^3-)7iwo-OZ zgTk%aLr%u~fW-0S1X2DJtyHDV!E!=*j^SUNbpixVSf~;v^xx6k;JYEaL;4|`97uaa>Je-l#Phx z^Aq9=a#oBJQ*P<6*s2AO*)^xeBLNw0Hp4V% zHoXmWSn4gO(##k3K$7s{(PxD4-QDqS?riUkzU;N-52xMq+d8r*66biAyyLn^i5l}> zV?(F&-eg0E^4?+d1{)p&dQt%J^@X>^<_4RaY;LiU=)41Is9uqtZ_1s;hTiX_VXqy1 yz@kd?Au94}VV><&1@C|><;D5A>f+4&^xW)RHUBp~SDrrwd3tVoetwS9!hZpyAf~eb literal 0 HcmV?d00001 diff --git a/monasca_persister/persister.conf b/monasca_persister/persister.conf new file mode 100644 index 00000000..f57a74cd --- /dev/null +++ b/monasca_persister/persister.conf @@ -0,0 +1,24 @@ +[DEFAULT] +log_file = persister.log +log_dir = . +log_level = DEBUG + +[kafka] +#Comma separated list of hosts. 
+uri = 192.168.10.4:9092 +alarm_history_group_id = 1_alarm-state-transitions +alarm_history_topic = alarm-state-transitions +alarm_history_consumer_id = 1 +alarm_history_client_id = 1 +metrics_group_id = 1_metrics +metrics_topic = metrics +metrics_consumer_id = 1 +metrics_client_id = 1 + +[influxdb] +database_name = test +ip_address = 192.168.10.4 +port = 8086 +user = root +password = root + diff --git a/monasca_persister/persister.py b/monasca_persister/persister.py new file mode 100644 index 00000000..fbe88a24 --- /dev/null +++ b/monasca_persister/persister.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Persister + The Persister reads metrics and alarms from Kafka and then stores them + in InfluxDB. + + Start the persister as stand-alone process by running 'persister.py + --config-file ' + + Also able to use Openstack service to start the persister.
+""" +import threading +from kafka import KafkaClient, SimpleConsumer +from influxdb import InfluxDBClient +import json +import urllib +import sys +from oslo.config import cfg + +from openstack.common import log +from openstack.common import service as os_service + +import service + +LOG = log.getLogger(__name__) + +kafka_opts = [ + cfg.StrOpt('uri'), + cfg.StrOpt('alarm_history_group_id'), + cfg.StrOpt('alarm_history_topic'), + cfg.StrOpt('alarm_history_consumer_id'), + cfg.StrOpt('alarm_history_client_id'), + cfg.StrOpt('metrics_group_id'), + cfg.StrOpt('metrics_topic'), + cfg.StrOpt('metrics_consumer_id'), + cfg.StrOpt('metrics_client_id') +] + +kafka_group = cfg.OptGroup(name='kafka', + title='kafka') + +cfg.CONF.register_group(kafka_group) +cfg.CONF.register_opts(kafka_opts, kafka_group) + +influxdb_opts = [ + cfg.StrOpt('database_name'), + cfg.StrOpt('ip_address'), + cfg.StrOpt('port'), + cfg.StrOpt('user'), + cfg.StrOpt('password') +] + +influxdb_group = cfg.OptGroup(name='influxdb', + title='influxdb') +cfg.CONF.register_group(influxdb_group) +cfg.CONF.register_opts(influxdb_opts, influxdb_group) + +cfg.CONF(sys.argv[1:]) + +log_levels = (cfg.CONF.default_log_levels) +cfg.set_defaults(log.log_opts, default_log_levels=log_levels) +log.setup("monasca-perister") + + +def main(): + try: + metric_persister = MetricPersister(cfg.CONF) + metric_persister.start() + + alarm_persister = AlarmPersister(cfg.CONF) + alarm_persister.start() + except Exception: + log.exception('Persister encountered fatal error. Shutting down.') + + +class Persister(os_service.Service): + """Class used with Openstack service. + """ + + def __init__(self, threads=1): + super(Persister, self).__init__(threads) + + def start(self): + main() + + +class AlarmPersister(threading.Thread): + """Class for persisting alarms. 
+ """ + + def __init__(self, conf): + threading.Thread.__init__(self) + self.conf = conf + + def run(self): + + try: + + kafka = KafkaClient(self.conf.kafka.uri) + consumer = SimpleConsumer(kafka, + self.conf.kafka.alarm_history_group_id, + self.conf.kafka.alarm_history_topic, + auto_commit=True) + + influxdb_client = InfluxDBClient(self.conf.influxdb.ip_address, + self.conf.influxdb.port, + self.conf.influxdb.user, + self.conf.influxdb.password, + self.conf.influxdb.database_name) + + for message in consumer: + LOG.debug(message.message) + + decoded = json.loads(message.message.value) + LOG.debug(json.dumps(decoded, sort_keys=True, indent=4)) + + actions_enabled = decoded['alarm-transitioned'][ + 'actionsEnabled'] + LOG.debug('actions enabled: %s', actions_enabled) + + alarm_description = decoded['alarm-transitioned'][ + 'alarmDescription'] + LOG.debug('alarm description: %s', alarm_description) + + alarm_id = decoded['alarm-transitioned']['alarmId'] + LOG.debug('alarm id: %s', alarm_id) + + alarm_name = decoded['alarm-transitioned']['alarmName'] + LOG.debug('alarm name: %s', alarm_name) + + new_state = decoded['alarm-transitioned']['newState'] + LOG.debug('new state: %s', new_state) + + old_state = decoded['alarm-transitioned']['oldState'] + LOG.debug('old state: %s', old_state) + + state_changeReason = decoded['alarm-transitioned'][ + 'stateChangeReason'] + LOG.debug('state change reason: %s', state_changeReason) + + tenant_id = decoded['alarm-transitioned']['tenantId'] + LOG.debug('tenant id: %s', tenant_id) + + time_stamp = decoded['alarm-transitioned']['timestamp'] + LOG.debug('time stamp: %s', time_stamp) + + json_body = [ + {"points": [ + [time_stamp, '{}', tenant_id.encode('utf8'), + alarm_id.encode('utf8'), old_state.encode('utf8'), + new_state.encode('utf8'), + state_changeReason.encode('utf8')]], + "name": 'alarm_state_history', + "columns": ["time", "reason_data", "tenant_id", + "alarm_id", "old_state", "new_state", + "reason"]}] + + 
influxdb_client.write_points(json_body) + + except Exception: + LOG.exception( + 'Persister encountered fatal exception processing alarms') + raise + + +class MetricPersister(threading.Thread): + """Class for persisting metrics. + """ + + def __init__(self, conf): + threading.Thread.__init__(self) + self.conf = conf + + def run(self): + + try: + + kafka = KafkaClient(self.conf.kafka.uri) + consumer = SimpleConsumer(kafka, + self.conf.kafka.metrics_group_id, + self.conf.kafka.metrics_topic, + auto_commit=True) + + influxdb_client = InfluxDBClient(self.conf.influxdb.ip_address, + self.conf.influxdb.port, + self.conf.influxdb.user, + self.conf.influxdb.password, + self.conf.influxdb.database_name) + + for message in consumer: + LOG.debug(message.message.value) + + decoded = json.loads(message.message.value) + LOG.debug(json.dumps(decoded, sort_keys=True, indent=4)) + + metric_name = decoded['metric']['name'] + LOG.debug('name: %s', metric_name) + + creation_time = decoded['creation_time'] + LOG.debug('creation time: %s', creation_time) + + region = decoded['meta']['region'] + LOG.debug('region: %s', region) + + tenant_id = decoded['meta']['tenantId'] + LOG.debug('tenant id: %s', tenant_id) + + dimensions = {} + if 'dimensions' in decoded['metric']: + for dimension_name in decoded['metric']['dimensions']: + dimensions[dimension_name] = ( + decoded['metric']['dimensions'][dimension_name]) + LOG.debug('dimension %s : %s', dimension_name, + dimensions[dimension_name]) + + time_stamp = decoded['metric']['timestamp'] + LOG.debug('timestamp %s', time_stamp) + + value = decoded['metric']['value'] + LOG.debug('value: %s', value) + + url_encoded_serie_name = ( + urllib.quote(metric_name.encode('utf8'), safe='') + + '?' 
+ urllib.quote(tenant_id.encode('utf8'), safe='') + + '&' + urllib.quote(region.encode('utf8'), safe='')) + + for dimension_name in dimensions: + url_encoded_serie_name += ('&' + + urllib.quote( + dimension_name.encode('utf8'), safe='') + + '=' + + urllib.quote( + dimensions[dimension_name].encode('utf8'), safe='')) + + LOG.debug("url_encoded_serie_name: %s", url_encoded_serie_name) + + json_body = [ + + + {"points": [[value, time_stamp]], + "name": url_encoded_serie_name, + "columns": ["value", "time"]}] + + LOG.debug(json_body) + + influxdb_client.write_points(json_body) + + except Exception: + LOG.exception( + 'Persister encountered fatal exception processing metrics') + raise + + +def mainService(): + """Method to use with Openstack service. + """ + + service.prepare_service() + launcher = os_service.ServiceLauncher() + launcher.launch_service(Persister()) + launcher.wait() + +# Used if run without Openstack service. +if __name__ == "__main__": + sys.exit(main()) + diff --git a/monasca_persister/service.py b/monasca_persister/service.py new file mode 100644 index 00000000..7c372499 --- /dev/null +++ b/monasca_persister/service.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from oslo.config import cfg +import sys +from openstack.common import gettextutils +from openstack.common import log + +cfg.CONF.import_opt('default_log_levels', 'openstack.common.log') + +LOG = log.getLogger(__name__) + +def prepare_service(argv=None): + gettextutils.install('openstack') + gettextutils.enable_lazy() + log_levels = (cfg.CONF.default_log_levels) + cfg.set_defaults(log.log_opts, default_log_levels=log_levels) + if argv is None: + argv = sys.argv + cfg.CONF(argv[1:], project='persister') + log.setup('persister') + LOG.info('Service has started!') \ No newline at end of file diff --git a/monasca_persister/test/__init__.py b/monasca_persister/test/__init__.py new file mode 100644 index 00000000..58cbdea8 --- /dev/null +++ b/monasca_persister/test/__init__.py @@ -0,0 +1 @@ +__author__ = 'dieterlyd'