Sync with latest oslo-incubator
Removed modules from openstack-common.conf which have been removed from the
oslo-incubator repository.

Change-Id: I5d3a2e5c99cbe662ecbfd052466d6d6091b9da63
parent 36b8aeb917
commit d8015d309c
@@ -1,17 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-
-six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
barbican/openstack/common/_i18n.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html
+
+"""
+
+try:
+    import oslo.i18n
+
+    # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
+    # application name when this module is synced into the separate
+    # repository. It is OK to have more than one translation function
+    # using the same domain, since there will still only be one message
+    # catalog.
+    _translators = oslo.i18n.TranslatorFactory(domain='barbican')
+
+    # The primary translation function using the well-known name "_"
+    _ = _translators.primary
+
+    # Translators for log levels.
+    #
+    # The abbreviated names are meant to reflect the usual use of a short
+    # name like '_'. The "L" is for "log" and the other letter comes from
+    # the level.
+    _LI = _translators.log_info
+    _LW = _translators.log_warning
+    _LE = _translators.log_error
+    _LC = _translators.log_critical
+except ImportError:
+    # NOTE(dims): Support for cases where a project wants to use
+    # code from oslo-incubator, but is not ready to be internationalized
+    # (like tempest)
+    _ = _LI = _LW = _LE = _LC = lambda x: x
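
The translation markers defined above are consumed throughout the modules
synced below (see the eventlet_backdoor.py and service.py changes). A minimal
sketch of the calling pattern; the function and messages here are hypothetical:

import logging

from barbican.openstack.common._i18n import _, _LI, _LW

LOG = logging.getLogger(__name__)


def bind(port):
    # _LI/_LW mark log messages for translation at their level; plain
    # _() marks user-facing strings such as exception text.
    LOG.info(_LI('Listening on %(port)s'), {'port': port})
    if port < 1024:
        LOG.warning(_LW('Port %(port)s is privileged'), {'port': port})
        raise ValueError(_('invalid port: %s') % port)

When oslo.i18n is not installed, the except ImportError branch above turns all
five markers into identity functions, so the same code runs untranslated.
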
barbican/openstack/common/eventlet_backdoor.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2012 OpenStack Foundation.
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -18,21 +16,21 @@

 from __future__ import print_function

+import copy
 import errno
 import gc
+import logging
 import os
 import pprint
 import socket
 import sys
 import traceback

 import eventlet
 import eventlet.backdoor
 import greenlet
 from oslo.config import cfg

-from barbican.openstack.common.gettextutils import _  # noqa
-from barbican.openstack.common import log as logging
+from barbican.openstack.common._i18n import _LI

 help_for_backdoor_port = (
     "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
@@ -43,7 +41,6 @@ help_for_backdoor_port = (
     "chosen port is displayed in the service's log file.")
 eventlet_backdoor_opts = [
     cfg.StrOpt('backdoor_port',
-               default=None,
                help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
 ]

@@ -52,6 +49,12 @@ CONF.register_opts(eventlet_backdoor_opts)
 LOG = logging.getLogger(__name__)


+def list_opts():
+    """Entry point for oslo.config-generator.
+    """
+    return [(None, copy.deepcopy(eventlet_backdoor_opts))]
+
+
 class EventletBackdoorConfigValueError(Exception):
     def __init__(self, port_range, help_msg, ex):
         msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
@@ -66,7 +69,7 @@ def _dont_use_this():


 def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
+    return [o for o in gc.get_objects() if isinstance(o, t)]


 def _print_greenthreads():
@@ -139,8 +142,10 @@ def initialize_if_enabled():
     # In the case of backdoor port being zero, a port number is assigned by
     # listen(). In any case, pull the port number out here.
     port = sock.getsockname()[1]
-    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
-             {'port': port, 'pid': os.getpid()})
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+        {'port': port, 'pid': os.getpid()}
+    )
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port
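
A minimal sketch of turning the backdoor on (the port range and override call
are illustrative): initialize_if_enabled() returns the bound port, or None
when backdoor_port is unset.

from oslo.config import cfg

from barbican.openstack.common import eventlet_backdoor

# backdoor_port accepts "0" (random port), a single port, or a range;
# with a range, the first unused port in it is chosen.
cfg.CONF.set_override('backdoor_port', '3000:3010')
port = eventlet_backdoor.initialize_if_enabled()
print('eventlet backdoor listening on port %s' % port)
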
barbican/openstack/common/fileutils.py
@@ -15,24 +15,27 @@

 import contextlib
 import errno
+import logging
 import os
+import stat
 import tempfile

-from barbican.openstack.common import excutils
-from barbican.openstack.common import log as logging
+from oslo.utils import excutils

 LOG = logging.getLogger(__name__)

 _FILE_CACHE = {}
+DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO


-def ensure_tree(path):
+def ensure_tree(path, mode=DEFAULT_MODE):
     """Create a directory (and any ancestor directories required)

     :param path: Directory to create
+    :param mode: Directory creation permissions
     """
     try:
-        os.makedirs(path)
+        os.makedirs(path, mode)
     except OSError as exc:
         if exc.errno == errno.EEXIST:
             if not os.path.isdir(path):
@@ -50,8 +53,8 @@ def read_cached_file(filename, force_reload=False):
     """
     global _FILE_CACHE

-    if force_reload and filename in _FILE_CACHE:
-        del _FILE_CACHE[filename]
+    if force_reload:
+        delete_cached_file(filename)

     reloaded = False
     mtime = os.path.getmtime(filename)
@@ -66,6 +69,17 @@ def read_cached_file(filename, force_reload=False):
     return (reloaded, cache_info['data'])


+def delete_cached_file(filename):
+    """Delete cached file if present.
+
+    :param filename: filename to delete
+    """
+    global _FILE_CACHE
+
+    if filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+
 def delete_if_exists(path, remove=os.unlink):
     """Delete a file, but ignore file not found error.

@@ -99,7 +113,7 @@ def remove_path_on_error(path, remove=delete_if_exists):
 def file_open(*args, **kwargs):
     """Open file

-    see built-in file() documentation for more details
+    see built-in open() documentation for more details

     Note: The reason this is kept in a separate module is to easily
     be able to provide a stub module that doesn't alter system
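
A minimal sketch of the new mode argument (the path is illustrative); the
DEFAULT_MODE default preserves the old behaviour of 0777 masked by the umask.

import stat

from barbican.openstack.common import fileutils

# Create the whole tree readable/writable/executable by the owner only.
fileutils.ensure_tree('/tmp/barbican-example/a/b', mode=stat.S_IRWXU)
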
barbican/openstack/common/loopingcall.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -17,32 +15,37 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import logging
 import sys
+import time

 from eventlet import event
 from eventlet import greenthread

-from barbican.openstack.common.gettextutils import _  # noqa
-from barbican.openstack.common import log as logging
-from barbican.openstack.common import timeutils
+from barbican.openstack.common._i18n import _LE, _LW

 LOG = logging.getLogger(__name__)

+# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
+#              with time.time() called in the standard logging module
+#              during unittests.
+_ts = lambda: time.time()
+

 class LoopingCallDone(Exception):
-    """Exception to break out and stop a LoopingCall.
+    """Exception to break out and stop a LoopingCallBase.

-    The poll-function passed to LoopingCall can raise this exception to
+    The poll-function passed to LoopingCallBase can raise this exception to
     break out of the loop normally. This is somewhat analogous to
     StopIteration.

     An optional return-value can be included as the argument to the exception;
-    this return-value will be returned by LoopingCall.wait()
+    this return-value will be returned by LoopingCallBase.wait()

     """

     def __init__(self, retvalue=True):
-        """:param retvalue: Value that LoopingCall.wait() should return."""
+        """:param retvalue: Value that LoopingCallBase.wait() should return."""
         self.retvalue = retvalue


@@ -74,21 +77,22 @@ class FixedIntervalLoopingCall(LoopingCallBase):

             try:
                 while self._running:
-                    start = timeutils.utcnow()
+                    start = _ts()
                     self.f(*self.args, **self.kw)
-                    end = timeutils.utcnow()
+                    end = _ts()
                     if not self._running:
                         break
-                    delay = interval - timeutils.delta_seconds(start, end)
-                    if delay <= 0:
-                        LOG.warn(_('task run outlasted interval by %s sec') %
-                                 -delay)
-                    greenthread.sleep(delay if delay > 0 else 0)
+                    delay = end - start - interval
+                    if delay > 0:
+                        LOG.warn(_LW('task %(func_name)r run outlasted '
+                                     'interval by %(delay).2f sec'),
+                                 {'func_name': self.f, 'delay': delay})
+                    greenthread.sleep(-delay if delay < 0 else 0)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in fixed duration looping call'))
+                LOG.exception(_LE('in fixed duration looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -100,11 +104,6 @@ class FixedIntervalLoopingCall(LoopingCallBase):
         return self.done


-# TODO(mikal): this class name is deprecated in Havana and should be removed
-# in the I release
-LoopingCall = FixedIntervalLoopingCall
-
-
 class DynamicLoopingCall(LoopingCallBase):
     """A looping call which sleeps until the next known event.

@@ -128,14 +127,15 @@ class DynamicLoopingCall(LoopingCallBase):

                 if periodic_interval_max is not None:
                     idle = min(idle, periodic_interval_max)
-                LOG.debug(_('Dynamic looping call sleeping for %.02f '
-                            'seconds'), idle)
+                LOG.debug('Dynamic looping call %(func_name)r sleeping '
+                          'for %(idle).02f seconds',
+                          {'func_name': self.f, 'idle': idle})
                 greenthread.sleep(idle)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in dynamic looping call'))
+                LOG.exception(_LE('in dynamic looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
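
A minimal usage sketch of FixedIntervalLoopingCall as reworked above (the task
is hypothetical): raising LoopingCallDone ends the loop, and its retvalue
becomes the result of waiting on the done event returned by start().

from barbican.openstack.common import loopingcall

state = {'runs': 0}


def tick():
    state['runs'] += 1
    if state['runs'] >= 5:
        raise loopingcall.LoopingCallDone(retvalue=state['runs'])


timer = loopingcall.FixedIntervalLoopingCall(tick)
print(timer.start(interval=0.1).wait())  # prints 5
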
barbican/openstack/common/policy.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+#
 # Copyright (c) 2012 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -22,22 +24,43 @@ string written in the new policy language.

 In the list-of-lists representation, each check inside the innermost
 list is combined as with an "and" conjunction--for that check to pass,
 all the specified checks must pass. These innermost lists are then
-combined as with an "or" conjunction. This is the original way of
-expressing policies, but there now exists a new way: the policy
-language.
-
-In the policy language, each check is specified the same way as in the
-list-of-lists representation: a simple "a:b" pair that is matched to
-the correct code to perform that check. However, conjunction
-operators are available, allowing for more expressiveness in crafting
-policies.
-
-As an example, take the following rule, expressed in the list-of-lists
-representation::
+combined as with an "or" conjunction. As an example, take the following
+rule, expressed in the list-of-lists representation::

     [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]

-In the policy language, this becomes::
+This is the original way of expressing policies, but there now exists a
+new way: the policy language.
+
+In the policy language, each check is specified the same way as in the
+list-of-lists representation: a simple "a:b" pair that is matched to
+the correct class to perform that check::
+
+ +===========================================================================+
+ | TYPE                            | SYNTAX                                  |
+ +===========================================================================+
+ | User's Role                     | role:admin                              |
+ +---------------------------------------------------------------------------+
+ | Rules already defined on policy | rule:admin_required                     |
+ +---------------------------------------------------------------------------+
+ | Against URL's¹                  | http://my-url.org/check                 |
+ +---------------------------------------------------------------------------+
+ | User attributes²                | project_id:%(target.project.id)s        |
+ +---------------------------------------------------------------------------+
+ | Strings                         | <variable>:'xpto2035abc'                |
+ |                                 | 'myproject':<variable>                  |
+ +---------------------------------------------------------------------------+
+ |                                 | project_id:xpto2035abc                  |
+ | Literals                        | domain_id:20                            |
+ |                                 | True:%(user.enabled)s                   |
+ +===========================================================================+
+
+ ¹URL checking must return 'True' to be valid
+ ²User attributes (obtained through the token): user_id, domain_id or project_id
+
+Conjunction operators are available, allowing for more expressiveness
+in crafting policies. So, in the policy language, the previous check in
+list-of-lists becomes::

     role:admin or (project_id:%(project_id)s and role:projectadmin)

@@ -46,26 +69,16 @@ policy rule::

     project_id:%(project_id)s and not role:dunce

-It is possible to perform policy checks on the following user
-attributes (obtained through the token): user_id, domain_id or
-project_id::
-
-    domain_id:<some_value>
-
 Attributes sent along with API calls can be used by the policy engine
 (on the right side of the expression), by using the following syntax::

-    <some_value>:user.id
+    <some_value>:%(user.id)s

 Contextual attributes of objects identified by their IDs are loaded
 from the database. They are also available to the policy engine and
 can be checked through the `target` keyword::

-    <some_value>:target.role.name
-
-All these attributes (related to users, API calls, and context) can be
-checked against each other or against constants, be it literals (True,
-<a_number>) or strings.
+    <some_value>:%(target.role.name)s

 Finally, two special policy checks should be mentioned; the policy
 check "@" will always accept an access, and the policy check "!" will
@@ -77,17 +90,19 @@ as it allows particular rules to be explicitly disabled.

 import abc
 import ast
+import copy
+import logging
 import os
 import re

 from oslo.config import cfg
+from oslo.serialization import jsonutils
 import six
 import six.moves.urllib.parse as urlparse
 import six.moves.urllib.request as urlrequest

 from barbican.openstack.common import fileutils
-from barbican.openstack.common.gettextutils import _, _LE
-from barbican.openstack.common import jsonutils
-from barbican.openstack.common import log as logging
+from barbican.openstack.common._i18n import _, _LE, _LI


 policy_opts = [
@@ -98,6 +113,14 @@ policy_opts = [
                default='default',
                help=_('Default rule. Enforced when a requested rule is not '
                       'found.')),
+    cfg.MultiStrOpt('policy_dirs',
+                    default=['policy.d'],
+                    help=_('Directories where policy configuration files are '
+                           'stored. They can be relative to any directory '
+                           'in the search path defined by the config_dir '
+                           'option, or absolute paths. The file defined by '
+                           'policy_file must exist for these directories to '
+                           'be searched.')),
 ]

 CONF = cfg.CONF
@@ -108,6 +131,11 @@ LOG = logging.getLogger(__name__)
 _checks = {}


+def list_opts():
+    """Entry point for oslo.config-generator."""
+    return [(None, copy.deepcopy(policy_opts))]
+
+
 class PolicyNotAuthorized(Exception):

     def __init__(self, rule):
@@ -184,16 +212,19 @@ class Enforcer(object):
     :param default_rule: Default rule to use, CONF.default_rule will
                          be used if none is specified.
     :param use_conf: Whether to load rules from cache or config file.
+    :param overwrite: Whether to overwrite existing rules when reload rules
+                      from config file.
     """

     def __init__(self, policy_file=None, rules=None,
-                 default_rule=None, use_conf=True):
-        self.rules = Rules(rules, default_rule)
+                 default_rule=None, use_conf=True, overwrite=True):
         self.default_rule = default_rule or CONF.policy_default_rule
+        self.rules = Rules(rules, self.default_rule)

         self.policy_path = None
         self.policy_file = policy_file or CONF.policy_file
         self.use_conf = use_conf
+        self.overwrite = overwrite

     def set_rules(self, rules, overwrite=True, use_conf=False):
         """Create a new Rules object based on the provided dict of rules.
@@ -216,6 +247,7 @@ class Enforcer(object):
     def clear(self):
         """Clears Enforcer rules, policy's cache and policy's path."""
         self.set_rules({})
+        fileutils.delete_cached_file(self.policy_path)
         self.default_rule = None
         self.policy_path = None

@@ -224,7 +256,7 @@ class Enforcer(object):

         Policy file is cached and will be reloaded if modified.

-        :param force_reload: Whether to overwrite current rules.
+        :param force_reload: Whether to reload rules from config file.
         """

         if force_reload:
@@ -232,31 +264,55 @@ class Enforcer(object):

         if self.use_conf:
             if not self.policy_path:
-                self.policy_path = self._get_policy_path()
+                self.policy_path = self._get_policy_path(self.policy_file)

+            self._load_policy_file(self.policy_path, force_reload,
+                                   overwrite=self.overwrite)
+            for path in CONF.policy_dirs:
+                try:
+                    path = self._get_policy_path(path)
+                except cfg.ConfigFilesNotFoundError:
+                    LOG.info(_LI("Can not find policy directory: %s"), path)
+                    continue
+                self._walk_through_policy_directory(path,
+                                                    self._load_policy_file,
+                                                    force_reload, False)
+
+    @staticmethod
+    def _walk_through_policy_directory(path, func, *args):
+        # We do not iterate over sub-directories.
+        policy_files = next(os.walk(path))[2]
+        policy_files.sort()
+        for policy_file in [p for p in policy_files if not p.startswith('.')]:
+            func(os.path.join(path, policy_file), *args)
+
+    def _load_policy_file(self, path, force_reload, overwrite=True):
         reloaded, data = fileutils.read_cached_file(
-            self.policy_path, force_reload=force_reload)
-        if reloaded or not self.rules:
+            path, force_reload=force_reload)
+        if reloaded or not self.rules or not overwrite:
             rules = Rules.load_json(data, self.default_rule)
-            self.set_rules(rules)
+            self.set_rules(rules, overwrite=overwrite, use_conf=True)
             LOG.debug("Rules successfully reloaded")

-    def _get_policy_path(self):
-        """Locate the policy json data file.
+    def _get_policy_path(self, path):
+        """Locate the policy json data file/path.

-        :param policy_file: Custom policy file to locate.
+        :param path: It's value can be a full path or related path. When
+                     full path specified, this function just returns the full
+                     path. When related path specified, this function will
+                     search configuration directories to find one that exists.

         :returns: The policy path

-        :raises: ConfigFilesNotFoundError if the file couldn't
+        :raises: ConfigFilesNotFoundError if the file/path couldn't
                  be located.
         """
-        policy_file = CONF.find_file(self.policy_file)
+        policy_path = CONF.find_file(path)

-        if policy_file:
-            return policy_file
+        if policy_path:
+            return policy_path

-        raise cfg.ConfigFilesNotFoundError((self.policy_file,))
+        raise cfg.ConfigFilesNotFoundError((path,))

     def enforce(self, rule, target, creds, do_raise=False,
                 exc=None, *args, **kwargs):
@@ -271,7 +327,7 @@ class Enforcer(object):
         :param do_raise: Whether to raise an exception or not if check
                          fails.
         :param exc: Class of the exception to raise if the check fails.
-                    Any remaining arguments passed to check() (both
+                    Any remaining arguments passed to enforce() (both
                     positional and keyword arguments) will be passed to
                     the exception class. If not specified, PolicyNotAuthorized
                     will be used.
@@ -283,10 +339,6 @@ class Enforcer(object):
            from the expression.
         """

-        # NOTE(flaper87): Not logging target or creds to avoid
-        # potential security issues.
-        LOG.debug("Rule %s will be now enforced" % rule)
-
         self.load_rules()

         # Allow the rule to be a Check tree
@@ -788,7 +840,7 @@ def _parse_text_rule(rule):
         return state.result
     except ValueError:
         # Couldn't parse the rule
-        LOG.exception(_LE("Failed to understand rule %r") % rule)
+        LOG.exception(_LE("Failed to understand rule %s") % rule)

     # Fail closed
     return FalseCheck()
@@ -859,7 +911,17 @@ class HttpCheck(Check):
         """

         url = ('http:' + self.match) % target
-        data = {'target': jsonutils.dumps(target),
+
+        # Convert instances of object() in target temporarily to
+        # empty dict to avoid circular reference detection
+        # errors in jsonutils.dumps().
+        temp_target = copy.deepcopy(target)
+        for key in target.keys():
+            element = target.get(key)
+            if type(element) is object:
+                temp_target[key] = {}
+
+        data = {'target': jsonutils.dumps(temp_target),
                 'credentials': jsonutils.dumps(creds)}
         post_data = urlparse.urlencode(data)
         f = urlrequest.urlopen(url, post_data)
@@ -873,13 +935,12 @@ class GenericCheck(Check):

         Matches look like:

             project:%(project_id)s
             tenant:%(tenant_id)s
             role:compute:admin
+            True:%(user.enabled)s
             'Member':%(role.name)s
         """

-        # TODO(termie): do dict inspection via dot syntax
         try:
             match = self.match % target
         except KeyError:
@@ -892,7 +953,10 @@ class GenericCheck(Check):
             leftval = ast.literal_eval(self.kind)
         except ValueError:
             try:
-                leftval = creds[self.kind]
+                kind_parts = self.kind.split('.')
+                leftval = creds
+                for kind_part in kind_parts:
+                    leftval = leftval[kind_part]
             except KeyError:
                 return False
         return match == six.text_type(leftval)
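
A minimal sketch of the Enforcer with a hypothetical rule written in the
policy language documented above:

import json

from barbican.openstack.common import policy

enforcer = policy.Enforcer()
enforcer.set_rules(policy.Rules.load_json(json.dumps(
    {'secrets:get': 'role:admin or project_id:%(project_id)s'})))

creds = {'roles': ['member'], 'project_id': 'p1'}  # caller, from the token
target = {'project_id': 'p1'}                      # object being accessed
print(enforcer.enforce('secrets:get', target, creds))  # True: project matches

With the new policy_dirs option, rules found in policy.d/*.json files are
merged on top of the main policy_file when load_rules() runs.
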
barbican/openstack/common/service.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -20,29 +18,82 @@

 """Generic Node base class for all workers that run on hosts."""

+import errno
+import logging
 import os
 import random
 import signal
 import sys
 import time

+try:
+    # Importing just the symbol here because the io module does not
+    # exist in Python 2.6.
+    from io import UnsupportedOperation  # noqa
+except ImportError:
+    # Python 2.6
+    UnsupportedOperation = None
+
 import eventlet
 from eventlet import event
-import logging as std_logging
 from oslo.config import cfg

 from barbican.openstack.common import eventlet_backdoor
-from barbican.openstack.common.gettextutils import _  # noqa
 from barbican.openstack.common import importutils
-from barbican.openstack.common import log as logging
+from barbican.openstack.common._i18n import _LE, _LI, _LW
+from barbican.openstack.common import systemd
 from barbican.openstack.common import threadgroup


 rpc = importutils.try_import('barbican.openstack.common.rpc')
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)


+def _sighup_supported():
+    return hasattr(signal, 'SIGHUP')
+
+
+def _is_daemon():
+    # The process group for a foreground process will match the
+    # process group of the controlling terminal. If those values do
+    # not match, or ioctl() fails on the stdout file handle, we assume
+    # the process is running in the background as a daemon.
+    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+    try:
+        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+    except OSError as err:
+        if err.errno == errno.ENOTTY:
+            # Assume we are a daemon because there is no terminal.
+            is_daemon = True
+        else:
+            raise
+    except UnsupportedOperation:
+        # Could not get the fileno for stdout, so we must be a daemon.
+        is_daemon = True
+    return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+    if not (_sighup_supported() and signo == signal.SIGHUP):
+        # Avoid checking if we are a daemon, because the signal isn't
+        # SIGHUP.
+        return False
+    return _is_daemon()
+
+
+def _signo_to_signame(signo):
+    signals = {signal.SIGTERM: 'SIGTERM',
+               signal.SIGINT: 'SIGINT'}
+    if _sighup_supported():
+        signals[signal.SIGHUP] = 'SIGHUP'
+    return signals[signo]
+
+
+def _set_signals_handler(handler):
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+    if _sighup_supported():
+        signal.signal(signal.SIGHUP, handler)
+
+
 class Launcher(object):
     """Launch one or more services and wait for them to complete."""

@@ -100,51 +151,41 @@ class SignalExit(SystemExit):
 class ServiceLauncher(Launcher):
     def _handle_signal(self, signo, frame):
         # Allow the process to be killed again and die from natural causes
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
-        signal.signal(signal.SIGHUP, signal.SIG_DFL)
-
+        _set_signals_handler(signal.SIG_DFL)
         raise SignalExit(signo)

     def handle_signal(self):
-        signal.signal(signal.SIGTERM, self._handle_signal)
-        signal.signal(signal.SIGINT, self._handle_signal)
-        signal.signal(signal.SIGHUP, self._handle_signal)
+        _set_signals_handler(self._handle_signal)

-    def _wait_for_exit_or_signal(self):
+    def _wait_for_exit_or_signal(self, ready_callback=None):
         status = None
         signo = 0

-        LOG.debug(_('Full set of CONF:'))
-        CONF.log_opt_values(LOG, std_logging.DEBUG)
+        LOG.debug('Full set of CONF:')
+        CONF.log_opt_values(LOG, logging.DEBUG)

         try:
+            if ready_callback:
+                ready_callback()
             super(ServiceLauncher, self).wait()
         except SignalExit as exc:
-            signame = {signal.SIGTERM: 'SIGTERM',
-                       signal.SIGINT: 'SIGINT',
-                       signal.SIGHUP: 'SIGHUP'}[exc.signo]
-            LOG.info(_('Caught %s, exiting'), signame)
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_LI('Caught %s, exiting'), signame)
             status = exc.code
             signo = exc.signo
         except SystemExit as exc:
             status = exc.code
         finally:
             self.stop()
-            if rpc:
-                try:
-                    rpc.cleanup()
-                except Exception:
-                    # We're shutting down, so it doesn't matter at this point.
-                    LOG.exception(_('Exception during rpc cleanup.'))
-
         return status, signo

-    def wait(self):
+    def wait(self, ready_callback=None):
+        systemd.notify_once()
         while True:
             self.handle_signal()
-            status, signo = self._wait_for_exit_or_signal()
-            if signo != signal.SIGHUP:
+            status, signo = self._wait_for_exit_or_signal(ready_callback)
+            if not _is_sighup_and_daemon(signo):
                 return status
             self.restart()

@@ -158,34 +199,36 @@ class ServiceWrapper(object):


 class ProcessLauncher(object):
-    def __init__(self):
+    def __init__(self, wait_interval=0.01):
+        """Constructor.
+
+        :param wait_interval: The interval to sleep for between checks
+                              of child process exit.
+        """
         self.children = {}
         self.sigcaught = None
         self.running = True
+        self.wait_interval = wait_interval
         rfd, self.writepipe = os.pipe()
         self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
         self.handle_signal()

     def handle_signal(self):
-        signal.signal(signal.SIGTERM, self._handle_signal)
-        signal.signal(signal.SIGINT, self._handle_signal)
-        signal.signal(signal.SIGHUP, self._handle_signal)
+        _set_signals_handler(self._handle_signal)

     def _handle_signal(self, signo, frame):
         self.sigcaught = signo
         self.running = False

         # Allow the process to be killed again and die from natural causes
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
-        signal.signal(signal.SIGHUP, signal.SIG_DFL)
+        _set_signals_handler(signal.SIG_DFL)

     def _pipe_watcher(self):
         # This will block until the write end is closed when the parent
         # dies unexpectedly
         self.readpipe.read()

-        LOG.info(_('Parent process has died unexpectedly, exiting'))
+        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

         sys.exit(1)

@@ -200,27 +243,29 @@
             raise SignalExit(signal.SIGHUP)

         signal.signal(signal.SIGTERM, _sigterm)
-        signal.signal(signal.SIGHUP, _sighup)
+        if _sighup_supported():
+            signal.signal(signal.SIGHUP, _sighup)
         # Block SIGINT and let the parent send us a SIGTERM
         signal.signal(signal.SIGINT, signal.SIG_IGN)

     def _child_wait_for_exit_or_signal(self, launcher):
-        status = None
+        status = 0
         signo = 0

         # NOTE(johannes): All exceptions are caught to ensure this
         # doesn't fallback into the loop spawning children. It would
         # be bad for a child to spawn more children.
         try:
             launcher.wait()
         except SignalExit as exc:
-            signame = {signal.SIGTERM: 'SIGTERM',
-                       signal.SIGINT: 'SIGINT',
-                       signal.SIGHUP: 'SIGHUP'}[exc.signo]
-            LOG.info(_('Caught %s, exiting'), signame)
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_LI('Child caught %s, exiting'), signame)
             status = exc.code
             signo = exc.signo
         except SystemExit as exc:
             status = exc.code
         except BaseException:
-            LOG.exception(_('Unhandled exception'))
+            LOG.exception(_LE('Unhandled exception'))
             status = 2
         finally:
             launcher.stop()
@@ -253,7 +298,7 @@
         # start up quickly but ensure we don't fork off children that
         # die instantly too quickly.
         if time.time() - wrap.forktimes[0] < wrap.workers:
-            LOG.info(_('Forking too fast, sleeping'))
+            LOG.info(_LI('Forking too fast, sleeping'))
             time.sleep(1)

         wrap.forktimes.pop(0)
@@ -262,20 +307,17 @@

         pid = os.fork()
         if pid == 0:
-            # NOTE(johannes): All exceptions are caught to ensure this
-            # doesn't fallback into the loop spawning children. It would
-            # be bad for a child to spawn more children.
             launcher = self._child_process(wrap.service)
             while True:
                 self._child_process_handle_signal()
                 status, signo = self._child_wait_for_exit_or_signal(launcher)
-                if signo != signal.SIGHUP:
+                if not _is_sighup_and_daemon(signo):
                     break
                 launcher.restart()

             os._exit(status)

-        LOG.info(_('Started child %d'), pid)
+        LOG.info(_LI('Started child %d'), pid)

         wrap.children.add(pid)
         self.children[pid] = wrap
@@ -285,7 +327,7 @@
     def launch_service(self, service, workers=1):
         wrap = ServiceWrapper(service, workers)

-        LOG.info(_('Starting %d workers'), wrap.workers)
+        LOG.info(_LI('Starting %d workers'), wrap.workers)
         while self.running and len(wrap.children) < wrap.workers:
             self._start_child(wrap)

@@ -302,15 +344,15 @@

         if os.WIFSIGNALED(status):
             sig = os.WTERMSIG(status)
-            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
+            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                      dict(pid=pid, sig=sig))
         else:
             code = os.WEXITSTATUS(status)
-            LOG.info(_('Child %(pid)s exited with status %(code)d'),
+            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                      dict(pid=pid, code=code))

         if pid not in self.children:
-            LOG.warning(_('pid %d not in child list'), pid)
+            LOG.warning(_LW('pid %d not in child list'), pid)
             return None

         wrap = self.children.pop(pid)
@@ -324,7 +366,7 @@
             # Yield to other threads if no children have exited
             # Sleep for a short time to avoid excessive CPU usage
             # (see bug #1095346)
-            eventlet.greenthread.sleep(.01)
+            eventlet.greenthread.sleep(self.wait_interval)
             continue
         while self.running and len(wrap.children) < wrap.workers:
             self._start_child(wrap)
@@ -332,25 +374,35 @@
     def wait(self):
         """Loop waiting on children to die and respawning as necessary."""

-        LOG.debug(_('Full set of CONF:'))
-        CONF.log_opt_values(LOG, std_logging.DEBUG)
+        systemd.notify_once()
+        LOG.debug('Full set of CONF:')
+        CONF.log_opt_values(LOG, logging.DEBUG)

-        while True:
-            self.handle_signal()
-            self._respawn_children()
-            if self.sigcaught:
-                signame = {signal.SIGTERM: 'SIGTERM',
-                           signal.SIGINT: 'SIGINT',
-                           signal.SIGHUP: 'SIGHUP'}[self.sigcaught]
-                LOG.info(_('Caught %s, stopping children'), signame)
-            if self.sigcaught != signal.SIGHUP:
-                break
-            for pid in self.children:
-                os.kill(pid, signal.SIGHUP)
-            self.running = True
-            self.sigcaught = None
+        try:
+            while True:
+                self.handle_signal()
+                self._respawn_children()
+                # No signal means that stop was called. Don't clean up here.
+                if not self.sigcaught:
+                    return
+
+                signame = _signo_to_signame(self.sigcaught)
+                LOG.info(_LI('Caught %s, stopping children'), signame)
+                if not _is_sighup_and_daemon(self.sigcaught):
+                    break
+
+                for pid in self.children:
+                    os.kill(pid, signal.SIGHUP)
+                self.running = True
+                self.sigcaught = None
+        except eventlet.greenlet.GreenletExit:
+            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

         self.stop()

     def stop(self):
         """Terminate child processes and wait on each."""
         self.running = False
         for pid in self.children:
             try:
                 os.kill(pid, signal.SIGTERM)
@@ -360,7 +412,7 @@

         # Wait for children to die
         if self.children:
-            LOG.info(_('Waiting on %d children to exit'), len(self.children))
+            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
             while self.children:
                 self._wait_child()

@@ -381,8 +433,8 @@ class Service(object):
     def start(self):
         pass

-    def stop(self):
-        self.tg.stop()
+    def stop(self, graceful=False):
+        self.tg.stop(graceful)
         self.tg.wait()
         # Signal that service cleanup is done:
         if not self._done.ready():
@@ -440,11 +492,12 @@ class Services(object):
         done.wait()


-def launch(service, workers=None):
-    if workers:
-        launcher = ProcessLauncher()
-        launcher.launch_service(service, workers=workers)
-    else:
+def launch(service, workers=1):
+    if workers is None or workers == 1:
         launcher = ServiceLauncher()
         launcher.launch_service(service)
+    else:
+        launcher = ProcessLauncher()
+        launcher.launch_service(service, workers=workers)

     return launcher
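
A minimal sketch of the reworked launch() (the service body is hypothetical):
workers of None or 1 now selects ServiceLauncher, anything greater forks
children through ProcessLauncher.

from barbican.openstack.common import service


class ExampleService(service.Service):
    def start(self):
        # Schedule a periodic no-op task on the service's thread group.
        self.tg.add_timer(10, lambda: None)


launcher = service.launch(ExampleService(), workers=4)  # ProcessLauncher path
launcher.wait()
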
barbican/openstack/common/systemd.py (new file, 105 lines)
@@ -0,0 +1,105 @@
+# Copyright 2012-2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper module for systemd service readiness notification.
+"""
+
+import logging
+import os
+import socket
+import sys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _abstractify(socket_name):
+    if socket_name.startswith('@'):
+        # abstract namespace socket
+        socket_name = '\0%s' % socket_name[1:]
+    return socket_name
+
+
+def _sd_notify(unset_env, msg):
+    notify_socket = os.getenv('NOTIFY_SOCKET')
+    if notify_socket:
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+        try:
+            sock.connect(_abstractify(notify_socket))
+            sock.sendall(msg)
+            if unset_env:
+                del os.environ['NOTIFY_SOCKET']
+        except EnvironmentError:
+            LOG.debug("Systemd notification failed", exc_info=True)
+        finally:
+            sock.close()
+
+
+def notify():
+    """Send notification to Systemd that service is ready.
+
+    For details see
+    http://www.freedesktop.org/software/systemd/man/sd_notify.html
+    """
+    _sd_notify(False, 'READY=1')
+
+
+def notify_once():
+    """Send notification once to Systemd that service is ready.
+
+    Systemd sets NOTIFY_SOCKET environment variable with the name of the
+    socket listening for notifications from services.
+    This method removes the NOTIFY_SOCKET environment variable to ensure
+    notification is sent only once.
+    """
+    _sd_notify(True, 'READY=1')
+
+
+def onready(notify_socket, timeout):
+    """Wait for systemd style notification on the socket.
+
+    :param notify_socket: local socket address
+    :type notify_socket: string
+    :param timeout: socket timeout
+    :type timeout: float
+    :returns: 0 service ready
+              1 service not ready
+              2 timeout occurred
+    """
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+    sock.settimeout(timeout)
+    sock.bind(_abstractify(notify_socket))
+    try:
+        msg = sock.recv(512)
+    except socket.timeout:
+        return 2
+    finally:
+        sock.close()
+    if 'READY=1' in msg:
+        return 0
+    else:
+        return 1
+
+
+if __name__ == '__main__':
+    # simple CLI for testing
+    if len(sys.argv) == 1:
+        notify()
+    elif len(sys.argv) >= 2:
+        timeout = float(sys.argv[1])
+        notify_socket = os.getenv('NOTIFY_SOCKET')
+        if notify_socket:
+            retval = onready(notify_socket, timeout)
+            sys.exit(retval)
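
A minimal local sketch of the notification round trip (the socket name is
illustrative, and abstract-namespace sockets are Linux-only); a real
deployment instead receives NOTIFY_SOCKET from a Type=notify systemd unit.

import os
import threading
import time

from barbican.openstack.common import systemd

SOCK = '@example_notify'
result = {}


def watcher():
    # onready() returns 0 (ready), 1 (not ready) or 2 (timeout).
    result['status'] = systemd.onready(SOCK, timeout=5.0)


t = threading.Thread(target=watcher)
t.start()
time.sleep(0.2)  # give the watcher time to bind

os.environ['NOTIFY_SOCKET'] = SOCK
systemd.notify_once()  # sends 'READY=1', then unsets NOTIFY_SOCKET
t.join()
print(result['status'])  # expect 0
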
barbican/openstack/common/threadgroup.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,12 +11,12 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+import logging
+import threading

 import eventlet
 from eventlet import greenpool
-from eventlet import greenthread

-from barbican.openstack.common import log as logging
 from barbican.openstack.common import loopingcall


@@ -48,9 +46,12 @@ class Thread(object):
     def wait(self):
         return self.thread.wait()

+    def link(self, func, *args, **kwargs):
+        self.thread.link(func, *args, **kwargs)
+

 class ThreadGroup(object):
-    """The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup class is to:

     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -79,21 +80,28 @@ class ThreadGroup(object):
         gt = self.pool.spawn(callback, *args, **kwargs)
         th = Thread(gt, self)
         self.threads.append(th)
+        return th

     def thread_done(self, thread):
         self.threads.remove(thread)

-    def stop(self):
-        current = greenthread.getcurrent()
-        for x in self.threads:
+    def _stop_threads(self):
+        current = threading.current_thread()
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 # don't kill the current thread.
                 continue
             try:
                 x.stop()
+            except eventlet.greenlet.GreenletExit:
+                pass
             except Exception as ex:
                 LOG.exception(ex)

     def stop_timers(self):
         for x in self.timers:
             try:
                 x.stop()
@@ -101,6 +109,23 @@
                 LOG.exception(ex)
         self.timers = []

+    def stop(self, graceful=False):
+        """stop function has the option of graceful=True/False.
+
+        * In case of graceful=True, wait for all threads to be finished.
+          Never kill threads.
+        * In case of graceful=False, kill threads immediately.
+        """
+        self.stop_timers()
+        if graceful:
+            # In case of graceful=True, wait for all threads to be
+            # finished, never kill threads
+            self.wait()
+        else:
+            # In case of graceful=False(Default), kill threads
+            # immediately
+            self._stop_threads()
+
     def wait(self):
         for x in self.timers:
             try:
@@ -109,8 +134,11 @@
                 pass
             except Exception as ex:
                 LOG.exception(ex)
-        current = greenthread.getcurrent()
-        for x in self.threads:
+        current = threading.current_thread()
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 continue
             try:
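
A minimal sketch of the threadgroup additions (the task is hypothetical):
add_thread() now returns the Thread wrapper, link() exposes eventlet's
completion hook, and stop(graceful=True) waits instead of killing.

import eventlet

from barbican.openstack.common import threadgroup


def task():
    eventlet.sleep(1)


tg = threadgroup.ThreadGroup()
th = tg.add_thread(task)
th.link(lambda gt: None)  # called with the greenthread when task() finishes
tg.stop(graceful=True)    # stop timers, then wait(); threads are not killed
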
openstack-common.conf
@@ -1,7 +1,7 @@
 [DEFAULT]

 # The list of modules to copy from openstack-common
-modules=jsonutils,log,local,timeutils,importutils,policy
+modules=local,policy,eventlet_backdoor,service

 # The base module to hold the copy of openstack.common
 base=barbican