From b9d60d50bdd5bc9ec55648ca78b4712b13974739 Mon Sep 17 00:00:00 2001 From: Serg Melikyan Date: Mon, 4 Mar 2013 13:59:08 +0400 Subject: [PATCH] Initial version of portas-api Portas-api is REST API component for Windows DataCenter as a Service In initial version there is only draft support for Environments API --- api/openstack-common.conf | 0 api/setup.py | 90 ++ portas/.gitignore | 20 + portas/babel.cfg | 1 + portas/bin/portas-api | 47 + portas/doc/source/_static/basic.css | 416 ++++++ portas/doc/source/_static/default.css | 230 ++++ portas/doc/source/_static/header-line.gif | Bin 0 -> 48 bytes portas/doc/source/_static/header_bg.jpg | Bin 0 -> 3738 bytes portas/doc/source/_static/jquery.tweet.js | 154 +++ portas/doc/source/_static/nature.css | 245 ++++ portas/doc/source/_static/openstack_logo.png | Bin 0 -> 3670 bytes portas/doc/source/_static/tweaks.css | 94 ++ portas/doc/source/_templates/.placeholder | 0 portas/doc/source/_theme/layout.html | 83 ++ portas/doc/source/_theme/theme.conf | 4 + portas/doc/source/conf.py | 261 ++++ portas/doc/source/index.rst | 83 ++ portas/etc/portas-api-paste.ini | 57 + portas/etc/portas-api.conf | 34 + portas/openstack-common.conf | 7 + portas/portas/__init__.py | 0 portas/portas/api/__init__.py | 0 portas/portas/api/v1/__init__.py | 0 portas/portas/api/v1/environments.py | 0 portas/portas/api/v1/router.py | 50 + portas/portas/common/__init__.py | 0 portas/portas/common/config.py | 215 +++ portas/portas/common/exception.py | 272 ++++ portas/portas/db/__init__.py | 1 + portas/portas/db/api.py | 1 + portas/portas/db/migrate_repo/README | 4 + portas/portas/db/migrate_repo/__init__.py | 1 + portas/portas/db/migrate_repo/manage.py | 21 + portas/portas/db/migrate_repo/migrate.cfg | 20 + .../versions/001_add_initial_tables.py | 41 + .../db/migrate_repo/versions/__init__.py | 1 + portas/portas/db/models.py | 181 +++ portas/portas/db/session.py | 122 ++ portas/portas/locale/ru/LC_MESSAGES/portas.po | 1216 +++++++++++++++++ 
portas/portas/openstack/__init__.py | 0 portas/portas/openstack/common/__init__.py | 0 .../openstack/common/eventlet_backdoor.py | 87 ++ portas/portas/openstack/common/exception.py | 142 ++ .../portas/openstack/common/gettextutils.py | 33 + portas/portas/openstack/common/importutils.py | 67 + portas/portas/openstack/common/jsonutils.py | 147 ++ portas/portas/openstack/common/local.py | 48 + portas/portas/openstack/common/log.py | 521 +++++++ portas/portas/openstack/common/loopingcall.py | 95 ++ .../openstack/common/notifier/__init__.py | 14 + .../portas/openstack/common/notifier/api.py | 183 +++ .../openstack/common/notifier/log_notifier.py | 35 + .../common/notifier/no_op_notifier.py | 19 + .../common/notifier/rabbit_notifier.py | 29 + .../openstack/common/notifier/rpc_notifier.py | 46 + .../common/notifier/rpc_notifier2.py | 52 + .../common/notifier/test_notifier.py | 22 + portas/portas/openstack/common/service.py | 332 +++++ portas/portas/openstack/common/setup.py | 359 +++++ portas/portas/openstack/common/sslutils.py | 80 ++ portas/portas/openstack/common/threadgroup.py | 114 ++ portas/portas/openstack/common/timeutils.py | 182 +++ portas/portas/openstack/common/uuidutils.py | 39 + portas/portas/openstack/common/version.py | 94 ++ portas/portas/openstack/common/wsgi.py | 797 +++++++++++ portas/portas/openstack/common/xmlutils.py | 74 + portas/portas/schema.py | 107 ++ portas/portas/tests/__init__.py | 0 portas/portas/tests/api/__init__.py | 1 + portas/portas/tests/api/simple_test.py | 6 + portas/portas/version.py | 20 + portas/run_tests.sh | 123 ++ portas/setup.cfg | 9 + portas/setup.py | 48 + portas/tools/install_venv.py | 75 + portas/tools/install_venv_common.py | 219 +++ portas/tools/pip-requires | 23 + portas/tools/test-requires | 0 portas/tools/with_venv.sh | 4 + 80 files changed, 8218 insertions(+) create mode 100644 api/openstack-common.conf create mode 100644 api/setup.py create mode 100644 portas/.gitignore create mode 100644 portas/babel.cfg create 
mode 100755 portas/bin/portas-api create mode 100644 portas/doc/source/_static/basic.css create mode 100644 portas/doc/source/_static/default.css create mode 100644 portas/doc/source/_static/header-line.gif create mode 100644 portas/doc/source/_static/header_bg.jpg create mode 100644 portas/doc/source/_static/jquery.tweet.js create mode 100644 portas/doc/source/_static/nature.css create mode 100644 portas/doc/source/_static/openstack_logo.png create mode 100644 portas/doc/source/_static/tweaks.css create mode 100644 portas/doc/source/_templates/.placeholder create mode 100644 portas/doc/source/_theme/layout.html create mode 100644 portas/doc/source/_theme/theme.conf create mode 100644 portas/doc/source/conf.py create mode 100644 portas/doc/source/index.rst create mode 100644 portas/etc/portas-api-paste.ini create mode 100644 portas/etc/portas-api.conf create mode 100644 portas/openstack-common.conf create mode 100644 portas/portas/__init__.py create mode 100644 portas/portas/api/__init__.py create mode 100644 portas/portas/api/v1/__init__.py create mode 100644 portas/portas/api/v1/environments.py create mode 100644 portas/portas/api/v1/router.py create mode 100644 portas/portas/common/__init__.py create mode 100644 portas/portas/common/config.py create mode 100644 portas/portas/common/exception.py create mode 100644 portas/portas/db/__init__.py create mode 100644 portas/portas/db/api.py create mode 100644 portas/portas/db/migrate_repo/README create mode 100644 portas/portas/db/migrate_repo/__init__.py create mode 100644 portas/portas/db/migrate_repo/manage.py create mode 100644 portas/portas/db/migrate_repo/migrate.cfg create mode 100644 portas/portas/db/migrate_repo/versions/001_add_initial_tables.py create mode 100644 portas/portas/db/migrate_repo/versions/__init__.py create mode 100644 portas/portas/db/models.py create mode 100644 portas/portas/db/session.py create mode 100644 portas/portas/locale/ru/LC_MESSAGES/portas.po create mode 100644 
portas/portas/openstack/__init__.py create mode 100644 portas/portas/openstack/common/__init__.py create mode 100644 portas/portas/openstack/common/eventlet_backdoor.py create mode 100644 portas/portas/openstack/common/exception.py create mode 100644 portas/portas/openstack/common/gettextutils.py create mode 100644 portas/portas/openstack/common/importutils.py create mode 100644 portas/portas/openstack/common/jsonutils.py create mode 100644 portas/portas/openstack/common/local.py create mode 100644 portas/portas/openstack/common/log.py create mode 100644 portas/portas/openstack/common/loopingcall.py create mode 100644 portas/portas/openstack/common/notifier/__init__.py create mode 100644 portas/portas/openstack/common/notifier/api.py create mode 100644 portas/portas/openstack/common/notifier/log_notifier.py create mode 100644 portas/portas/openstack/common/notifier/no_op_notifier.py create mode 100644 portas/portas/openstack/common/notifier/rabbit_notifier.py create mode 100644 portas/portas/openstack/common/notifier/rpc_notifier.py create mode 100644 portas/portas/openstack/common/notifier/rpc_notifier2.py create mode 100644 portas/portas/openstack/common/notifier/test_notifier.py create mode 100644 portas/portas/openstack/common/service.py create mode 100644 portas/portas/openstack/common/setup.py create mode 100644 portas/portas/openstack/common/sslutils.py create mode 100644 portas/portas/openstack/common/threadgroup.py create mode 100644 portas/portas/openstack/common/timeutils.py create mode 100644 portas/portas/openstack/common/uuidutils.py create mode 100644 portas/portas/openstack/common/version.py create mode 100644 portas/portas/openstack/common/wsgi.py create mode 100644 portas/portas/openstack/common/xmlutils.py create mode 100644 portas/portas/schema.py create mode 100644 portas/portas/tests/__init__.py create mode 100644 portas/portas/tests/api/__init__.py create mode 100644 portas/portas/tests/api/simple_test.py create mode 100644 
portas/portas/version.py create mode 100755 portas/run_tests.sh create mode 100644 portas/setup.cfg create mode 100644 portas/setup.py create mode 100644 portas/tools/install_venv.py create mode 100644 portas/tools/install_venv_common.py create mode 100644 portas/tools/pip-requires create mode 100644 portas/tools/test-requires create mode 100755 portas/tools/with_venv.sh diff --git a/api/openstack-common.conf b/api/openstack-common.conf new file mode 100644 index 0000000..e69de29 diff --git a/api/setup.py b/api/setup.py new file mode 100644 index 0000000..3265bca --- /dev/null +++ b/api/setup.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import subprocess + +from setuptools import setup, find_packages +from setuptools.command.sdist import sdist + +from windc import version + + +if os.path.isdir('.bzr'): + with open("windc/vcsversion.py", 'w') as version_file: + vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"], + stdout=subprocess.PIPE) + vcsversion = vcs_cmd.communicate()[0] + version_file.write(vcsversion) + + +class local_sdist(sdist): + """Customized sdist hook - builds the ChangeLog file from VC first""" + + def run(self): + if os.path.isdir('.bzr'): + # We're in a bzr branch + + log_cmd = subprocess.Popen(["bzr", "log", "--gnu"], + stdout=subprocess.PIPE) + changelog = log_cmd.communicate()[0] + with open("ChangeLog", "w") as changelog_file: + changelog_file.write(changelog) + sdist.run(self) + +cmdclass = {'sdist': local_sdist} + +# If Sphinx is installed on the box running setup.py, +# enable setup.py to build the documentation, otherwise, +# just ignore it +try: + from sphinx.setup_command import BuildDoc + + class local_BuildDoc(BuildDoc): + def run(self): + for builder in ['html', 'man']: + self.builder = builder + self.finalize_options() + BuildDoc.run(self) + cmdclass['build_sphinx'] = local_BuildDoc + +except: + pass + + +setup( + name='windc', + version=version.canonical_version_string(), + description='The WinDC project provides a simple WSGI server for Windows Environment Management', + license='Apache License (2.0)', + author='OpenStack', + author_email='openstack@lists.launchpad.net', + url='http://windc.openstack.org/', + packages=find_packages(exclude=['tests', 'bin']), + test_suite='nose.collector', + cmdclass=cmdclass, + include_package_data=True, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: POSIX :: Linux', + 'Programming Language :: Python :: 2.6', + 'Environment :: No Input/Output (Daemon)', + ], + scripts=['bin/windc', + 'bin/windc-api']) diff --git 
a/portas/.gitignore b/portas/.gitignore new file mode 100644 index 0000000..7b4690b --- /dev/null +++ b/portas/.gitignore @@ -0,0 +1,20 @@ +##IntelJ Idea +.idea/ + +#virtualenv +.venv/ + +#Build results +build/ +dist/ +*.egg-info/ + +#Python +*.pyc + +#Translation build +*.mo +*.pot + +#SQLite Database files +*.sqlite \ No newline at end of file diff --git a/portas/babel.cfg b/portas/babel.cfg new file mode 100644 index 0000000..efceab8 --- /dev/null +++ b/portas/babel.cfg @@ -0,0 +1 @@ +[python: **.py] diff --git a/portas/bin/portas-api b/portas/bin/portas-api new file mode 100755 index 0000000..bb7817d --- /dev/null +++ b/portas/bin/portas-api @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import gettext +import os +import sys + + +# If ../portas/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'portas', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('portas', './portas/locale', unicode=1) + +from portas.common import config +from portas.openstack.common import log +from portas.openstack.common import wsgi + + +if __name__ == '__main__': + try: + config.parse_args() + + server = wsgi.Server() + server.start(config.load_paste_app(), default_port=8181) + server.wait() + except RuntimeError, e: + sys.stderr.write("ERROR: %s\n" % e) + sys.exit(1) diff --git a/portas/doc/source/_static/basic.css b/portas/doc/source/_static/basic.css new file mode 100644 index 0000000..d909ce3 --- /dev/null +++ b/portas/doc/source/_static/basic.css @@ -0,0 +1,416 @@ +/** + * Sphinx stylesheet -- basic theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + 
font-family: sans-serif; + font-size: 1em; +} + +img { + border: 0; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +/* -- general body styles --------------------------------------------------- */ + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.field-list ul { + padding-left: 1em; +} + +.first { +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px 
solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 0; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +/* -- other body styles ----------------------------------------------------- */ + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlight { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.refcount { + color: #060; +} + +.optional { + font-size: 1.3em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid 
red; +} + +.footnote:target { + background-color: #ffa +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +tt.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +tt.descclassname { + background-color: transparent; +} + +tt.xref, a tt { + background-color: transparent; + font-weight: bold; +} + +h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { + background-color: transparent; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} diff --git a/portas/doc/source/_static/default.css b/portas/doc/source/_static/default.css new file mode 100644 index 0000000..c8091ec --- /dev/null +++ b/portas/doc/source/_static/default.css @@ -0,0 +1,230 @@ +/** + * Sphinx stylesheet -- default theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: sans-serif; + font-size: 100%; + background-color: #11303d; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + background-color: #1c4e63; +} + +div.documentwrapper { + float: 
left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: #ffffff; + color: #000000; + padding: 0 20px 30px 20px; +} + +div.footer { + color: #ffffff; + width: 100%; + padding: 9px 0 9px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #ffffff; + text-decoration: underline; +} + +div.related { + background-color: #133f52; + line-height: 30px; + color: #ffffff; +} + +div.related a { + color: #ffffff; +} + +div.sphinxsidebar { +} + +div.sphinxsidebar h3 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sphinxsidebar h3 a { + color: #ffffff; +} + +div.sphinxsidebar h4 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.3em; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sphinxsidebar p { + color: #ffffff; +} + +div.sphinxsidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sphinxsidebar ul { + margin: 10px; + padding: 0; + color: #ffffff; +} + +div.sphinxsidebar a { + color: #98dbcc; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #355f7c; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +div.body p, div.body dd, div.body li { + text-align: left; + line-height: 130%; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Trebuchet MS', sans-serif; + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 
100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + text-align: left; + line-height: 130%; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.admonition p { + margin-bottom: 5px; +} + +div.admonition pre { + margin-bottom: 5px; +} + +div.admonition ul, div.admonition ol { + margin-bottom: 5px; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 5px; + background-color: #eeffcc; + color: #333333; + line-height: 120%; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +tt { + background-color: #ecf0f3; + padding: 0 1px 0 1px; + font-size: 0.95em; +} + +.warning tt { + background: #efc2c2; +} + +.note tt { + background: #d6d6d6; +} diff --git a/portas/doc/source/_static/header-line.gif b/portas/doc/source/_static/header-line.gif new file mode 100644 index 0000000000000000000000000000000000000000..3601730e03488b7b5f92dc992d23ad753357c167 GIT binary patch literal 48 zcmZ?wbhEHbWMg1uXkcVG`smgF|Nj+#vM@3*Ff!;c00Bsbfr-7RpY8O^Kn4bD08FwB Aga7~l literal 0 HcmV?d00001 diff --git a/portas/doc/source/_static/header_bg.jpg b/portas/doc/source/_static/header_bg.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f788c41c26481728fa4329c17c87bde36001adc1 GIT binary patch literal 3738 zcmd5-YdDna8vedHnM0NtYi6>>At7O=uyTsZup5R_40A9)aXQa}U(l^=gSg=J*&3mKp$aM0r>UIFDe9Zy(vs} zWf)kqO2Y_n0$>ZQ0D&hY4tWjpY?Ii5?V)h*kc0fz?%ZIj3|{;F8E5l%d0)&*Hx~ulvc_*73u8%R 
zsVMV~ne!JY);&pWott~QIZYJFTXliYc2};JEU{X7W6;ZPfz;)U;U4#mEuK@K*=SC3BR-m&x9(Nna@>b@%FS34|P^jtsXRb5>z9gtPp;_MI2F3o*k z>csA-?CX4b;~4P-*L$+Mmb|51F)eD*wCc`Jt(9}C${Zo=!Uin=u_yMC^;`X!x$##4 z+~}dkT`NF@Uhw0r+6g_)?e!h8IX+OE^C96>UOsv0GPMD6(kr#ljhXRnA=O>Qj@%iT zqBF7aQ*}BG)h@6r0%#azk!r9yrN6>9dq~>KadV$~cGG?Hjk>~it^5rd#zS4KE*p+4 z;;B)%oBK8PNTs=A)a-z`n?3zJ%+h{`=>ijk4sYKr*>`eN1H`~Lo|Tm!o6qN{S* zeNl=NcpGzD55)XnLC|>g)~w={=c#4*x^;mk4Zo_FOFlffP@!?1`c+TogTVR4kp9-q z`d5cMBzNxk6qjPRK9*WY3uHS=bnm_QJvSMBBS_A#3i=ywsg6^|9rfruW0MhdGwHDO z?1gJRMQVecKE^gV{%uo(b)zl^Hd&vmnwFh88h*-?FJ;y=Hdqvt!K|s<$>xlzR=G4{ zZgGOCF43IXS?62B)w*N&dXt%U8X^Bjx}^%Yf>VFpFoKSGP%k?ems;&&J)|Dx(qtQD zu2tS)<_Qz4#LhBKYkl@Og}G)^5+F4P($Fk>)}{uMVv|;Sz2i4$XJ_WTw*;n>3N805rnXhbC52SC={E3rXRlrs|I6f;o|Cn%eje59{axu9sivy4oYmg=j|fLt3<3 zFce84aNb8GbK;y>RbBu71YBcYKL3@M3N25yoE%BtG z^K!`WTQ|fb-Ysa7T)mEw&4_b)PWYgc!)3W)H+neR9o^f|AXdgY1`gN+pvgzbbk`M z*Ts6${7M`2)9XIPy^MoXTiiP2GTp_OtgWMshnH)M&ZSO0)cet!oWo_0_&hV(0?Qdb zdo(sw{I#{hI`SWPM`N=U^#+MgN-*rZ#J7Cm7Jj89`5ehd_{z&9->Jc7$F(X4)&|`K z5rEgd;@dhi-IzJnSVpMd!Gf_G-QW+ zjVMrIas1)g%)GJ;(=oaK};O^)NYdS1`XR?K_;I7qj zhii5}x^he{U3M+GF+WpYws#=Pt#S9xB_X5QE7W+_rQdwMhukJnQj}5cnCz_sIJ#r0 zJa5drkRPI$X(4YdpCswJe#5aN4Jjw3V3Nzt&`lcKBI~#;!>jq7j8y# zvHrFg_#P376A45^hp-KU*P=R;DVdPK*w7D@Gw+`XsSpm^L-VkCooZF61sPAnnjsT# zND4C{>G#P10F_&txEoE!rX%Iy*L}Kna=Q%fDLJ_rF*LujRITZ)$g!?UYLkCXOoz-S z_p`Hny*Rh--l)aYQC&-2dd%;%VKGC1<1DJm_n~`nk4^yS`}&P zM}5bOypW0hwtvrwnE>}g1Mq+B>09qPp1b$hn6kC_iqF`tX#G-t7D$n}Ky9t}sUqiI zOe@odQ?JueZ+sg`-zoQ}J4if6vv1c9x{BDme+F6z{8esU^Kio zK_oPy9}@nlGywSOZy9`^- zzBg>C9|rgWF{pcCogEV@;d}VHrgeBl=5Dr*th4V!1`Z9Zrz9le1zHC#sM3{j#G2R?WMhl6b_yyoEAxX>Zixl$16`+^d$ihNtuIBUafyiCEv#oksNL<4= z*oDXsc7-(ww^9-b-6_|bITySG1N2C-7p0L4+V@R%j=4@ygc=89bmSNy38$S=ZiDyP z0SrqrVA;zi8kYBZ2@Mx(2Lx~-*bc@d1#4R($RJv$9ZTfx_t7Kc|HIHnd&@I386P?& z?d6Vd(48n${cTNFFCoSIUj#O{mmt%M&xCIFmR9Y3f{2UnF4e9@uFZOaYiY|CLdbDa z%xS9x4SHi7Fr-1?CnDqRK?)n&$TTBW5J?O&o{TnNCnLw*{QmT7{c}flSbp9&xi*zF z1TdUn&_!$_WxQbMKGkgsl}B%+N5ZV%Hy6_zJ>dejD89yCBMw9(d}z2fWjYH_nV6!F 
zqe_rI2H5Pi0^~S6)jjnu%lqZN*eQq6!||a24+edpSH_{C8Ew^g8dw2qdrH!@*E7K* z)00Bb8uUsai%v6Oa^L@3E02r|EG%EdV>q;=#2Q9Wjv3l?dAur$4bzyOl3M6 z1hf%&o*#2R&xnS1z4&R`Uq%`Ut0_P{BOwt;FuDb$1")); + }); + return $(returning); + }, + linkUser: function() { + var returning = []; + var regexp = /[\@]+([A-Za-z0-9-_]+)/gi; + this.each(function() { + returning.push(this.replace(regexp,"@$1")); + }); + return $(returning); + }, + linkHash: function() { + var returning = []; + var regexp = / [\#]+([A-Za-z0-9-_]+)/gi; + this.each(function() { + returning.push(this.replace(regexp, ' #$1')); + }); + return $(returning); + }, + capAwesome: function() { + var returning = []; + this.each(function() { + returning.push(this.replace(/\b(awesome)\b/gi, '$1')); + }); + return $(returning); + }, + capEpic: function() { + var returning = []; + this.each(function() { + returning.push(this.replace(/\b(epic)\b/gi, '$1')); + }); + return $(returning); + }, + makeHeart: function() { + var returning = []; + this.each(function() { + returning.push(this.replace(/(<)+[3]/gi, "")); + }); + return $(returning); + } + }); + + function relative_time(time_value) { + var parsed_date = Date.parse(time_value); + var relative_to = (arguments.length > 1) ? arguments[1] : new Date(); + var delta = parseInt((relative_to.getTime() - parsed_date) / 1000); + var pluralize = function (singular, n) { + return '' + n + ' ' + singular + (n == 1 ? '' : 's'); + }; + if(delta < 60) { + return 'less than a minute ago'; + } else if(delta < (45*60)) { + return 'about ' + pluralize("minute", parseInt(delta / 60)) + ' ago'; + } else if(delta < (24*60*60)) { + return 'about ' + pluralize("hour", parseInt(delta / 3600)) + ' ago'; + } else { + return 'about ' + pluralize("day", parseInt(delta / 86400)) + ' ago'; + } + } + + function build_url() { + var proto = ('https:' == document.location.protocol ? 
'https:' : 'http:'); + if (s.list) { + return proto+"//api.twitter.com/1/"+s.username[0]+"/lists/"+s.list+"/statuses.json?per_page="+s.count+"&callback=?"; + } else if (s.query == null && s.username.length == 1) { + return proto+'//twitter.com/status/user_timeline/'+s.username[0]+'.json?count='+s.count+'&callback=?'; + } else { + var query = (s.query || 'from:'+s.username.join('%20OR%20from:')); + return proto+'//search.twitter.com/search.json?&q='+query+'&rpp='+s.count+'&callback=?'; + } + } + + return this.each(function(){ + var list = $('
    ').appendTo(this); + var intro = '

    '+s.intro_text+'

    '; + var outro = '

    '+s.outro_text+'

    '; + var loading = $('

    '+s.loading_text+'

    '); + + if(typeof(s.username) == "string"){ + s.username = [s.username]; + } + + if (s.loading_text) $(this).append(loading); + $.getJSON(build_url(), function(data){ + if (s.loading_text) loading.remove(); + if (s.intro_text) list.before(intro); + $.each((data.results || data), function(i,item){ + // auto join text based on verb tense and content + if (s.join_text == "auto") { + if (item.text.match(/^(@([A-Za-z0-9-_]+)) .*/i)) { + var join_text = s.auto_join_text_reply; + } else if (item.text.match(/(^\w+:\/\/[A-Za-z0-9-_]+\.[A-Za-z0-9-_:%&\?\/.=]+) .*/i)) { + var join_text = s.auto_join_text_url; + } else if (item.text.match(/^((\w+ed)|just) .*/im)) { + var join_text = s.auto_join_text_ed; + } else if (item.text.match(/^(\w*ing) .*/i)) { + var join_text = s.auto_join_text_ing; + } else { + var join_text = s.auto_join_text_default; + } + } else { + var join_text = s.join_text; + }; + + var from_user = item.from_user || item.user.screen_name; + var profile_image_url = item.profile_image_url || item.user.profile_image_url; + var join_template = ' '+join_text+' '; + var join = ((s.join_text) ? join_template : ' '); + var avatar_template = ''+from_user+'\'s avatar'; + var avatar = (s.avatar_size ? avatar_template : ''); + var date = ''+relative_time(item.created_at)+''; + var text = '' +$([item.text]).linkUrl().linkUser().linkHash().makeHeart().capAwesome().capEpic()[0]+ ''; + + // until we create a template option, arrange the items below to alter a tweet's display. + list.append('
  • ' + avatar + date + join + text + '
  • '); + + list.children('li:first').addClass('tweet_first'); + list.children('li:odd').addClass('tweet_even'); + list.children('li:even').addClass('tweet_odd'); + }); + if (s.outro_text) list.after(outro); + }); + + }); + }; +})(jQuery); \ No newline at end of file diff --git a/portas/doc/source/_static/nature.css b/portas/doc/source/_static/nature.css new file mode 100644 index 0000000..a98bd42 --- /dev/null +++ b/portas/doc/source/_static/nature.css @@ -0,0 +1,245 @@ +/* + * nature.css_t + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- nature theme. + * + * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Arial, sans-serif; + font-size: 100%; + background-color: #111; + color: #555; + margin: 0; + padding: 0; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 {{ theme_sidebarwidth|toint }}px; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #eee; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; + font-size: 0.9em; +} + +div.footer { + color: #555; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444; + text-decoration: underline; +} + +div.related { + background-color: #6BA81E; + line-height: 32px; + color: #fff; + text-shadow: 0px 1px 0 #444; + font-size: 0.9em; +} + +div.related a { + color: #E2F3CC; +} + +div.sphinxsidebar { + font-size: 0.75em; + line-height: 1.5em; +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Arial, sans-serif; + color: #222; + font-size: 1.2em; + font-weight: normal; + margin: 0; + padding: 5px 10px; + background-color: #ddd; + text-shadow: 1px 1px 0 white +} + +div.sphinxsidebar h4{ + 
font-size: 1.1em; +} + +div.sphinxsidebar h3 a { + color: #444; +} + + +div.sphinxsidebar p { + color: #888; + padding: 5px 20px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 20px; + padding: 0; + color: #000; +} + +div.sphinxsidebar a { + color: #444; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #005B81; + text-decoration: none; +} + +a:hover { + color: #E32E00; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Arial, sans-serif; + background-color: #BED4EB; + font-weight: normal; + color: #212224; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 10px; + text-shadow: 0px 1px 0 white +} + +div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 150%; background-color: #C8D5E3; } +div.body h3 { font-size: 120%; background-color: #D8DEE3; } +div.body h4 { font-size: 110%; background-color: #D8DEE3; } +div.body h5 { font-size: 100%; background-color: #D8DEE3; } +div.body h6 { font-size: 100%; background-color: #D8DEE3; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + 
+p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: White; + color: #222; + line-height: 1.2em; + border: 1px solid #C6C9CB; + font-size: 1.1em; + margin: 1.5em 0 1.5em 0; + -webkit-box-shadow: 1px 1px 1px #d8d8d8; + -moz-box-shadow: 1px 1px 1px #d8d8d8; +} + +tt { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ + font-size: 1.1em; + font-family: monospace; +} + +.viewcode-back { + font-family: Arial, sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} diff --git a/portas/doc/source/_static/openstack_logo.png b/portas/doc/source/_static/openstack_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..146faec5cfe3773824f4caf39e4480e4974d10df GIT binary patch literal 3670 zcmV-c4yo~pP)CW75Qp#l)U;+N6jaIz6Nf$t6dNV>^>ETzcpQ=%tMaf0k|rg72+IW`z$FyfE+D{1@tt$t5DmX)*;QV?c;%+5Z&egAgfXTQJq-mZkC z>pFAHu}U=Axde_?s!99ZfDg_+9TYzDa6N1R3adhx&2Mb7>9w`KpMNz!>U5t2XQ8lZ zu+!+H7(PRwF@jAkwvI;|8|=Z_dfzV`Kpi;I!e=|Ql+HAdEag?VZ^Ilw9XJj9N1#1a z?UFC!)X62`CRIe^9YCLKbJ` z&O@f0zt{Z1YDF1utg2$F+rzvrncys+g37Xsd8)idSW(=}t#~qF#qBo29*@^ZCs<$W zpa144=o4g0z63h_ttPfIpH-FyG^MAH+6B~r$(4qw+Uv{2d#h`$lq+i+#Tf%CAzDFUh!pzX(6nW{EASJAQkhm!+}aGpHc z;(+N`S*@tYmump1T37E}J;!$0#F>^M*mT_X1x~bvnp&qP9IHI#bj-0z8FR+=p+e#*w3ugV#wX``sR-CI1!YiQsfc@Om<;1MBw zlfqH9z4Q|m*C?URU1OG(`UYn>Q8<|I!mby#FlN5MMFE8;Pyh$skbR?ngFLt?%nWSkS-#W5umy>@^DyAERP~{E&`M%0(qi&((^ahqL}u^jT<2dcf)p< z%Fxc9J$nh_`>_oNYC?oy`rIDY46Yrw4si3Qn~oXV%dJ}IlUD-40>QipyGa_dV0Z%J ztcEXm5yxR0gySJ04{nnbm#vP=Hq&GI<8VxcZ34pRjt6m%pE2H|!+HBJQrdBdyKHJR z2O_}hp!5bXuwniQYTF>yI|=cjT+2l`9T3|H+l4%ryPxWQm(ODW#8Ctj_CplcO=)qj zD#d~V6BahR9NY1kE5rF)_j<|!Cqnpq0uOKhL%w z>y8OyeTM1?REXc{0|3b=#WPZneh80PxL=Ljau1~+CgtMgg-vccMDX-L z9^7An_;!lFAi`#G_1F*OdM|Z$EVQs0m0$?mY}(baOZ%Zpd62#Pyg!3Jd4d zD^8+lSir&T6Y9-p9L#Wz6$5nXLjdOl?7Lv!TeMr}F14ranauW9=L>ubu*x>Bcrgwp zjrT@{rL*2Fc}Ilwn07QvdJfMOO2=(1Px)6&ih7lg839!Bx&}lQER~T`^7_x@fXo({ 
zCZMeZYt*!VgMTg>PR)PBaIwubzRY%jjE`-s zG;B}>2!lD=QLOTfQOEZKIEz*;yTJ9(Af0zNv;IDq7#Fr#W{Ap+7Sq1N3TL21X|h2t z=Dk>^bGSsRX-u+cZ23mMB_Ioc0yNIfcfLWB>$hVU3W3>d&a?IM+bGRGt+t}aiv(eh z(D6Z9N>U2|Qxle(!UVTeEKE6W))3WI5z48Rs8d5v0GwmyC8iQiUJO8KS?QwHl2abL zNW+hadDdPc8z%MSOG$l&WR@!!&M{WLmrnS=-0G#&`a)chX>mN9W1>|yqve@lL8a`f zXRmn$B8P=dLxE!2rIi}a*gh%FI4j?C;b@L=WgypiTRf==n6DKr9mUExo6a@{wLM-I z9%V9{!;5G!<8fMYikfEbrGXRQN-9*24}kIIpP&dEg@fiLqAY5|jjv}$P3x0avZODU zdX`c|G>h`1f=3uEu)L9C)H5%frni#HZXcX`TD{iQ-e2qXxj_f%|WW;byDMc%7+uBy}Y?KLC?jp%yyyeBNkqQ-*osw2ex&97Q{#C7%CdSDMNIV zTdC(LEm?&qPcNOjM)h9Grs|M(gsuhV8@96?m4WkQ>j{bJIs)m^neL%ua!i+N8>Lh+ zKu#7rF~VOH@hb{zGXYwys!Um4Vkf+H8Hj6?^eI%kT%j+HA0K=6qdQ@nfR57Q`Jm9T zc)Yg9-`e~BRE!xoKZ z=mP|0Kihr}V1$5sHw$QekmoL)lQ;~@H$S)}s3xuwypiubB?1%OyBpwC08TH!=?BrQ zhOp`PTu;%u0}Q=XKGb7d$g8*;de8c1UI|Re2R;;Radh_D!FIZg+JP`oJg>5 z;&B7eVAomZe>j~hOOIVRO_Q7eSGz37hxmnsG!n%HX`C6gSqFcg(RLmikn%EPR*wel zrsc;>!vQ<>2ZW`lk`MbNLopFd#_9mh8iKPH;KbjC@xJU${pdxuTF{uO(eG#9t*>XP z_4Seh`r_#q$^xeiuy(=eSouv66cpS!t3n`|j`6xnmSs1q@;0!I)m<6eYHHGMRdB87 ziruozT=gn@yp`B9oGxD-b7PqhZum|oJCfLB38&8v51ijj-Pb`qvCr3FtJ0aFms2h3(n0-}3jJ~J$ zCzep7-MIZFbo$(m8zWm?SoRl__blLE+!fFBVVk1&XLg+vmVNcTk9O2+q?x#F0LZUN zu6oM~C)(7^0|az4nM}@aZf<@RkH0CR8<-Yn-fZe+Dbr#iJWSt#tnR4^h<@ePXWmeHIO4q^X zCbiy(=k3R1o1}0E+7x*OOe-qnIXG{#N_rqK*1NH}Qz6aumTR`YTgo5K=q=61;5@b- zrgUA_Qz=)(TPN!tCZE|{?B0*r9ov5Fcip6xQ2;Yqs*2_o7TFKGp0|~bcP@6+a(rz^ zXXmmyBfT}ucw_t(6s+f^t_)nc>RKW<-q_&J35vN+RPLsR?VAsQeHLyCR7AWvxFOVc zAg-xl=j*RipzaKWx3lAf?ei`PoM;bbAL>svH?JqQwjSulb9bghytRt%*5x-no>xlf zh7qj0LYRXVDU})?Btsy7^71*ujsEP_ACyd)P)*ULWBCXox@PUfwmQ#)Vl&oeIqpQY zHMgU+xe0EhQ)RmjdB3JHGdrsvJ9?A=WwOrn)J?BH{+D&O_@SKdrj2|8Z{hS1T(k>&Zlt;p=tqw*mVY1aLt=u^eAHkW>8cb#@q& z4-SLa@ii zCt7NGrLv)1Scy9ew-sOwwLYn2a6T#KzJgnbacm7Z20q6tcs~C!0DI+r(=$l+x{=W0A}~0&W)ll4*&oF07*qoM6N<$f~n6U7ytkO literal 0 HcmV?d00001 diff --git a/portas/doc/source/_static/tweaks.css b/portas/doc/source/_static/tweaks.css new file mode 100644 index 0000000..3f3fb3f --- /dev/null +++ 
b/portas/doc/source/_static/tweaks.css @@ -0,0 +1,94 @@ +body { + background: #fff url(../_static/header_bg.jpg) top left no-repeat; +} + +#header { + width: 950px; + margin: 0 auto; + height: 102px; +} + +#header h1#logo { + background: url(../_static/openstack_logo.png) top left no-repeat; + display: block; + float: left; + text-indent: -9999px; + width: 175px; + height: 55px; +} + +#navigation { + background: url(../_static/header-line.gif) repeat-x 0 bottom; + display: block; + float: left; + margin: 27px 0 0 25px; + padding: 0; +} + +#navigation li{ + float: left; + display: block; + margin-right: 25px; +} + +#navigation li a { + display: block; + font-weight: normal; + text-decoration: none; + background-position: 50% 0; + padding: 20px 0 5px; + color: #353535; + font-size: 14px; +} + +#navigation li a.current, #navigation li a.section { + border-bottom: 3px solid #cf2f19; + color: #cf2f19; +} + +div.related { + background-color: #cde2f8; + border: 1px solid #b0d3f8; +} + +div.related a { + color: #4078ba; + text-shadow: none; +} + +div.sphinxsidebarwrapper { + padding-top: 0; +} + +pre { + color: #555; +} + +div.documentwrapper h1, div.documentwrapper h2, div.documentwrapper h3, div.documentwrapper h4, div.documentwrapper h5, div.documentwrapper h6 { + font-family: 'PT Sans', sans-serif !important; + color: #264D69; + border-bottom: 1px dotted #C5E2EA; + padding: 0; + background: none; + padding-bottom: 5px; +} + +div.documentwrapper h3 { + color: #CF2F19; +} + +a.headerlink { + color: #fff !important; + margin-left: 5px; + background: #CF2F19 !important; +} + +div.body { + margin-top: -25px; + margin-left: 230px; +} + +div.document { + width: 960px; + margin: 0 auto; +} \ No newline at end of file diff --git a/portas/doc/source/_templates/.placeholder b/portas/doc/source/_templates/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/portas/doc/source/_theme/layout.html b/portas/doc/source/_theme/layout.html new file mode 100644 index 
0000000..750b782 --- /dev/null +++ b/portas/doc/source/_theme/layout.html @@ -0,0 +1,83 @@ +{% extends "basic/layout.html" %} +{% set css_files = css_files + ['_static/tweaks.css'] %} +{% set script_files = script_files + ['_static/jquery.tweet.js'] %} + +{%- macro sidebar() %} + {%- if not embedded %}{% if not theme_nosidebar|tobool %} +
    +
    + {%- block sidebarlogo %} + {%- if logo %} + + {%- endif %} + {%- endblock %} + {%- block sidebartoc %} + {%- if display_toc %} +

    {{ _('Table Of Contents') }}

    + {{ toc }} + {%- endif %} + {%- endblock %} + {%- block sidebarrel %} + {%- if prev %} +

    {{ _('Previous topic') }}

    +

    {{ prev.title }}

    + {%- endif %} + {%- if next %} +

    {{ _('Next topic') }}

    +

    {{ next.title }}

    + {%- endif %} + {%- endblock %} + {%- block sidebarsourcelink %} + {%- if show_source and has_source and sourcename %} +

    {{ _('This Page') }}

    + + {%- endif %} + {%- endblock %} + {%- if customsidebar %} + {% include customsidebar %} + {%- endif %} + {%- block sidebarsearch %} + {%- if pagename != "search" %} + + + {%- endif %} + {%- endblock %} +
    +
    + {%- endif %}{% endif %} +{%- endmacro %} + +{% block relbar1 %}{% endblock relbar1 %} + +{% block header %} + +{% endblock %} \ No newline at end of file diff --git a/portas/doc/source/_theme/theme.conf b/portas/doc/source/_theme/theme.conf new file mode 100644 index 0000000..1cc4004 --- /dev/null +++ b/portas/doc/source/_theme/theme.conf @@ -0,0 +1,4 @@ +[theme] +inherit = basic +stylesheet = nature.css +pygments_style = tango diff --git a/portas/doc/source/conf.py b/portas/doc/source/conf.py new file mode 100644 index 0000000..8b546d6 --- /dev/null +++ b/portas/doc/source/conf.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2010 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Glance documentation build configuration file, created by +# sphinx-quickstart on Tue May 18 13:50:15 2010. +# +# This file is execfile()'d with the current directory set to its containing +# dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+sys.path = [os.path.abspath('../../glance'), + os.path.abspath('../..'), + os.path.abspath('../../bin') + ] + sys.path + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.coverage', + 'sphinx.ext.ifconfig', + 'sphinx.ext.intersphinx', + 'sphinx.ext.pngmath', + 'sphinx.ext.graphviz'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = [] +if os.getenv('HUDSON_PUBLISH_DOCS'): + templates_path = ['_ga', '_templates'] +else: + templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Glance' +copyright = u'2010, OpenStack Foundation.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +from glance.version import version_info as glance_version +# The full version, including alpha/beta/rc tags. +release = glance_version.version_string_with_vcs() +# The short X.Y version. +version = glance_version.canonical_version_string() + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. 
+exclude_trees = ['api'] + +# The reST default role (for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['glance.'] + +# -- Options for man page output -------------------------------------------- + +# Grouping the document tree for man pages. +# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' + +man_pages = [ + ('man/glance', 'glance', u'Glance CLI', + [u'OpenStack'], 1), + ('man/glanceapi', 'glance-api', u'Glance API Server', + [u'OpenStack'], 1), + ('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner', + [u'OpenStack'], 1), + ('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager', + [u'OpenStack'], 1), + ('man/glancecacheprefetcher', 'glance-cache-prefetcher', + u'Glance Cache Pre-fetcher', [u'OpenStack'], 1), + ('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner', + [u'OpenStack'], 1), + ('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ', + [u'OpenStack'], 1), + ('man/glancemanage', 'glance-manage', u'Glance Management Utility', + [u'OpenStack'], 1), + ('man/glanceregistry', 'glance-registry', u'Glance Registry Server', + [u'OpenStack'], 1), + ('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service', + [u'OpenStack'], 1) +] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. 
Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme_path = ["."] +html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = ['_theme'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' +git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" +html_last_updated_fmt = os.popen(git_cmd).read() + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +html_use_modindex = False + +# If false, no index is generated. 
+html_use_index = False + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'glancedoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, +# documentclass [howto/manual]). +latex_documents = [ + ('index', 'Glance.tex', u'Glance Documentation', + u'Glance Team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = {'python': ('http://docs.python.org/', None), + 'nova': ('http://nova.openstack.org', None), + 'swift': ('http://swift.openstack.org', None)} diff --git a/portas/doc/source/index.rst b/portas/doc/source/index.rst new file mode 100644 index 0000000..842c91e --- /dev/null +++ b/portas/doc/source/index.rst @@ -0,0 +1,83 @@ +.. + Copyright 2010 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Welcome to Glance's documentation! +================================== + +The Glance project provides services for discovering, registering, and +retrieving virtual machine images. Glance has a RESTful API that allows +querying of VM image metadata as well as retrieval of the actual image. + +VM images made available through Glance can be stored in a variety of +locations from simple filesystems to object-storage systems like the +OpenStack Swift project. + +Glance, as with all OpenStack projects, is written with the following design +guidelines in mind: + +* **Component based architecture**: Quickly add new behaviors +* **Highly available**: Scale to very serious workloads +* **Fault tolerant**: Isolated processes avoid cascading failures +* **Recoverable**: Failures should be easy to diagnose, debug, and rectify +* **Open standards**: Be a reference implementation for a community-driven api + +This documentation is generated by the Sphinx toolkit and lives in the source +tree. 
Additional documentation on Glance and other components of OpenStack can +be found on the `OpenStack wiki`_. + +.. _`OpenStack wiki`: http://wiki.openstack.org + +Concepts +======== + +.. toctree:: + :maxdepth: 1 + + identifiers + statuses + formats + common-image-properties + +Installing/Configuring Glance +============================= + +.. toctree:: + :maxdepth: 1 + + installing + configuring + authentication + policies + +Operating Glance +================ + +.. toctree:: + :maxdepth: 1 + + controllingservers + db + cache + notifications + +Using Glance +============ + +.. toctree:: + :maxdepth: 1 + + glanceapi + glanceclient diff --git a/portas/etc/portas-api-paste.ini b/portas/etc/portas-api-paste.ini new file mode 100644 index 0000000..8dbdc8c --- /dev/null +++ b/portas/etc/portas-api-paste.ini @@ -0,0 +1,57 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose = True +# Show debugging output in logs (sets DEBUG log level output) +debug = True +# Address to bind the server to +bind_host = 0.0.0.0 +# Port the bind the server to +bind_port = 8082 +# Log to this file. Make sure the user running skeleton-api has +# permissions to write to this file! 
+log_file = /tmp/api.log +# Orchestration Adapter Section +# +#provider - Cloud provider to use (openstack, amazon, dummy) +provider = openstack + +# Heat specific parameters +#heat_url - url for the heat service +# [auto] - find in the keystone +heat_url = auto + +#heat_api_version - version of the API to use +# +heat_api_version = 1 + + +[pipeline:windc-api] +pipeline = apiv1app +# NOTE: use the following pipeline for keystone +#pipeline = authtoken context apiv1app + +[app:apiv1app] +paste.app_factory = windc.common.wsgi:app_factory +windc.app_factory = windc.api.v1.router:API + +[filter:context] +paste.filter_factory = windc.common.wsgi:filter_factory +windc.filter_factory = windc.common.context:ContextMiddleware + +[filter:authtoken] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +auth_host = 172.18.67.57 +auth_port = 35357 +auth_protocol = http +auth_uri = http://172.18.67.57:5000/v2.0/ +admin_tenant_name = service +admin_user = windc +admin_password = 000 + +[filter:auth-context] +paste.filter_factory = windc.common.wsgi:filter_factory +windc.filter_factory = keystone.middleware.balancer_auth_token:KeystoneContextMiddleware + +[rabbitmq] +host = 10.0.0.1 +vhost = keero \ No newline at end of file diff --git a/portas/etc/portas-api.conf b/portas/etc/portas-api.conf new file mode 100644 index 0000000..3f1381b --- /dev/null +++ b/portas/etc/portas-api.conf @@ -0,0 +1,34 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +debug = True + +# Address to bind the server to +bind_host = 0.0.0.0 + +# Port the bind the server to +bind_port = 8082 + +# Log to this file. Make sure the user running skeleton-api has +# permissions to write to this file! 
+log_file = /tmp/api.log + +[pipeline:windc-api] +pipeline = versionnegotiation context apiv1app + +[pipeline:versions] +pipeline = versionsapp + +[app:versionsapp] +paste.app_factory = windc.api.versions:app_factory + +[app:apiv1app] +paste.app_factory = windc.api.v1:app_factory + +[filter:versionnegotiation] +paste.filter_factory = windc.api.middleware.version_negotiation:filter_factory + +[filter:context] +paste.filter_factory = openstack.common.middleware.context:filter_factory diff --git a/portas/openstack-common.conf b/portas/openstack-common.conf new file mode 100644 index 0000000..c3faaa6 --- /dev/null +++ b/portas/openstack-common.conf @@ -0,0 +1,7 @@ +[DEFAULT] + +# The list of modules to copy from openstack-common +modules=setup,wsgi,config,exception,gettextutilsl,jsonutils,log,xmlutils,sslutils,service,notifier,local,install_venv_common + +# The base module to hold the copy of openstack.common +base=portas \ No newline at end of file diff --git a/portas/portas/__init__.py b/portas/portas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/api/__init__.py b/portas/portas/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/api/v1/__init__.py b/portas/portas/api/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/api/v1/environments.py b/portas/portas/api/v1/environments.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/api/v1/router.py b/portas/portas/api/v1/router.py new file mode 100644 index 0000000..5631774 --- /dev/null +++ b/portas/portas/api/v1/router.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from glance.api.v1 import images +from glance.api.v1 import members +from glance.common import wsgi + + +class API(wsgi.Router): + + """WSGI router for Glance v1 API requests.""" + + def __init__(self, mapper): + images_resource = images.create_resource() + + mapper.resource("image", "images", controller=images_resource, + collection={'detail': 'GET'}) + mapper.connect("/", controller=images_resource, action="index") + mapper.connect("/images/{id}", controller=images_resource, + action="meta", conditions=dict(method=["HEAD"])) + + members_resource = members.create_resource() + + mapper.resource("member", "members", controller=members_resource, + parent_resource=dict(member_name='image', + collection_name='images')) + mapper.connect("/shared-images/{id}", + controller=members_resource, + action="index_shared_images") + mapper.connect("/images/{image_id}/members", + controller=members_resource, + action="update_all", + conditions=dict(method=["PUT"])) + + super(API, self).__init__(mapper) diff --git a/portas/portas/common/__init__.py b/portas/portas/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/common/config.py b/portas/portas/common/config.py new file mode 100644 index 0000000..fb8e235 --- /dev/null +++ b/portas/portas/common/config.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Routines for configuring Glance +""" + +import logging +import logging.config +import logging.handlers +import os +import sys + +from oslo.config import cfg +from paste import deploy + +from glance.version import version_info as version + +paste_deploy_opts = [ + cfg.StrOpt('flavor'), + cfg.StrOpt('config_file'), +] +common_opts = [ + cfg.BoolOpt('allow_additional_image_properties', default=True, + help=_('Whether to allow users to specify image properties ' + 'beyond what the image schema provides')), + cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api', + help=_('Python module path of data access API')), + cfg.IntOpt('limit_param_default', default=25, + help=_('Default value for the number of items returned by a ' + 'request if not specified explicitly in the request')), + cfg.IntOpt('api_limit_max', default=1000, + help=_('Maximum permissible number of items that could be ' + 'returned by a request')), + cfg.BoolOpt('show_image_direct_url', default=False, + help=_('Whether to include the backend image storage location ' + 'in image properties. Revealing storage location can be a ' + 'security risk, so use this setting with caution!')), + cfg.IntOpt('image_size_cap', default=1099511627776, + help=_("Maximum size of image a user can upload in bytes. " + "Defaults to 1099511627776 bytes (1 TB).")), + cfg.BoolOpt('enable_v1_api', default=True, + help=_("Deploy the v1 OpenStack Images API. 
")), + cfg.BoolOpt('enable_v2_api', default=True, + help=_("Deploy the v2 OpenStack Images API. ")), + cfg.StrOpt('pydev_worker_debug_host', default=None, + help=_('The hostname/IP of the pydev process listening for ' + 'debug connections')), + cfg.IntOpt('pydev_worker_debug_port', default=5678, + help=_('The port on which a pydev process is listening for ' + 'connections.')), +] + +CONF = cfg.CONF +CONF.register_opts(paste_deploy_opts, group='paste_deploy') +CONF.register_opts(common_opts) + +CONF.import_opt('verbose', 'glance.openstack.common.log') +CONF.import_opt('debug', 'glance.openstack.common.log') +CONF.import_opt('log_dir', 'glance.openstack.common.log') +CONF.import_opt('log_file', 'glance.openstack.common.log') +CONF.import_opt('log_config', 'glance.openstack.common.log') +CONF.import_opt('log_format', 'glance.openstack.common.log') +CONF.import_opt('log_date_format', 'glance.openstack.common.log') +CONF.import_opt('use_syslog', 'glance.openstack.common.log') +CONF.import_opt('syslog_log_facility', 'glance.openstack.common.log') + + +def parse_args(args=None, usage=None, default_config_files=None): + CONF(args=args, + project='glance', + version=version.cached_version_string(), + usage=usage, + default_config_files=default_config_files) + + +def parse_cache_args(args=None): + config_files = cfg.find_config_files(project='glance', prog='glance-cache') + parse_args(args=args, default_config_files=config_files) + + +def setup_logging(): + """ + Sets up the logging options for a log with supplied name + """ + + if CONF.log_config: + # Use a logging configuration file for all settings... 
+ if os.path.exists(CONF.log_config): + logging.config.fileConfig(CONF.log_config) + return + else: + raise RuntimeError("Unable to locate specified logging " + "config file: %s" % CONF.log_config) + + root_logger = logging.root + if CONF.debug: + root_logger.setLevel(logging.DEBUG) + elif CONF.verbose: + root_logger.setLevel(logging.INFO) + else: + root_logger.setLevel(logging.WARNING) + + formatter = logging.Formatter(CONF.log_format, CONF.log_date_format) + + if CONF.use_syslog: + try: + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility) + except AttributeError: + raise ValueError(_("Invalid syslog facility")) + + handler = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + elif CONF.log_file: + logfile = CONF.log_file + if CONF.log_dir: + logfile = os.path.join(CONF.log_dir, logfile) + handler = logging.handlers.WatchedFileHandler(logfile) + else: + handler = logging.StreamHandler(sys.stdout) + + handler.setFormatter(formatter) + root_logger.addHandler(handler) + + +def _get_deployment_flavor(): + """ + Retrieve the paste_deploy.flavor config item, formatted appropriately + for appending to the application name. + """ + flavor = CONF.paste_deploy.flavor + return '' if not flavor else ('-' + flavor) + + +def _get_paste_config_path(): + paste_suffix = '-paste.ini' + conf_suffix = '.conf' + if CONF.config_file: + # Assume paste config is in a paste.ini file corresponding + # to the last config file + path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) + else: + path = CONF.prog + '-paste.ini' + return CONF.find_file(os.path.basename(path)) + + +def _get_deployment_config_file(): + """ + Retrieve the deployment_config_file config item, formatted as an + absolute pathname. + """ + path = CONF.paste_deploy.config_file + if not path: + path = _get_paste_config_path() + if not path: + msg = "Unable to locate paste config file for %s." 
% CONF.prog + raise RuntimeError(msg) + return os.path.abspath(path) + + +def load_paste_app(app_name=None): + """ + Builds and returns a WSGI app from a paste config file. + + We assume the last config file specified in the supplied ConfigOpts + object is the paste config file. + + :param app_name: name of the application to load + + :raises RuntimeError when config file cannot be located or application + cannot be loaded from config file + """ + if app_name is None: + app_name = CONF.prog + + # append the deployment flavor to the application name, + # in order to identify the appropriate paste pipeline + app_name += _get_deployment_flavor() + + conf_file = _get_deployment_config_file() + + try: + logger = logging.getLogger(__name__) + logger.debug(_("Loading %(app_name)s from %(conf_file)s"), + {'conf_file': conf_file, 'app_name': app_name}) + + app = deploy.loadapp("config:%s" % conf_file, name=app_name) + + # Log the options used when starting if we're in debug mode... + if CONF.debug: + CONF.log_opt_values(logger, logging.DEBUG) + + return app + except (LookupError, ImportError), e: + msg = _("Unable to load %(app_name)s from " + "configuration file %(conf_file)s." + "\nGot: %(e)r") % locals() + logger.error(msg) + raise RuntimeError(msg) diff --git a/portas/portas/common/exception.py b/portas/portas/common/exception.py new file mode 100644 index 0000000..fb1435d --- /dev/null +++ b/portas/portas/common/exception.py @@ -0,0 +1,272 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Glance exception subclasses""" + +import urlparse + +_FATAL_EXCEPTION_FORMAT_ERRORS = False + + +class RedirectException(Exception): + def __init__(self, url): + self.url = urlparse.urlparse(url) + + +class GlanceException(Exception): + """ + Base Glance Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. + """ + message = _("An unknown exception occurred") + + def __init__(self, message=None, *args, **kwargs): + if not message: + message = self.message + try: + message = message % kwargs + except Exception as e: + if _FATAL_EXCEPTION_FORMAT_ERRORS: + raise e + else: + # at least get the core message out if something happened + pass + + super(GlanceException, self).__init__(message) + + +class MissingArgumentError(GlanceException): + message = _("Missing required argument.") + + +class MissingCredentialError(GlanceException): + message = _("Missing required credential: %(required)s") + + +class BadAuthStrategy(GlanceException): + message = _("Incorrect auth strategy, expected \"%(expected)s\" but " + "received \"%(received)s\"") + + +class NotFound(GlanceException): + message = _("An object with the specified identifier was not found.") + + +class UnknownScheme(GlanceException): + message = _("Unknown scheme '%(scheme)s' found in URI") + + +class BadStoreUri(GlanceException): + message = _("The Store URI was malformed.") + + +class Duplicate(GlanceException): + message = _("An object with the same identifier already 
exists.") + + +class StorageFull(GlanceException): + message = _("There is not enough disk space on the image storage media.") + + +class StorageWriteDenied(GlanceException): + message = _("Permission to write image storage media denied.") + + +class AuthBadRequest(GlanceException): + message = _("Connect error/bad request to Auth service at URL %(url)s.") + + +class AuthUrlNotFound(GlanceException): + message = _("Auth service at URL %(url)s not found.") + + +class AuthorizationFailure(GlanceException): + message = _("Authorization failed.") + + +class NotAuthenticated(GlanceException): + message = _("You are not authenticated.") + + +class Forbidden(GlanceException): + message = _("You are not authorized to complete this action.") + + +class ForbiddenPublicImage(Forbidden): + message = _("You are not authorized to complete this action.") + + +class ProtectedImageDelete(Forbidden): + message = _("Image %(image_id)s is protected and cannot be deleted.") + + +#NOTE(bcwaldon): here for backwards-compatability, need to deprecate. 
+class NotAuthorized(Forbidden): + message = _("You are not authorized to complete this action.") + + +class Invalid(GlanceException): + message = _("Data supplied was not valid.") + + +class InvalidSortKey(Invalid): + message = _("Sort key supplied was not valid.") + + +class InvalidFilterRangeValue(Invalid): + message = _("Unable to filter using the specified range.") + + +class ReadonlyProperty(Forbidden): + message = _("Attribute '%(property)s' is read-only.") + + +class ReservedProperty(Forbidden): + message = _("Attribute '%(property)s' is reserved.") + + +class AuthorizationRedirect(GlanceException): + message = _("Redirecting to %(uri)s for authorization.") + + +class DatabaseMigrationError(GlanceException): + message = _("There was an error migrating the database.") + + +class ClientConnectionError(GlanceException): + message = _("There was an error connecting to a server") + + +class ClientConfigurationError(GlanceException): + message = _("There was an error configuring the client.") + + +class MultipleChoices(GlanceException): + message = _("The request returned a 302 Multiple Choices. This generally " + "means that you have not included a version indicator in a " + "request URI.\n\nThe body of response returned:\n%(body)s") + + +class LimitExceeded(GlanceException): + message = _("The request returned a 413 Request Entity Too Large. This " + "generally means that rate limiting or a quota threshold was " + "breached.\n\nThe response body:\n%(body)s") + + def __init__(self, *args, **kwargs): + self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') + else None) + super(LimitExceeded, self).__init__(*args, **kwargs) + + +class ServiceUnavailable(GlanceException): + message = _("The request returned 503 Service Unavilable. 
This " + "generally occurs on service overload or other transient " + "outage.") + + def __init__(self, *args, **kwargs): + self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') + else None) + super(ServiceUnavailable, self).__init__(*args, **kwargs) + + +class ServerError(GlanceException): + message = _("The request returned 500 Internal Server Error.") + + +class UnexpectedStatus(GlanceException): + message = _("The request returned an unexpected status: %(status)s." + "\n\nThe response body:\n%(body)s") + + +class InvalidContentType(GlanceException): + message = _("Invalid content type %(content_type)s") + + +class BadRegistryConnectionConfiguration(GlanceException): + message = _("Registry was not configured correctly on API server. " + "Reason: %(reason)s") + + +class BadStoreConfiguration(GlanceException): + message = _("Store %(store_name)s could not be configured correctly. " + "Reason: %(reason)s") + + +class BadDriverConfiguration(GlanceException): + message = _("Driver %(driver_name)s could not be configured correctly. " + "Reason: %(reason)s") + + +class StoreDeleteNotSupported(GlanceException): + message = _("Deleting images from this store is not supported.") + + +class StoreAddDisabled(GlanceException): + message = _("Configuration for store failed. Adding images to this " + "store is disabled.") + + +class InvalidNotifierStrategy(GlanceException): + message = _("'%(strategy)s' is not an available notifier strategy.") + + +class MaxRedirectsExceeded(GlanceException): + message = _("Maximum redirects (%(redirects)s) was exceeded.") + + +class InvalidRedirect(GlanceException): + message = _("Received invalid HTTP redirect.") + + +class NoServiceEndpoint(GlanceException): + message = _("Response from Keystone does not contain a Glance endpoint.") + + +class RegionAmbiguity(GlanceException): + message = _("Multiple 'image' service matches for region %(region)s. 
This " + "generally means that a region is required and you have not " + "supplied one.") + + +class WorkerCreationFailure(GlanceException): + message = _("Server worker creation failed: %(reason)s.") + + +class SchemaLoadError(GlanceException): + message = _("Unable to load schema: %(reason)s") + + +class InvalidObject(GlanceException): + message = _("Provided object does not match schema " + "'%(schema)s': %(reason)s") + + +class UnsupportedHeaderFeature(GlanceException): + message = _("Provided header feature is unsupported: %(feature)s") + + +class InUseByStore(GlanceException): + message = _("The image cannot be deleted because it is in use through " + "the backend store outside of Glance.") + + +class ImageSizeLimitExceeded(GlanceException): + message = _("The provided image is too large.") diff --git a/portas/portas/db/__init__.py b/portas/portas/db/__init__.py new file mode 100644 index 0000000..0a754b6 --- /dev/null +++ b/portas/portas/db/__init__.py @@ -0,0 +1 @@ +__author__ = 'sad' diff --git a/portas/portas/db/api.py b/portas/portas/db/api.py new file mode 100644 index 0000000..0a754b6 --- /dev/null +++ b/portas/portas/db/api.py @@ -0,0 +1 @@ +__author__ = 'sad' diff --git a/portas/portas/db/migrate_repo/README b/portas/portas/db/migrate_repo/README new file mode 100644 index 0000000..6218f8c --- /dev/null +++ b/portas/portas/db/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. 
+ +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/portas/portas/db/migrate_repo/__init__.py b/portas/portas/db/migrate_repo/__init__.py new file mode 100644 index 0000000..2f288d3 --- /dev/null +++ b/portas/portas/db/migrate_repo/__init__.py @@ -0,0 +1 @@ +# template repository default module diff --git a/portas/portas/db/migrate_repo/manage.py b/portas/portas/db/migrate_repo/manage.py new file mode 100644 index 0000000..67ac41c --- /dev/null +++ b/portas/portas/db/migrate_repo/manage.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# Copyright (c) 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from migrate.versioning.shell import main + +# This should probably be a console script entry point. +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/portas/portas/db/migrate_repo/migrate.cfg b/portas/portas/db/migrate_repo/migrate.cfg new file mode 100644 index 0000000..6761c45 --- /dev/null +++ b/portas/portas/db/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=Glance Migrations + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. 
+version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/portas/portas/db/migrate_repo/versions/001_add_initial_tables.py b/portas/portas/db/migrate_repo/versions/001_add_initial_tables.py new file mode 100644 index 0000000..1200328 --- /dev/null +++ b/portas/portas/db/migrate_repo/versions/001_add_initial_tables.py @@ -0,0 +1,41 @@ +from sqlalchemy.schema import MetaData, Table, Column, ForeignKey +from sqlalchemy.types import Integer, String, Text, DateTime + + +meta = MetaData() + +Table('datacenter', meta, + Column('id', String(32), primary_key=True), + Column('name', String(255)), + Column('type', String(255)), + Column('version', String(255)), + Column('tenant_id',String(100)), + Column('KMS', String(80)), + Column('WSUS', String(80)), + Column('extra', Text()), +) + +Table('service', meta, + Column('id', String(32), primary_key=True), + Column('datacenter_id', String(32), ForeignKey('datacenter.id')), + Column('name', String(255)), + Column('type', String(40)), + Column('status', String(255)), + Column('tenant_id', String(40)), + Column('created_at', DateTime, nullable=False), + Column('updated_at', DateTime, nullable=False), + Column('deployed', String(40)), + Column('vm_id',String(40)), + Column('extra', Text()), +) + + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + meta.create_all() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + meta.drop_all() diff --git 
a/portas/portas/db/migrate_repo/versions/__init__.py b/portas/portas/db/migrate_repo/versions/__init__.py new file mode 100644 index 0000000..507b5ff --- /dev/null +++ b/portas/portas/db/migrate_repo/versions/__init__.py @@ -0,0 +1 @@ +# template repository default versions module diff --git a/portas/portas/db/models.py b/portas/portas/db/models.py new file mode 100644 index 0000000..7901f37 --- /dev/null +++ b/portas/portas/db/models.py @@ -0,0 +1,181 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +SQLAlchemy models for glance data +""" + +from sqlalchemy import Column, Integer, String, BigInteger +from sqlalchemy.ext.compiler import compiles +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey, DateTime, Boolean, Text +from sqlalchemy.orm import relationship, backref, object_mapper +from sqlalchemy import UniqueConstraint + +import glance.db.sqlalchemy.api +from glance.openstack.common import timeutils +from glance.openstack.common import uuidutils + +BASE = declarative_base() + + +@compiles(BigInteger, 'sqlite') +def compile_big_int_sqlite(type_, compiler, **kw): + return 'INTEGER' + + +class ModelBase(object): + """Base class for Nova and Glance Models""" + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + __protected_attributes__ = set([ + "created_at", "updated_at", "deleted_at", "deleted"]) + + created_at = Column(DateTime, default=timeutils.utcnow, + nullable=False) + updated_at = Column(DateTime, default=timeutils.utcnow, + nullable=False, onupdate=timeutils.utcnow) + deleted_at = Column(DateTime) + deleted = Column(Boolean, nullable=False, default=False) + + def save(self, session=None): + """Save this object""" + session = session or glance.db.sqlalchemy.api.get_session() + session.add(self) + session.flush() + + def delete(self, session=None): + """Delete this object""" + self.deleted = True + self.deleted_at = timeutils.utcnow() + self.save(session=session) + + def update(self, values): + """dict.update() behaviour.""" + for k, v in values.iteritems(): + self[k] = v + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def __iter__(self): + self._i = iter(object_mapper(self).columns) + return self + + def next(self): + n = self._i.next().name + return n, getattr(self, n) + + def keys(self): + return self.__dict__.keys() + + def values(self): + return self.__dict__.values() + + def items(self): + 
return self.__dict__.items() + + def to_dict(self): + return self.__dict__.copy() + + +class Image(BASE, ModelBase): + """Represents an image in the datastore""" + __tablename__ = 'images' + + id = Column(String(36), primary_key=True, default=uuidutils.generate_uuid) + name = Column(String(255)) + disk_format = Column(String(20)) + container_format = Column(String(20)) + size = Column(BigInteger) + status = Column(String(30), nullable=False) + is_public = Column(Boolean, nullable=False, default=False) + checksum = Column(String(32)) + min_disk = Column(Integer(), nullable=False, default=0) + min_ram = Column(Integer(), nullable=False, default=0) + owner = Column(String(255)) + protected = Column(Boolean, nullable=False, default=False) + + +class ImageProperty(BASE, ModelBase): + """Represents an image properties in the datastore""" + __tablename__ = 'image_properties' + __table_args__ = (UniqueConstraint('image_id', 'name'), {}) + + id = Column(Integer, primary_key=True) + image_id = Column(String(36), ForeignKey('images.id'), + nullable=False) + image = relationship(Image, backref=backref('properties')) + + name = Column(String(255), index=True, nullable=False) + value = Column(Text) + + +class ImageTag(BASE, ModelBase): + """Represents an image tag in the datastore""" + __tablename__ = 'image_tags' + + id = Column(Integer, primary_key=True, nullable=False) + image_id = Column(String(36), ForeignKey('images.id'), nullable=False) + value = Column(String(255), nullable=False) + + +class ImageLocation(BASE, ModelBase): + """Represents an image location in the datastore""" + __tablename__ = 'image_locations' + + id = Column(Integer, primary_key=True, nullable=False) + image_id = Column(String(36), ForeignKey('images.id'), nullable=False) + image = relationship(Image, backref=backref('locations')) + value = Column(Text(), nullable=False) + + +class ImageMember(BASE, ModelBase): + """Represents an image members in the datastore""" + __tablename__ = 'image_members' + 
__table_args__ = (UniqueConstraint('image_id', 'member'), {}) + + id = Column(Integer, primary_key=True) + image_id = Column(String(36), ForeignKey('images.id'), + nullable=False) + image = relationship(Image, backref=backref('members')) + + member = Column(String(255), nullable=False) + can_share = Column(Boolean, nullable=False, default=False) + status = Column(String(20), nullable=False, default="pending") + + +def register_models(engine): + """ + Creates database tables for all models with the given engine + """ + models = (Image, ImageProperty, ImageMember) + for model in models: + model.metadata.create_all(engine) + + +def unregister_models(engine): + """ + Drops database tables for all models with the given engine + """ + models = (Image, ImageProperty) + for model in models: + model.metadata.drop_all(engine) diff --git a/portas/portas/db/session.py b/portas/portas/db/session.py new file mode 100644 index 0000000..c8e1c1c --- /dev/null +++ b/portas/portas/db/session.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Session management functions.""" + +import os +import logging + +from migrate.versioning import api as versioning_api +from migrate import exceptions as versioning_exceptions +from sqlalchemy import create_engine +from sqlalchemy.engine.url import make_url +from sqlalchemy.orm import sessionmaker +from sqlalchemy.pool import NullPool +from sqlalchemy.exc import DisconnectionError + +from windc.common import cfg +from windc.db import migrate_repo + + +DB_GROUP_NAME = 'sql' +DB_OPTIONS = ( + cfg.IntOpt('idle_timeout', default=3600), + cfg.StrOpt('connection', default='sqlite:///windc.sqlite'), +) + +MAKER = None +ENGINE = None + + +class MySQLPingListener(object): + """ + Ensures that MySQL connections checked out of the + pool are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + + Error codes caught: + * 2006 MySQL server has gone away + * 2013 Lost connection to MySQL server during query + * 2014 Commands out of sync; you can't run this command now + * 2045 Can't open shared memory; no answer from server (%lu) + * 2055 Lost connection to MySQL server at '%s', system error: %d + + from http://dev.mysql.com/doc/refman/5.6/ru_RU/error-messages-client.html + """ + + def checkout(self, dbapi_con, con_record, con_proxy): + try: + dbapi_con.cursor().execute('select 1') + except dbapi_con.OperationalError, ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + logging.warn('Got mysql server has gone away: %s', ex) + raise DisconnectionError("Database server went away") + else: + raise + + +def get_session(conf, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy session.""" + global MAKER + + if MAKER is None: + MAKER = sessionmaker(autocommit=autocommit, + expire_on_commit=expire_on_commit) + engine = get_engine(conf) + MAKER.configure(bind=engine) + session = MAKER() + return session + + +def get_engine(conf): + """Return a SQLAlchemy engine.""" + global ENGINE + + register_conf_opts(conf) + connection_url 
= make_url(conf.sql.connection) + if ENGINE is None or not ENGINE.url == connection_url: + engine_args = {'pool_recycle': conf.sql.idle_timeout, + 'echo': False, + 'convert_unicode': True + } + if 'sqlite' in connection_url.drivername: + engine_args['poolclass'] = NullPool + if 'mysql' in connection_url.drivername: + engine_args['listeners'] = [MySQLPingListener()] + ENGINE = create_engine(conf.sql.connection, **engine_args) + return ENGINE + + +def register_conf_opts(conf, options=DB_OPTIONS, group=DB_GROUP_NAME): + """Register database options.""" + + conf.register_group(cfg.OptGroup(name=group)) + conf.register_opts(options, group=group) + + +def sync(conf): + register_conf_opts(conf) + repo_path = os.path.abspath(os.path.dirname(migrate_repo.__file__)) + try: + versioning_api.upgrade(conf.sql.connection, repo_path) + except versioning_exceptions.DatabaseNotControlledError: + versioning_api.version_control(conf.sql.connection, repo_path) + versioning_api.upgrade(conf.sql.connection, repo_path) diff --git a/portas/portas/locale/ru/LC_MESSAGES/portas.po b/portas/portas/locale/ru/LC_MESSAGES/portas.po new file mode 100644 index 0000000..770835b --- /dev/null +++ b/portas/portas/locale/ru/LC_MESSAGES/portas.po @@ -0,0 +1,1216 @@ +# Russian translations for PROJECT. +# Copyright (C) 2012 ORGANIZATION +# This file is distributed under the same license as the PROJECT project. +# FIRST AUTHOR , 2012. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PROJECT VERSION\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2012-02-19 17:19-0800\n" +"PO-Revision-Date: 2012-04-26 17:40+0800\n" +"Last-Translator: FULL NAME \n" +"Language-Team: ru \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: glance/api/middleware/cache.py:50 +msgid "Initialized image cache middleware" +msgstr "" + +#: glance/api/middleware/cache.py:77 +#, python-format +msgid "Cache hit for image '%s'" +msgstr "" + +#: glance/api/middleware/cache.py:93 +#, python-format +msgid "" +"Image cache contained image file for image '%s', however the registry did" +" not contain metadata for that image!" +msgstr "" + +#: glance/api/middleware/cache.py:121 +#, python-format +msgid "Removing image %s from cache" +msgstr "" + +#: glance/api/middleware/cache_manage.py:70 +msgid "Initialized image cache management middleware" +msgstr "" + +#: glance/api/middleware/version_negotiation.py:53 +#, python-format +msgid "Processing request: %(method)s %(path)s Accept: %(accept)s" +msgstr "" + +#: glance/api/middleware/version_negotiation.py:66 +#, python-format +msgid "Matched versioned URI. Version: %d.%d" +msgstr "" + +#: glance/api/middleware/version_negotiation.py:73 +#, python-format +msgid "Unknown version in versioned URI: %d.%d. Returning version choices." +msgstr "" + +#: glance/api/middleware/version_negotiation.py:87 +#, python-format +msgid "Matched versioned media type. Version: %d.%d" +msgstr "" + +#: glance/api/middleware/version_negotiation.py:93 +#, python-format +msgid "Unknown version in accept header: %d.%d...returning version choices." +msgstr "" + +#: glance/api/middleware/version_negotiation.py:100 +#, python-format +msgid "Unknown accept header: %s...returning version choices." 
+msgstr "" + +#: glance/api/v1/controller.py:43 glance/api/v1/members.py:37 +#, python-format +msgid "Image with identifier %s not found" +msgstr "" + +#: glance/api/v1/controller.py:48 glance/api/v1/members.py:41 +msgid "Unauthorized image access" +msgstr "" + +#: glance/api/v1/controller.py:60 +#, python-format +msgid "Image %s is not active" +msgstr "" + +#: glance/api/v1/images.py:292 +#, python-format +msgid "An image with identifier %s already exists" +msgstr "" + +#: glance/api/v1/images.py:297 +#, python-format +msgid "Failed to reserve image. Got error: %(e)s" +msgstr "" + +#: glance/api/v1/images.py:302 +msgid "Not authorized to reserve image." +msgstr "" + +#: glance/api/v1/images.py:324 +msgid "Content-Type must be application/octet-stream" +msgstr "" + +#: glance/api/v1/images.py:334 +#, python-format +msgid "Setting image %s to status 'saving'" +msgstr "" + +#: glance/api/v1/images.py:338 +#, python-format +msgid "Uploading image data for image %(image_id)s to %(store_name)s store" +msgstr "" + +#: glance/api/v1/images.py:345 +msgid "Got request with no content-length and no x-image-meta-size header" +msgstr "" + +#: glance/api/v1/images.py:351 +#, python-format +msgid "" +"Denying attempt to upload image larger than %(max_image_size)d. Supplied " +"image size was %(image_size)d" +msgstr "" + +#: glance/api/v1/images.py:365 +#, python-format +msgid "" +"Supplied checksum (%(supplied_checksum)s) and checksum generated from " +"uploaded image (%(checksum)s) did not match. Setting image status to " +"'killed'." +msgstr "" + +#: glance/api/v1/images.py:376 +#, python-format +msgid "" +"Updating image %(image_id)s data. 
Checksum set to %(checksum)s, size set " +"to %(size)d" +msgstr "" + +#: glance/api/v1/images.py:387 +#, python-format +msgid "Attempt to upload duplicate image: %s" +msgstr "" + +#: glance/api/v1/images.py:394 +#, python-format +msgid "Unauthorized upload attempt: %s" +msgstr "" + +#: glance/api/v1/images.py:402 +#, python-format +msgid "Image storage media is full: %s" +msgstr "" + +#: glance/api/v1/images.py:410 +#, python-format +msgid "Insufficient permissions on image storage media: %s" +msgstr "" + +#: glance/api/v1/images.py:428 +#, python-format +msgid "Error uploading image: (%(class_name)s): %(exc)s" +msgstr "" + +#: glance/api/v1/images.py:473 +#, python-format +msgid "Unable to kill image %(id)s: %(exc)s" +msgstr "" + +#: glance/api/v1/images.py:532 glance/api/v1/images.py:564 +#: glance/api/v1/images.py:637 +msgid "Read-only access" +msgstr "" + +#: glance/api/v1/images.py:585 +msgid "Cannot upload to an unqueued image" +msgstr "" + +#: glance/api/v1/images.py:591 +msgid "Attempted to update Location field for an image not in queued status." +msgstr "" + +#: glance/api/v1/images.py:602 glance/registry/api/v1/images.py:392 +#, python-format +msgid "Failed to update image metadata. Got error: %(e)s" +msgstr "" + +#: glance/api/v1/images.py:644 +msgid "Image is protected" +msgstr "" + +#: glance/api/v1/images.py:680 +#, python-format +msgid "Requested store %s not available on this Glance server" +msgstr "" + +#: glance/api/v1/images.py:696 +#, python-format +msgid "Default store %s not available on this Glance server\n" +msgstr "" + +#: glance/api/v1/images.py:714 +#, python-format +msgid "Incoming image size of %s was not convertible to an integer." +msgstr "" + +#: glance/api/v1/images.py:723 +#, python-format +msgid "" +"Denying attempt to upload image larger than %(max_image_size)d. 
Supplied " +"image size was %(incoming_image_size)d" +msgstr "" + +#: glance/api/v1/images.py:800 +#, python-format +msgid "An error occurred during image.send notification: %(err)s" +msgstr "" + +#: glance/api/v1/images.py:831 +#, python-format +msgid "" +"An error occurred reading from backend storage for image %(image_id): " +"%(err)s" +msgstr "" + +#: glance/api/v1/images.py:837 +#, python-format +msgid "" +"Backend storage for image %(image_id)s disconnected after writing only " +"%(bytes_written)d bytes" +msgstr "" + +#: glance/api/v1/images.py:841 +#, python-format +msgid "Corrupt image download for image %(image_id)s" +msgstr "" + +#: glance/api/v1/members.py:53 glance/api/v1/members.py:88 +#: glance/api/v1/members.py:124 glance/registry/api/v1/members.py:70 +#: glance/registry/api/v1/members.py:173 glance/registry/api/v1/members.py:228 +msgid "No authenticated user" +msgstr "" + +#: glance/common/auth.py:168 glance/common/auth.py:213 +#, python-format +msgid "Unexpected response: %s" +msgstr "" + +#: glance/common/auth.py:235 +#, python-format +msgid "Unknown auth strategy '%s'" +msgstr "" + +#: glance/common/client.py:267 +msgid "" +"You have selected to use SSL in connecting, and you have supplied a cert," +" however you have failed to supply either a key_file parameter or set the" +" GLANCE_CLIENT_KEY_FILE environ variable" +msgstr "" + +#: glance/common/client.py:275 +msgid "" +"You have selected to use SSL in connecting, and you have supplied a key, " +"however you have failed to supply either a cert_file parameter or set the" +" GLANCE_CLIENT_CERT_FILE environ variable" +msgstr "" + +#: glance/common/client.py:283 +#, python-format +msgid "The key file you specified %s does not exist" +msgstr "" + +#: glance/common/client.py:289 +#, python-format +msgid "The cert file you specified %s does not exist" +msgstr "" + +#: glance/common/client.py:295 +#, python-format +msgid "The CA file you specified %s does not exist" +msgstr "" + +#: 
glance/common/config.py:90 +msgid "Invalid syslog facility" +msgstr "" + +#: glance/common/exception.py:37 +msgid "An unknown exception occurred" +msgstr "" + +#: glance/common/exception.py:59 +msgid "Missing required argument." +msgstr "" + +#: glance/common/exception.py:63 +#, python-format +msgid "Missing required credential: %(required)s" +msgstr "" + +#: glance/common/exception.py:67 +msgid "An object with the specified identifier was not found." +msgstr "" + +#: glance/common/exception.py:71 +#, python-format +msgid "Unknown scheme '%(scheme)s' found in URI" +msgstr "" + +#: glance/common/exception.py:75 +#, python-format +msgid "The Store URI %(uri)s was malformed. Reason: %(reason)s" +msgstr "" + +#: glance/common/exception.py:79 +msgid "An object with the same identifier already exists." +msgstr "" + +#: glance/common/exception.py:83 +msgid "There is not enough disk space on the image storage media." +msgstr "" + +#: glance/common/exception.py:87 +msgid "Permission to write image storage media denied." +msgstr "" + +#: glance/common/exception.py:91 +#, python-format +msgid "" +"Failed to import requested object/class: '%(import_str)s'. Reason: " +"%(reason)s" +msgstr "" + +#: glance/common/exception.py:96 +#, python-format +msgid "Connect error/bad request to Auth service at URL %(url)s." +msgstr "" + +#: glance/common/exception.py:100 +#, python-format +msgid "Auth service at URL %(url)s not found." +msgstr "" + +#: glance/common/exception.py:104 +msgid "Authorization failed." +msgstr "" + +#: glance/common/exception.py:108 glance/common/exception.py:112 +msgid "You are not authorized to complete this action." +msgstr "" + +#: glance/common/exception.py:116 +msgid "Data supplied was not valid." +msgstr "" + +#: glance/common/exception.py:120 +#, python-format +msgid "Redirecting to %(uri)s for authorization." +msgstr "" + +#: glance/common/exception.py:124 +msgid "There was an error migrating the database." 
+msgstr "" + +#: glance/common/exception.py:128 +msgid "There was an error connecting to a server" +msgstr "" + +#: glance/common/exception.py:132 +msgid "There was an error configuring the client." +msgstr "" + +#: glance/common/exception.py:136 +#, python-format +msgid "" +"The request returned a 302 Multiple Choices. This generally means that " +"you have not included a version indicator in a request URI.\n" +"\n" +"The body of response returned:\n" +"%(body)s" +msgstr "" + +#: glance/common/exception.py:142 +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "" + +#: glance/common/exception.py:146 +#, python-format +msgid "Registry was not configured correctly on API server. Reason: %(reason)s" +msgstr "" + +#: glance/common/exception.py:151 +#, python-format +msgid "Store %(store_name)s could not be configured correctly. Reason: %(reason)s" +msgstr "" + +#: glance/common/exception.py:156 +#, python-format +msgid "" +"Driver %(driver_name)s could not be configured correctly. Reason: " +"%(reason)s" +msgstr "" + +#: glance/common/exception.py:161 +msgid "Deleting images from this store is not supported." +msgstr "" + +#: glance/common/exception.py:165 +msgid "Configuration for store failed. Adding images to this store is disabled." +msgstr "" + +#: glance/common/exception.py:170 +#, python-format +msgid "'%(strategy)s' is not an available notifier strategy." +msgstr "" + +#: glance/common/exception.py:174 +#, python-format +msgid "Maximum redirects (%(redirects)s) was exceeded." +msgstr "" + +#: glance/common/exception.py:178 +msgid "Received invalid HTTP redirect." +msgstr "" + +#: glance/common/exception.py:182 +msgid "Response from Keystone does not contain a Glance endpoint." 
+msgstr "" + +#: glance/common/wsgi.py:106 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: glance/common/wsgi.py:124 +#, python-format +msgid "Could not bind to %s:%s after trying for 30 seconds" +msgstr "" + +#: glance/common/wsgi.py:155 +msgid "SIGTERM received" +msgstr "" + +#: glance/common/wsgi.py:164 +msgid "SIGHUP received" +msgstr "" + +#: glance/common/wsgi.py:180 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: glance/common/wsgi.py:191 +#, python-format +msgid "Removing dead child %s" +msgstr "" + +#: glance/common/wsgi.py:199 +msgid "Caught keyboard interrupt. Exiting." +msgstr "" + +#: glance/common/wsgi.py:203 +msgid "Exited" +msgstr "" + +#: glance/common/wsgi.py:221 +#, python-format +msgid "Child %d exiting normally" +msgstr "" + +#: glance/common/wsgi.py:224 +#, python-format +msgid "Started child %s" +msgstr "" + +#: glance/common/wsgi.py:243 +msgid "Starting single process server" +msgstr "" + +#: glance/image_cache/__init__.py:57 +#, python-format +msgid "Image cache loaded driver '%s'." +msgstr "" + +#: glance/image_cache/__init__.py:60 +#, python-format +msgid "" +"Image cache driver '%(driver_name)s' failed to load. Got error: " +"'%(import_err)s." +msgstr "" + +#: glance/image_cache/__init__.py:65 glance/image_cache/__init__.py:82 +msgid "Defaulting to SQLite driver." +msgstr "" + +#: glance/image_cache/__init__.py:79 +#, python-format +msgid "" +"Image cache driver '%(driver_module)s' failed to configure. Got error: " +"'%(config_err)s" +msgstr "" + +#: glance/image_cache/__init__.py:164 +msgid "Image cache has free space, skipping prune..." +msgstr "" + +#: glance/image_cache/__init__.py:168 +#, python-format +msgid "" +"Image cache currently %(overage)d bytes over max size. 
Starting prune to " +"max size of %(max_size)d " +msgstr "" + +#: glance/image_cache/__init__.py:177 +#, python-format +msgid "Pruning '%(image_id)s' to free %(size)d bytes" +msgstr "" + +#: glance/image_cache/__init__.py:185 +#, python-format +msgid "" +"Pruning finished pruning. Pruned %(total_files_pruned)d and " +"%(total_bytes_pruned)d." +msgstr "" + +#: glance/image_cache/__init__.py:220 +#, python-format +msgid "Tee'ing image '%s' into cache" +msgstr "" + +#: glance/image_cache/__init__.py:232 +#, python-format +msgid "" +"Exception encountered while tee'ing image '%s' into cache. Continuing " +"with response." +msgstr "" + +#: glance/image_cache/prefetcher.py:55 +#, python-format +msgid "Image '%s' is not active. Not caching." +msgstr "" + +#: glance/image_cache/prefetcher.py:60 glance/image_cache/queue_image.py:52 +#, python-format +msgid "No metadata found for image '%s'" +msgstr "" + +#: glance/image_cache/prefetcher.py:64 +#, python-format +msgid "Caching image '%s'" +msgstr "" + +#: glance/image_cache/prefetcher.py:72 +msgid "Nothing to prefetch." +msgstr "" + +#: glance/image_cache/prefetcher.py:76 +#, python-format +msgid "Found %d images to prefetch" +msgstr "" + +#: glance/image_cache/prefetcher.py:82 +msgid "Failed to successfully cache all images in queue." +msgstr "" + +#: glance/image_cache/prefetcher.py:86 +#, python-format +msgid "Successfully cached all %d images" +msgstr "" + +#: glance/image_cache/queue_image.py:47 +#, python-format +msgid "Image '%s' is not active. Not queueing." +msgstr "" + +#: glance/image_cache/queue_image.py:55 +#, python-format +msgid "Queueing image '%s'" +msgstr "" + +#: glance/image_cache/queue_image.py:63 +msgid "No images to queue!" +msgstr "" + +#: glance/image_cache/queue_image.py:66 +#, python-format +msgid "Received %d images to queue" +msgstr "" + +#: glance/image_cache/queue_image.py:72 +msgid "Failed to successfully queue all images in queue." 
+msgstr "" + +#: glance/image_cache/queue_image.py:76 +#, python-format +msgid "Successfully queued all %d images" +msgstr "" + +#: glance/image_cache/drivers/base.py:65 +#, python-format +msgid "Failed to read %s from config" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:120 +#, python-format +msgid "Failed to initialize the image cache database. Got error: %s" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:159 +#: glance/image_cache/drivers/xattr.py:143 +msgid "Gathering cached image entries." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:294 +#: glance/image_cache/drivers/xattr.py:273 +#, python-format +msgid "Fetch finished, moving '%(incomplete_path)s' to '%(final_path)s'" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:318 +#: glance/image_cache/drivers/xattr.py:289 +#, python-format +msgid "" +"Fetch of cache file failed, rolling back by moving '%(incomplete_path)s' " +"to '%(invalid_path)s'" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:372 +#, python-format +msgid "Error executing SQLite call. Got error: %s" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:388 +#: glance/image_cache/drivers/xattr.py:327 +#, python-format +msgid "Not queueing image '%s'. Already cached." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:393 +#: glance/image_cache/drivers/xattr.py:332 +#, python-format +msgid "Not queueing image '%s'. Already being written to cache" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:399 +#: glance/image_cache/drivers/xattr.py:338 +#, python-format +msgid "Not queueing image '%s'. Already queued." 
+msgstr "" + +#: glance/image_cache/drivers/sqlite.py:460 +#: glance/image_cache/drivers/xattr.py:429 +#, python-format +msgid "Deleting image cache file '%s'" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:463 +#: glance/image_cache/drivers/xattr.py:432 +#, python-format +msgid "Cached image file '%s' doesn't exist, unable to delete" +msgstr "" + +#: glance/image_cache/drivers/xattr.py:104 +#, python-format +msgid "" +"The device housing the image cache directory %(image_cache_dir)s does not" +" support xattr. It is likely you need to edit your fstab and add the " +"user_xattr option to the appropriate line for the device housing the " +"cache directory." +msgstr "" + +#: glance/image_cache/drivers/xattr.py:281 +#, python-format +msgid "Removing image '%s' from queue after caching it." +msgstr "" + +#: glance/image_cache/drivers/xattr.py:343 +#, python-format +msgid "Queueing image '%s'." +msgstr "" + +#: glance/image_cache/drivers/xattr.py:375 +#, python-format +msgid "No grace period, reaping '%(path)s' immediately" +msgstr "" + +#: glance/image_cache/drivers/xattr.py:380 +#, python-format +msgid "Cache entry '%(path)s' exceeds grace period, (%(age)i s > %(grace)i s)" +msgstr "" + +#: glance/image_cache/drivers/xattr.py:385 +#, python-format +msgid "Reaped %(reaped)s %(entry_type)s cache entries" +msgstr "" + +#: glance/notifier/notify_kombu.py:88 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: glance/notifier/notify_kombu.py:92 +#, python-format +msgid "Connecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: glance/notifier/notify_kombu.py:121 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: glance/notifier/notify_kombu.py:150 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: glance/notifier/notify_kombu.py:162 +#, python-format +msgid "" +"AMQP 
server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: glance/notifier/notify_kombu.py:169 +#, python-format +msgid "Notification with priority %(priority)s failed; msg=%s" +msgstr "" + +#: glance/notifier/notify_kombu.py:206 +#, python-format +msgid "Unable to send notification: %s" +msgstr "" + +#: glance/notifier/notify_qpid.py:113 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: glance/registry/__init__.py:66 +msgid "Configuration option was not valid" +msgstr "" + +#: glance/registry/__init__.py:70 +msgid "Could not find required configuration option" +msgstr "" + +#: glance/registry/__init__.py:117 +msgid "Adding image metadata..." +msgstr "" + +#: glance/registry/__init__.py:124 +#, python-format +msgid "Updating image metadata for image %s..." +msgstr "" + +#: glance/registry/__init__.py:130 +#, python-format +msgid "Deleting image metadata for image %s..." +msgstr "" + +#: glance/registry/api/v1/images.py:70 +msgid "Invalid marker. Image could not be found." +msgstr "" + +#: glance/registry/api/v1/images.py:171 +msgid "Unrecognized changes-since value" +msgstr "" + +#: glance/registry/api/v1/images.py:176 +msgid "protected must be True, or False" +msgstr "" + +#: glance/registry/api/v1/images.py:202 +msgid "limit param must be an integer" +msgstr "" + +#: glance/registry/api/v1/images.py:205 +msgid "limit param must be positive" +msgstr "" + +#: glance/registry/api/v1/images.py:214 +msgid "Invalid marker format" +msgstr "" + +#: glance/registry/api/v1/images.py:224 +#, python-format +msgid "Unsupported sort_key. Acceptable values: %s" +msgstr "" + +#: glance/registry/api/v1/images.py:233 +#, python-format +msgid "Unsupported sort_dir. 
Acceptable values: %s" +msgstr "" + +#: glance/registry/api/v1/images.py:259 +msgid "is_public must be None, True, or False" +msgstr "" + +#: glance/registry/api/v1/images.py:280 glance/registry/api/v1/images.py:403 +#: glance/registry/api/v1/members.py:47 glance/registry/api/v1/members.py:81 +#: glance/registry/api/v1/members.py:183 glance/registry/api/v1/members.py:238 +#, python-format +msgid "Access by %(user)s to image %(id)s denied" +msgstr "" + +#: glance/registry/api/v1/images.py:306 +#, python-format +msgid "Access by %(user)s to delete public image %(id)s denied" +msgstr "" + +#: glance/registry/api/v1/images.py:312 +#, python-format +msgid "Access by %(user)s to delete private image %(id)s denied" +msgstr "" + +#: glance/registry/api/v1/images.py:345 +msgid "Invalid image id format" +msgstr "" + +#: glance/registry/api/v1/images.py:352 +#, python-format +msgid "Image with identifier %s already exists!" +msgstr "" + +#: glance/registry/api/v1/images.py:356 +#, python-format +msgid "Failed to add image metadata. Got error: %(e)s" +msgstr "" + +#: glance/registry/api/v1/images.py:382 +#, python-format +msgid "Updating image %(id)s with metadata: %(image_data)r" +msgstr "" + +#: glance/registry/api/v1/members.py:89 glance/registry/api/v1/members.py:191 +#: glance/registry/api/v1/members.py:246 +msgid "No permission to share that image" +msgstr "" + +#: glance/registry/api/v1/members.py:97 glance/registry/api/v1/members.py:110 +#: glance/registry/api/v1/members.py:201 +#, python-format +msgid "Invalid membership association: %s" +msgstr "" + +#: glance/registry/api/v1/members.py:275 +msgid "Invalid marker. Membership could not be found." +msgstr "" + +#: glance/registry/db/api.py:83 +#, python-format +msgid "" +"Error configuring registry database with supplied sql_connection " +"'%(sql_connection)s'. Got error:\n" +"%(err)s" +msgstr "" + +#: glance/registry/db/api.py:100 +msgid "Attempted to modify image user did not own." 
+msgstr "" + +#: glance/registry/db/api.py:101 +msgid "You do not own this image" +msgstr "" + +#: glance/registry/db/migration.py:47 +#, python-format +msgid "database '%(sql_connection)s' is not under migration control" +msgstr "" + +#: glance/registry/db/migration.py:64 +#, python-format +msgid "Upgrading %(sql_connection)s to version %(version_str)s" +msgstr "" + +#: glance/registry/db/migration.py:80 +#, python-format +msgid "Downgrading %(sql_connection)s to version %(version)s" +msgstr "" + +#: glance/registry/db/migration.py:95 +#, python-format +msgid "database '%(sql_connection)s' is already under migration control" +msgstr "" + +#: glance/registry/db/migrate_repo/schema.py:96 +#, python-format +msgid "creating table %(table)s" +msgstr "" + +#: glance/registry/db/migrate_repo/schema.py:102 +#, python-format +msgid "dropping table %(table)s" +msgstr "" + +#: glance/store/__init__.py:196 +#, python-format +msgid "Failed to delete image at %s from store (%s)" +msgstr "" + +#: glance/store/__init__.py:209 +#, python-format +msgid "Image id %(image_id)s already queued for delete" +msgstr "" + +#: glance/store/base.py:44 +msgid "Failed to configure store correctly. Disabling add method." +msgstr "" + +#: glance/store/filesystem.py:59 +msgid "No path specified" +msgstr "" + +#: glance/store/filesystem.py:111 +#, python-format +msgid "Could not find %s in configuration options." +msgstr "" + +#: glance/store/filesystem.py:118 +#, python-format +msgid "Directory to write image files does not exist (%s). Creating." +msgstr "" + +#: glance/store/filesystem.py:124 +#, python-format +msgid "Unable to create datadir: %s" +msgstr "" + +#: glance/store/filesystem.py:142 +#, python-format +msgid "Image file %s not found" +msgstr "" + +#: glance/store/filesystem.py:144 +#, python-format +msgid "Found image at %s. Returning in ChunkedFile." 
+msgstr "" + +#: glance/store/filesystem.py:163 +#, python-format +msgid "Deleting image at %(fn)s" +msgstr "" + +#: glance/store/filesystem.py:166 +#, python-format +msgid "You cannot delete file %s" +msgstr "" + +#: glance/store/filesystem.py:169 +#, python-format +msgid "Image file %s does not exist" +msgstr "" + +#: glance/store/filesystem.py:194 +#, python-format +msgid "Image file %s already exists!" +msgstr "" + +#: glance/store/filesystem.py:218 +#, python-format +msgid "" +"Wrote %(bytes_written)d bytes to %(filepath)s with checksum " +"%(checksum_hex)s" +msgstr "" + +#: glance/store/http.py:76 +#, python-format +msgid "Credentials '%s' not well-formatted." +msgstr "" + +#: glance/store/http.py:82 +msgid "No address specified in HTTP URL" +msgstr "" + +#: glance/store/location.py:123 +#, python-format +msgid "Unable to find StoreLocation class in store %s" +msgstr "" + +#: glance/store/rbd.py:63 +msgid "URI must start with rbd://" +msgstr "" + +#: glance/store/rbd.py:96 glance/store/rbd.py:202 +#, python-format +msgid "RBD image %s does not exist" +msgstr "" + +#: glance/store/rbd.py:128 +#, python-format +msgid "Error in store configuration: %s" +msgstr "" + +#: glance/store/rbd.py:172 +#, python-format +msgid "RBD image %s already exists" +msgstr "" + +#: glance/store/s3.py:90 +msgid "" +"URI cannot contain more than one occurrence of a scheme.If you have " +"specified a URI like " +"s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id, you need" +" to change it to use the s3+https:// scheme, like so: " +"s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id" +msgstr "" + +#: glance/store/s3.py:125 +#, python-format +msgid "Badly formed S3 credentials %s" +msgstr "" + +#: glance/store/s3.py:137 +msgid "Badly formed S3 URI. Missing s3 service URL." 
+msgstr "" + +#: glance/store/s3.py:140 +msgid "Badly formed S3 URI" +msgstr "" + +#: glance/store/s3.py:233 glance/store/swift.py:312 +#, python-format +msgid "Could not find %(param)s in configuration options." +msgstr "" + +#: glance/store/s3.py:281 +#, python-format +msgid "" +"Retrieved image object from S3 using (s3_host=%(s3_host)s, " +"access_key=%(accesskey)s, bucket=%(bucket)s, key=%(obj_name)s)" +msgstr "" + +#: glance/store/s3.py:333 +#, python-format +msgid "S3 already has an image at location %s" +msgstr "" + +#: glance/store/s3.py:336 +#, python-format +msgid "" +"Adding image object to S3 using (s3_host=%(s3_host)s, " +"access_key=%(access_key)s, bucket=%(bucket)s, key=%(obj_name)s)" +msgstr "" + +#: glance/store/s3.py:357 +#, python-format +msgid "Writing request body file to temporary file for %s" +msgstr "" + +#: glance/store/s3.py:371 +#, python-format +msgid "Uploading temporary file to S3 for %s" +msgstr "" + +#: glance/store/s3.py:379 +#, python-format +msgid "" +"Wrote %(size)d bytes to S3 key named %(obj_name)s with checksum " +"%(checksum_hex)s" +msgstr "" + +#: glance/store/s3.py:404 +#, python-format +msgid "" +"Deleting image object from S3 using (s3_host=%(s3_host)s, " +"access_key=%(accesskey)s, bucket=%(bucket)s, key=%(obj_name)s)" +msgstr "" + +#: glance/store/s3.py:425 +#, python-format +msgid "Could not find bucket with ID %(bucket_id)s" +msgstr "" + +#: glance/store/s3.py:473 +#, python-format +msgid "Could not find key %(obj)s in bucket %(bucket)s" +msgstr "" + +#: glance/store/scrubber.py:42 +#, python-format +msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" +msgstr "" + +#: glance/store/scrubber.py:55 +msgid "Daemon Shutdown on KeyboardInterrupt" +msgstr "" + +#: glance/store/scrubber.py:59 +msgid "Runing application" +msgstr "" + +#: glance/store/scrubber.py:62 +#, python-format +msgid "Next run scheduled in %s seconds" +msgstr "" + +#: glance/store/scrubber.py:83 +#, python-format +msgid "Initializing 
scrubber with conf: %s" +msgstr "" + +#: glance/store/scrubber.py:98 +#, python-format +msgid "%s does not exist" +msgstr "" + +#: glance/store/scrubber.py:120 glance/store/scrubber.py:175 +#, python-format +msgid "Deleting %s images" +msgstr "" + +#: glance/store/scrubber.py:129 +#, python-format +msgid "Deleting %(uri)s" +msgstr "" + +#: glance/store/scrubber.py:132 +#, python-format +msgid "Failed to delete image from store (%(uri)s)." +msgstr "" + +#: glance/store/scrubber.py:151 +#, python-format +msgid "Getting images deleted before %s" +msgstr "" + +#: glance/store/swift.py:105 +msgid "" +"URI cannot contain more than one occurrence of a scheme.If you have " +"specified a URI like " +"swift://user:pass@http://authurl.com/v1/container/obj, you need to change" +" it to use the swift+http:// scheme, like so: " +"swift+http://user:pass@authurl.com/v1/container/obj" +msgstr "" + +#: glance/store/swift.py:142 +#, python-format +msgid "Badly formed credentials '%(creds)s' in Swift URI" +msgstr "" + +#: glance/store/swift.py:163 +msgid "Badly formed Swift URI" +msgstr "" + +#: glance/store/swift.py:229 +#, python-format +msgid "Error in configuration conf: %s" +msgstr "" + +#: glance/store/swift.py:264 glance/store/swift.py:506 +#, python-format +msgid "Swift could not find image at uri %(uri)s" +msgstr "" + +#: glance/store/swift.py:303 +#, python-format +msgid "" +"Creating Swift connection with (auth_address=%(auth_url)s, user=%(user)s," +" snet=%(snet)s)" +msgstr "" + +#: glance/store/swift.py:367 +#, python-format +msgid "Adding image object '%(obj_name)s' to Swift" +msgstr "" + +#: glance/store/swift.py:387 +msgid "Cannot determine image size. Adding as a segmented object to Swift." 
+msgstr "" + +#: glance/store/swift.py:411 +#, python-format +msgid "" +"Wrote chunk %(chunk_id)d/%(total_chunks)s of length %(bytes_read)d to " +"Swift returning MD5 of content: %(chunk_etag)s" +msgstr "" + +#: glance/store/swift.py:420 +msgid "Deleting final zero-length chunk" +msgstr "" + +#: glance/store/swift.py:456 +#, python-format +msgid "Swift already has an image at location %s" +msgstr "" + +#: glance/store/swift.py:458 +#, python-format +msgid "" +"Failed to add object to Swift.\n" +"Got error from Swift: %(e)s" +msgstr "" + +#: glance/store/swift.py:546 +#, python-format +msgid "" +"Failed to add container to Swift.\n" +"Got error from Swift: %(e)s" +msgstr "" + +#: glance/store/swift.py:550 +#, python-format +msgid "" +"The container %(container)s does not exist in Swift. Please set the " +"swift_store_create_container_on_put optionto add container to Swift " +"automatically." +msgstr "" + diff --git a/portas/portas/openstack/__init__.py b/portas/portas/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/openstack/common/__init__.py b/portas/portas/openstack/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/openstack/common/eventlet_backdoor.py b/portas/portas/openstack/common/eventlet_backdoor.py new file mode 100644 index 0000000..8b81ebf --- /dev/null +++ b/portas/portas/openstack/common/eventlet_backdoor.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Openstack, LLC. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gc +import pprint +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet +from oslo.config import cfg + +eventlet_backdoor_opts = [ + cfg.IntOpt('backdoor_port', + default=None, + help='port for eventlet backdoor to listen') +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) + + +def _dont_use_this(): + print "Don't use this, just disconnect instead" + + +def _find_objects(t): + return filter(lambda o: isinstance(o, t), gc.get_objects()) + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print i, gt + traceback.print_stack(gt.gr_frame) + print + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print threadId + traceback.print_stack(stack) + print + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. 
+ def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = eventlet.listen(('localhost', CONF.backdoor_port)) + port = sock.getsockname()[1] + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/portas/portas/openstack/common/exception.py b/portas/portas/openstack/common/exception.py new file mode 100644 index 0000000..1452018 --- /dev/null +++ b/portas/portas/openstack/common/exception.py @@ -0,0 +1,142 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exceptions common to OpenStack projects +""" + +import logging + +from portas.openstack.common.gettextutils import _ + +_FATAL_EXCEPTION_FORMAT_ERRORS = False + + +class Error(Exception): + def __init__(self, message=None): + super(Error, self).__init__(message) + + +class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): + self.message = message + self.code = code + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class UnknownScheme(Error): + + msg = "Unknown scheme '%s' found in URI" + + def __init__(self, scheme): + msg = self.__class__.msg % scheme + super(UnknownScheme, self).__init__(msg) + + +class BadStoreUri(Error): + + msg = "The Store URI %s was malformed. 
Reason: %s" + + def __init__(self, uri, reason): + msg = self.__class__.msg % (uri, reason) + super(BadStoreUri, self).__init__(msg) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class BadInputError(Exception): + """Error resulting from a client sending bad input to a server""" + pass + + +class MissingArgumentError(Error): + pass + + +class DatabaseMigrationError(Error): + pass + + +class ClientConnectionError(Exception): + """Error resulting from a client connecting to a server""" + pass + + +def wrap_exception(f): + def _wrap(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + if not isinstance(e, Error): + #exc_type, exc_value, exc_traceback = sys.exc_info() + logging.exception(_('Uncaught exception')) + #logging.error(traceback.extract_stack(exc_traceback)) + raise Error(str(e)) + raise + _wrap.func_name = f.func_name + return _wrap + + +class OpenstackException(Exception): + """ + Base Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ """ + message = "An unknown exception occurred" + + def __init__(self, **kwargs): + try: + self._error_string = self.message % kwargs + + except Exception as e: + if _FATAL_EXCEPTION_FORMAT_ERRORS: + raise e + else: + # at least get the core message out if something happened + self._error_string = self.message + + def __str__(self): + return self._error_string + + +class MalformedRequestBody(OpenstackException): + message = "Malformed message body: %(reason)s" + + +class InvalidContentType(OpenstackException): + message = "Invalid content type %(content_type)s" diff --git a/portas/portas/openstack/common/gettextutils.py b/portas/portas/openstack/common/gettextutils.py new file mode 100644 index 0000000..e196060 --- /dev/null +++ b/portas/portas/openstack/common/gettextutils.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. 
+ +Usual usage in an openstack.common module: + + from portas.openstack.common.gettextutils import _ +""" + +import gettext + + +t = gettext.translation('openstack-common', 'locale', fallback=True) + + +def _(msg): + return t.ugettext(msg) diff --git a/portas/portas/openstack/common/importutils.py b/portas/portas/openstack/common/importutils.py new file mode 100644 index 0000000..9dec764 --- /dev/null +++ b/portas/portas/openstack/common/importutils.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
+""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ValueError, AttributeError): + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """ + Import a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/portas/portas/openstack/common/jsonutils.py b/portas/portas/openstack/common/jsonutils.py new file mode 100644 index 0000000..2682c39 --- /dev/null +++ b/portas/portas/openstack/common/jsonutils.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. +''' + + +import datetime +import functools +import inspect +import itertools +import json +import logging +import xmlrpclib + +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. 
+ + """ + nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + for test in nasty: + if test(value): + return unicode(value) + + # value of itertools.count doesn't get caught by inspects + # above and results in infinite loop when list(value) is called. + if type(value) == itertools.count: + return unicode(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > max_depth: + LOG.error(_('Max serialization depth exceeded on object: %d %s'), + level, value) + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... 
+ try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if isinstance(value, (list, tuple)): + return [recursive(v) for v in value] + elif isinstance(value, dict): + return dict((k, recursive(v)) for k, v in value.iteritems()) + elif convert_datetime and isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif hasattr(value, 'iteritems'): + return recursive(dict(value.iteritems()), level=level + 1) + elif hasattr(value, '__iter__'): + return recursive(list(value)) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. + return recursive(value.__dict__, level=level + 1) + else: + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return unicode(value) + + +def dumps(value, default=to_primitive, **kwargs): + return json.dumps(value, default=default, **kwargs) + + +def loads(s): + return json.loads(s) + + +def load(s): + return json.load(s) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/portas/portas/openstack/common/local.py b/portas/portas/openstack/common/local.py new file mode 100644 index 0000000..8bdc837 --- /dev/null +++ b/portas/portas/openstack/common/local.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Greenthread local storage of variables using weak references""" + +import weakref + +from eventlet import corolocal + + +class WeakLocal(corolocal.local): + def __getattribute__(self, attr): + rval = corolocal.local.__getattribute__(self, attr) + if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return corolocal.local.__setattr__(self, attr, value) + + +# NOTE(mikal): the name "store" should be deprecated in the future +store = WeakLocal() + +# A "weak" store uses weak references and allows an object to fall out of scope +# when it falls out of scope in the code that uses the thread local storage. A +# "strong" store will hold a reference to the object so that it never falls out +# of scope. +weak_store = WeakLocal() +strong_store = corolocal.local diff --git a/portas/portas/openstack/common/log.py b/portas/portas/openstack/common/log.py new file mode 100644 index 0000000..8e2dac2 --- /dev/null +++ b/portas/portas/openstack/common/log.py @@ -0,0 +1,521 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. 
+ +""" + +import cStringIO +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import stat +import sys +import traceback + +from oslo.config import cfg + +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import jsonutils +from portas.openstack.common import local +from portas.openstack.common import notifier + + +_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=_DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. 
' + 'If not set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The directory to keep log files in ' + '(will be prepended to --log-file)'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s ' + '[%(request_id)s %(user)s %(tenant)s] %(instance)s' + '%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqplib=WARN', + 'sqlalchemy=WARN', + 'boto=WARN', + 'suds=INFO', + 'keystone=INFO', + 'eventlet.wsgi.server=WARN' + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. 
+ cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + +class ContextAdapter(logging.LoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if 
CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + else: + instance_uuid = kwargs.pop('instance_uuid', None) + if instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"project": self.project}) + extra.update({"version": self.version}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [itertools.ifilter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if ('portas.openstack.common.notifier.log_notifier' in + CONF.notification_driver): + return + notifier.api.notify(None, 'error.publisher', + 'error_notification', + notifier.api.ERROR, + dict(error=record.msg)) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(type, value, tb): + extra = {} + if CONF.verbose: + extra['exc_info'] = (type, value, tb) + getLogger(product_name).critical(str(value), **extra) + return logging_excepthook + + +def setup(product_name): + """Setup logging.""" + if CONF.log_config: + logging.config.fileConfig(CONF.log_config) + else: + _setup_logging_from_conf(product_name) + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + 
logging_context_format_string= + logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +def _setup_logging_from_conf(product_name): + log_root = getLogger(product_name).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + mode = int(CONF.logfile_mode, 8) + st = os.stat(logpath) + if st.st_mode != (stat.S_IFREG | mode): + os.chmod(logpath, mode) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not CONF.log_file: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + + for handler in log_root.handlers: + datefmt = CONF.log_date_format + if 
CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + handler.setFormatter(LegacyFormatter(datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + level = logging.NOTSET + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + level = logging.getLevelName(level_name) + logger = logging.getLogger(mod) + logger.setLevel(level) + for handler in log_root.handlers: + logger.addHandler(handler) + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) + + +class LegacyFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. 
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formating params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formated copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + 
+ def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/portas/portas/openstack/common/loopingcall.py b/portas/portas/openstack/common/loopingcall.py new file mode 100644 index 0000000..c2be723 --- /dev/null +++ b/portas/portas/openstack/common/loopingcall.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from eventlet import event +from eventlet import greenthread + +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import log as logging +from portas.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
+ + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCall(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = timeutils.utcnow() + self.f(*self.args, **self.kw) + end = timeutils.utcnow() + if not self._running: + break + delay = interval - timeutils.delta_seconds(start, end) + if delay <= 0: + LOG.warn(_('task run outlasted interval by %s sec') % + -delay) + greenthread.sleep(delay if delay > 0 else 0) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() diff --git a/portas/portas/openstack/common/notifier/__init__.py b/portas/portas/openstack/common/notifier/__init__.py new file mode 100644 index 0000000..482d54e --- /dev/null +++ b/portas/portas/openstack/common/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/portas/portas/openstack/common/notifier/api.py b/portas/portas/openstack/common/notifier/api.py new file mode 100644 index 0000000..af68d2d --- /dev/null +++ b/portas/portas/openstack/common/notifier/api.py @@ -0,0 +1,183 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from oslo.config import cfg + +from portas.openstack.common import context +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import importutils +from portas.openstack.common import jsonutils +from portas.openstack.common import log as logging +from portas.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + +notifier_opts = [ + cfg.MultiStrOpt('notification_driver', + default=[], + deprecated_name='list_notifier_drivers', + help='Driver or drivers to handle sending notifications'), + cfg.StrOpt('default_notification_level', + default='INFO', + help='Default notification level for outgoing notifications'), + cfg.StrOpt('default_publisher_id', + default='$host', + help='Default publisher_id for outgoing notifications'), +] + +CONF = cfg.CONF +CONF.register_opts(notifier_opts) + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify_decorator(name, fn): + """ decorator for notify which is used from utils.monkey_patch() + + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function + + """ + def wrapped_func(*args, **kwarg): + body = {} + body['args'] = [] + body['kwarg'] = {} + for arg in args: + body['args'].append(arg) + for key in kwarg: + body['kwarg'][key] = kwarg[key] + + ctxt = context.get_context_from_function_and_args(fn, args, kwarg) + notify(ctxt, + CONF.default_publisher_id, + name, + CONF.default_notification_level, + body) + return fn(*args, **kwarg) + return wrapped_func + + +def publisher_id(service, host=None): + if not host: + host = CONF.host + return "%s.%s" % (service, host) + + +def notify(context, publisher_id, event_type, priority, payload): + """Sends a notification using the specified driver + + :param publisher_id: the source worker_type.host of the message + 
:param event_type: the literal type of event (ex. Instance Creation) + :param priority: patterned after the enumeration of Python logging + levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + :param payload: A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id + a UUID representing the id for this notification + + timestamp + the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. + + Message example:: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': timeutils.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... }} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities') % priority) + + # Ensure everything is JSON serializable. + payload = jsonutils.to_primitive(payload, convert_instances=True) + + msg = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(timeutils.utcnow())) + + for driver in _get_drivers(): + try: + driver.notify(context, msg) + except Exception as e: + LOG.exception(_("Problem '%(e)s' attempting to " + "send to notification system. " + "Payload=%(payload)s") + % dict(e=e, payload=payload)) + + +_drivers = None + + +def _get_drivers(): + """Instantiate, cache, and return drivers based on the CONF.""" + global _drivers + if _drivers is None: + _drivers = {} + for notification_driver in CONF.notification_driver: + add_driver(notification_driver) + + return _drivers.values() + + +def add_driver(notification_driver): + """Add a notification driver at runtime.""" + # Make sure the driver list is initialized. 
+ _get_drivers() + if isinstance(notification_driver, basestring): + # Load and add + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) + else: + # Driver is already loaded; just add the object. + _drivers[notification_driver] = notification_driver + + +def _reset_drivers(): + """Used by unit tests to reset the drivers.""" + global _drivers + _drivers = None diff --git a/portas/portas/openstack/common/notifier/log_notifier.py b/portas/portas/openstack/common/notifier/log_notifier.py new file mode 100644 index 0000000..bc4799f --- /dev/null +++ b/portas/portas/openstack/common/notifier/log_notifier.py @@ -0,0 +1,35 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from portas.openstack.common import jsonutils +from portas.openstack.common import log as logging + + +CONF = cfg.CONF + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model. 
+ Log notifications using openstack's default logging system""" + + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'portas.openstack.common.notification.%s' % + message['event_type']) + getattr(logger, priority)(jsonutils.dumps(message)) diff --git a/portas/portas/openstack/common/notifier/no_op_notifier.py b/portas/portas/openstack/common/notifier/no_op_notifier.py new file mode 100644 index 0000000..ee1ddbd --- /dev/null +++ b/portas/portas/openstack/common/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/portas/portas/openstack/common/notifier/rabbit_notifier.py b/portas/portas/openstack/common/notifier/rabbit_notifier.py new file mode 100644 index 0000000..0687cfd --- /dev/null +++ b/portas/portas/openstack/common/notifier/rabbit_notifier.py @@ -0,0 +1,29 @@ +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import log as logging +from portas.openstack.common.notifier import rpc_notifier + +LOG = logging.getLogger(__name__) + + +def notify(context, message): + """Deprecated in Grizzly. Please use rpc_notifier instead.""" + + LOG.deprecated(_("The rabbit_notifier is now deprecated." + " Please use rpc_notifier instead.")) + rpc_notifier.notify(context, message) diff --git a/portas/portas/openstack/common/notifier/rpc_notifier.py b/portas/portas/openstack/common/notifier/rpc_notifier.py new file mode 100644 index 0000000..e00d9dc --- /dev/null +++ b/portas/portas/openstack/common/notifier/rpc_notifier.py @@ -0,0 +1,46 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from portas.openstack.common import context as req_context +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import log as logging +from portas.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'notification_topics', default=['notifications', ], + help='AMQP topic used for openstack notifications') + +CONF = cfg.CONF +CONF.register_opt(notification_topic_opt) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/portas/portas/openstack/common/notifier/rpc_notifier2.py b/portas/portas/openstack/common/notifier/rpc_notifier2.py new file mode 100644 index 0000000..b4afb9a --- /dev/null +++ b/portas/portas/openstack/common/notifier/rpc_notifier2.py @@ -0,0 +1,52 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +'''messaging based notification driver, with message envelopes''' + +from oslo.config import cfg + +from portas.openstack.common import context as req_context +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import log as logging +from portas.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'topics', default=['notifications', ], + help='AMQP topic(s) used for openstack notifications') + +opt_group = cfg.OptGroup(name='rpc_notifier2', + title='Options for rpc_notifier2') + +CONF = cfg.CONF +CONF.register_group(opt_group) +CONF.register_opt(notification_topic_opt, opt_group) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.rpc_notifier2.topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message, envelope=True) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/portas/portas/openstack/common/notifier/test_notifier.py b/portas/portas/openstack/common/notifier/test_notifier.py new file mode 100644 index 0000000..5e34880 --- /dev/null +++ b/portas/portas/openstack/common/notifier/test_notifier.py @@ -0,0 +1,22 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +NOTIFICATIONS = [] + + +def notify(_context, message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/portas/portas/openstack/common/service.py b/portas/portas/openstack/common/service.py new file mode 100644 index 0000000..042c721 --- /dev/null +++ b/portas/portas/openstack/common/service.py @@ -0,0 +1,332 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import os +import random +import signal +import sys +import time + +import eventlet +import logging as std_logging +from oslo.config import cfg + +from portas.openstack.common import eventlet_backdoor +from portas.openstack.common.gettextutils import _ +from portas.openstack.common import importutils +from portas.openstack.common import log as logging +from portas.openstack.common import threadgroup + + +rpc = importutils.try_import('portas.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self._services = threadgroup.ThreadGroup() + eventlet_backdoor.initialize_if_enabled() + + @staticmethod + def run_service(service): + """Start and wait for a service to finish. + + :param service: service to run and wait for. + :returns: None + + """ + service.start() + service.wait() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + self._services.add_thread(self.run_service, service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self._services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. 
+ + :returns: None + + """ + self._services.wait() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + raise SignalExit(signo) + + def wait(self): + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + status = None + try: + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + finally: + if rpc: + rpc.cleanup() + self.stop() + return status + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self): + self.children = {} + self.sigcaught = None + self.running = True + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def 
_child_process(self, service): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + signal.signal(signal.SIGTERM, _sigterm) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.run_service(service) + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. + if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
+ status = 0 + try: + self._child_process(wrap.service) + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + wrap.service.stop() + + os._exit(status) + + LOG.info(_('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def wait(self): + """Loop waiting on children to die and respawning as necessary""" + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(.01) + continue + + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + if self.sigcaught: + signame = {signal.SIGTERM: 
'SIGTERM', + signal.SIGINT: 'SIGINT'}[self.sigcaught] + LOG.info(_('Caught %s, stopping children'), signame) + + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + def start(self): + pass + + def stop(self): + self.tg.stop() + + def wait(self): + self.tg.wait() + + +def launch(service, workers=None): + if workers: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + else: + launcher = ServiceLauncher() + launcher.launch_service(service) + return launcher diff --git a/portas/portas/openstack/common/setup.py b/portas/portas/openstack/common/setup.py new file mode 100644 index 0000000..80a0ece --- /dev/null +++ b/portas/portas/openstack/common/setup.py @@ -0,0 +1,359 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Utilities with minimum-depends for use in setup.py +""" + +import email +import os +import re +import subprocess +import sys + +from setuptools.command import sdist + + +def parse_mailmap(mailmap='.mailmap'): + mapping = {} + if os.path.exists(mailmap): + with open(mailmap, 'r') as fp: + for l in fp: + try: + canonical_email, alias = re.match( + r'[^#]*?(<.+>).*(<.+>).*', l).groups() + except AttributeError: + continue + mapping[alias] = canonical_email + return mapping + + +def _parse_git_mailmap(git_dir, mailmap='.mailmap'): + mailmap = os.path.join(os.path.dirname(git_dir), mailmap) + return parse_mailmap(mailmap) + + +def canonicalize_emails(changelog, mapping): + """Takes in a string and an email alias mapping and replaces all + instances of the aliases in the string with their real email. + """ + for alias, email_address in mapping.iteritems(): + changelog = changelog.replace(alias, email_address) + return changelog + + +# Get requirements from the first file that exists +def get_reqs_from_files(requirements_files): + for requirements_file in requirements_files: + if os.path.exists(requirements_file): + with open(requirements_file, 'r') as fil: + return fil.read().split('\n') + return [] + + +def parse_requirements(requirements_files=['requirements.txt', + 'tools/pip-requires']): + requirements = [] + for line in get_reqs_from_files(requirements_files): + # For the requirements list, we need to inject only the portion + # after egg= so that distutils knows the package it's looking for + # such as: + # -e git://github.com/openstack/nova/master#egg=nova + if re.match(r'\s*-e\s+', line): + requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', + line)) + # such as: + # http://github.com/openstack/nova/zipball/master#egg=nova + elif re.match(r'\s*https?:', line): + requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', + line)) + # -f lines are for index locations, and don't get used here + elif re.match(r'\s*-f\s+', line): + pass + # argparse 
is part of the standard library starting with 2.7 + # adding it to the requirements list screws distro installs + elif line == 'argparse' and sys.version_info >= (2, 7): + pass + else: + requirements.append(line) + + return requirements + + +def parse_dependency_links(requirements_files=['requirements.txt', + 'tools/pip-requires']): + dependency_links = [] + # dependency_links inject alternate locations to find packages listed + # in requirements + for line in get_reqs_from_files(requirements_files): + # skip comments and blank lines + if re.match(r'(\s*#)|(\s*$)', line): + continue + # lines with -e or -f need the whole line, minus the flag + if re.match(r'\s*-[ef]\s+', line): + dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line)) + # lines that are only urls can go in unmolested + elif re.match(r'\s*https?:', line): + dependency_links.append(line) + return dependency_links + + +def _run_shell_command(cmd, throw_on_error=False): + if os.name == 'nt': + output = subprocess.Popen(["cmd.exe", "/C", cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + else: + output = subprocess.Popen(["/bin/sh", "-c", cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out = output.communicate() + if output.returncode and throw_on_error: + raise Exception("%s returned %d" % cmd, output.returncode) + if len(out) == 0: + return None + if len(out[0].strip()) == 0: + return None + return out[0].strip() + + +def _get_git_directory(): + parent_dir = os.path.dirname(__file__) + while True: + git_dir = os.path.join(parent_dir, '.git') + if os.path.exists(git_dir): + return git_dir + parent_dir, child = os.path.split(parent_dir) + if not child: # reached to root dir + return None + + +def write_git_changelog(): + """Write a changelog based on the git changelog.""" + new_changelog = 'ChangeLog' + git_dir = _get_git_directory() + if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'): + if git_dir: + git_log_cmd = 'git --git-dir=%s log --stat' % git_dir + changelog = 
_run_shell_command(git_log_cmd) + mailmap = _parse_git_mailmap(git_dir) + with open(new_changelog, "w") as changelog_file: + changelog_file.write(canonicalize_emails(changelog, mailmap)) + else: + open(new_changelog, 'w').close() + + +def generate_authors(): + """Create AUTHORS file using git commits.""" + jenkins_email = 'jenkins@review.(openstack|stackforge).org' + old_authors = 'AUTHORS.in' + new_authors = 'AUTHORS' + git_dir = _get_git_directory() + if not os.getenv('SKIP_GENERATE_AUTHORS'): + if git_dir: + # don't include jenkins email address in AUTHORS file + git_log_cmd = ("git --git-dir=" + git_dir + + " log --format='%aN <%aE>' | sort -u | " + "egrep -v '" + jenkins_email + "'") + changelog = _run_shell_command(git_log_cmd) + mailmap = _parse_git_mailmap(git_dir) + with open(new_authors, 'w') as new_authors_fh: + new_authors_fh.write(canonicalize_emails(changelog, mailmap)) + if os.path.exists(old_authors): + with open(old_authors, "r") as old_authors_fh: + new_authors_fh.write('\n' + old_authors_fh.read()) + else: + open(new_authors, 'w').close() + + +_rst_template = """%(heading)s +%(underline)s + +.. 
automodule:: %(module)s + :members: + :undoc-members: + :show-inheritance: +""" + + +def get_cmdclass(): + """Return dict of commands to run from setup.py.""" + + cmdclass = dict() + + def _find_modules(arg, dirname, files): + for filename in files: + if filename.endswith('.py') and filename != '__init__.py': + arg["%s.%s" % (dirname.replace('/', '.'), + filename[:-3])] = True + + class LocalSDist(sdist.sdist): + """Builds the ChangeLog and Authors files from VC first.""" + + def run(self): + write_git_changelog() + generate_authors() + # sdist.sdist is an old style class, can't use super() + sdist.sdist.run(self) + + cmdclass['sdist'] = LocalSDist + + # If Sphinx is installed on the box running setup.py, + # enable setup.py to build the documentation, otherwise, + # just ignore it + try: + from sphinx.setup_command import BuildDoc + + class LocalBuildDoc(BuildDoc): + + builders = ['html', 'man'] + + def generate_autoindex(self): + print "**Autodocumenting from %s" % os.path.abspath(os.curdir) + modules = {} + option_dict = self.distribution.get_option_dict('build_sphinx') + source_dir = os.path.join(option_dict['source_dir'][1], 'api') + if not os.path.exists(source_dir): + os.makedirs(source_dir) + for pkg in self.distribution.packages: + if '.' not in pkg: + os.path.walk(pkg, _find_modules, modules) + module_list = modules.keys() + module_list.sort() + autoindex_filename = os.path.join(source_dir, 'autoindex.rst') + with open(autoindex_filename, 'w') as autoindex: + autoindex.write(""".. 
toctree:: + :maxdepth: 1 + +""") + for module in module_list: + output_filename = os.path.join(source_dir, + "%s.rst" % module) + heading = "The :mod:`%s` Module" % module + underline = "=" * len(heading) + values = dict(module=module, heading=heading, + underline=underline) + + print "Generating %s" % output_filename + with open(output_filename, 'w') as output_file: + output_file.write(_rst_template % values) + autoindex.write(" %s.rst\n" % module) + + def run(self): + if not os.getenv('SPHINX_DEBUG'): + self.generate_autoindex() + + for builder in self.builders: + self.builder = builder + self.finalize_options() + self.project = self.distribution.get_name() + self.version = self.distribution.get_version() + self.release = self.distribution.get_version() + BuildDoc.run(self) + + class LocalBuildLatex(LocalBuildDoc): + builders = ['latex'] + + cmdclass['build_sphinx'] = LocalBuildDoc + cmdclass['build_sphinx_latex'] = LocalBuildLatex + except ImportError: + pass + + return cmdclass + + +def _get_revno(git_dir): + """Return the number of commits since the most recent tag. + + We use git-describe to find this out, but if there are no + tags then we fall back to counting commits since the beginning + of time. 
+ """ + describe = _run_shell_command( + "git --git-dir=%s describe --always" % git_dir) + if "-" in describe: + return describe.rsplit("-", 2)[-2] + + # no tags found + revlist = _run_shell_command( + "git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir) + return len(revlist.splitlines()) + + +def _get_version_from_git(pre_version): + """Return a version which is equal to the tag that's on the current + revision if there is one, or tag plus number of additional revisions + if the current revision has no tag.""" + + git_dir = _get_git_directory() + if git_dir: + if pre_version: + try: + return _run_shell_command( + "git --git-dir=" + git_dir + " describe --exact-match", + throw_on_error=True).replace('-', '.') + except Exception: + sha = _run_shell_command( + "git --git-dir=" + git_dir + " log -n1 --pretty=format:%h") + return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha) + else: + return _run_shell_command( + "git --git-dir=" + git_dir + " describe --always").replace( + '-', '.') + return None + + +def _get_version_from_pkg_info(package_name): + """Get the version from PKG-INFO file if we can.""" + try: + pkg_info_file = open('PKG-INFO', 'r') + except (IOError, OSError): + return None + try: + pkg_info = email.message_from_file(pkg_info_file) + except email.MessageError: + return None + # Check to make sure we're in our own dir + if pkg_info.get('Name', None) != package_name: + return None + return pkg_info.get('Version', None) + + +def get_version(package_name, pre_version=None): + """Get the version of the project. First, try getting it from PKG-INFO, if + it exists. If it does, that means we're in a distribution tarball or that + install has happened. Otherwise, if there is no PKG-INFO file, pull the + version from git. + + We do not support setup.py version sanity in git archive tarballs, nor do + we support packagers directly sucking our git repo into theirs. 
We expect + that a source tarball be made from our git repo - or that if someone wants + to make a source tarball from a fork of our repo with additional tags in it + that they understand and desire the results of doing that. + """ + version = os.environ.get("OSLO_PACKAGE_VERSION", None) + if version: + return version + version = _get_version_from_pkg_info(package_name) + if version: + return version + version = _get_version_from_git(pre_version) + if version: + return version + raise Exception("Versioning for this project requires either an sdist" + " tarball, or access to an upstream git repository.") diff --git a/portas/portas/openstack/common/sslutils.py b/portas/portas/openstack/common/sslutils.py new file mode 100644 index 0000000..77f174c --- /dev/null +++ b/portas/portas/openstack/common/sslutils.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import ssl + +from oslo.config import cfg + +from portas.openstack.common.gettextutils import _ + + +ssl_opts = [ + cfg.StrOpt('ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('cert_file', + default=None, + help="Certificate file to use when starting " + "the server securely"), + cfg.StrOpt('key_file', + default=None, + help="Private key file to use when starting " + "the server securely"), +] + + +CONF = cfg.CONF +CONF.register_opts(ssl_opts, "ssl") + + +def is_enabled(): + cert_file = CONF.ssl.cert_file + key_file = CONF.ssl.key_file + ca_file = CONF.ssl.ca_file + use_ssl = cert_file or key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError(_("Unable to find key_file : %s") % key_file) + + if use_ssl and (not cert_file or not key_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + return use_ssl + + +def wrap(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.ssl.cert_file, + 'keyfile': CONF.ssl.key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl.ca_file: + ssl_kwargs['ca_certs'] = CONF.ssl.ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) diff --git a/portas/portas/openstack/common/threadgroup.py b/portas/portas/openstack/common/threadgroup.py new file mode 100644 index 0000000..1835ac7 --- /dev/null +++ b/portas/portas/openstack/common/threadgroup.py @@ -0,0 +1,114 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from eventlet import greenlet +from eventlet import greenpool +from eventlet import greenthread + +from portas.openstack.common import log as logging +from portas.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """ Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify if. + + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """ Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when + it has done so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + +class ThreadGroup(object): + """ The point of the ThreadGroup classis to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. 
+ """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.LoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + + def thread_done(self, thread): + self.threads.remove(thread) + + def stop(self): + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + # don't kill the current thread. + continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + continue + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/portas/portas/openstack/common/timeutils.py b/portas/portas/openstack/common/timeutils.py new file mode 100644 index 0000000..e2c2740 --- /dev/null +++ b/portas/portas/openstack/common/timeutils.py @@ -0,0 +1,182 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Time related utilities and helper functions.
"""

import calendar
import datetime


TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"


def isotime(at=None):
    """Stringify time in ISO 8601 format.

    :param at: datetime to format; defaults to the (overridable) utcnow().
    :returns: e.g. '2013-03-04T13:59:08Z' for naive/UTC datetimes.
    """
    if not at:
        at = utcnow()
    # NOTE: previously this variable was named ``str``, shadowing the
    # builtin of the same name.
    stamp = at.strftime(TIME_FORMAT)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    stamp += ('Z' if tz == 'UTC' else tz)
    return stamp


def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :raises ValueError: if the string cannot be parsed.
    """
    # Imported lazily: iso8601 is a third-party package and only this
    # function needs it, so the rest of the module works without it.
    import iso8601
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # str(e) instead of the deprecated e.message attribute.
        raise ValueError(str(e))


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)


def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    return datetime.datetime.strptime(timestr, fmt)


def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        # Already naive; assume it is UTC.
        return timestamp
    return timestamp.replace(tzinfo=None) - offset


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    # Python 2: accept both formatted strings and datetimes.
    if isinstance(before, basestring):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, basestring):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow.

    With a list override (see set_time_override), each call consumes one
    element; with a single datetime override, every call returns it
    unchanged.  Without an override, returns the real current UTC time.
    """
    if utcnow.override_time:
        try:
            return utcnow.override_time.pop(0)
        except AttributeError:
            # Single datetime override, not a list.
            return utcnow.override_time
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formated date from timestamp"""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


def set_time_override(override_time=None):
    """
    Override utils.utcnow to return a constant time or a list thereof,
    one at a time.

    :param override_time: datetime (or list of datetimes) to freeze time
        at; defaults to the current UTC time.

    NOTE: the previous version used ``datetime.datetime.utcnow()`` as the
    *default argument value*, which Python evaluates only once at import
    time -- every no-arg call froze time at whenever the module was first
    imported.  The default is now computed per call.
    """
    if override_time is None:
        override_time = datetime.datetime.utcnow()
    utcnow.override_time = override_time


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        # List override: datetimes are immutable, so the list has to be
        # rebuilt.  (The previous ``for dt in ...: dt += timedelta`` only
        # rebound the loop variable and silently changed nothing.)
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Single (non-iterable) datetime override.
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))


def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None


def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times."""
    if not now:
        now = utcnow()
    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
                minute=now.minute, second=now.second,
                microsecond=now.microsecond)


def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    return datetime.datetime(day=tyme['day'],
                             month=tyme['month'],
                             year=tyme['year'],
                             hour=tyme['hour'],
                             minute=tyme['minute'],
                             second=tyme['second'],
                             microsecond=tyme['microsecond'])


def delta_seconds(before, after):
    """
    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    try:
        return delta.total_seconds()
    except AttributeError:
        # Python < 2.7 has no timedelta.total_seconds().
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """
    Determines if time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
UUID related utilities and helper functions.
"""

import uuid


def generate_uuid():
    """Return a fresh random UUID4 in canonical string form."""
    return str(uuid.uuid4())


def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa

    """
    try:
        canonical = str(uuid.UUID(val))
    except (TypeError, ValueError, AttributeError):
        # Not parseable as a UUID at all (wrong type or malformed text).
        return False
    # Round-tripping must reproduce the input exactly, so only the
    # canonical lowercase-hyphenated form is accepted.
    return canonical == val
+""" + +import pkg_resources + + +class VersionInfo(object): + + def __init__(self, package): + """Object that understands versioning for a package + :param package: name of the python package, such as glance, or + python-glanceclient + """ + self.package = package + self.release = None + self.version = None + self._cached_version = None + + def __str__(self): + """Make the VersionInfo object behave like a string.""" + return self.version_string() + + def __repr__(self): + """Include the name.""" + return "VersionInfo(%s:%s)" % (self.package, self.version_string()) + + def _get_version_from_pkg_resources(self): + """Get the version of the package from the pkg_resources record + associated with the package.""" + try: + requirement = pkg_resources.Requirement.parse(self.package) + provider = pkg_resources.get_provider(requirement) + return provider.version + except pkg_resources.DistributionNotFound: + # The most likely cause for this is running tests in a tree + # produced from a tarball where the package itself has not been + # installed into anything. Revert to setup-time logic. + from portas.openstack.common import setup + return setup.get_version(self.package) + + def release_string(self): + """Return the full version of the package including suffixes indicating + VCS status. + """ + if self.release is None: + self.release = self._get_version_from_pkg_resources() + + return self.release + + def version_string(self): + """Return the short version minus any alpha/beta tags.""" + if self.version is None: + parts = [] + for part in self.release_string().split('.'): + if part[0].isdigit(): + parts.append(part) + else: + break + self.version = ".".join(parts) + + return self.version + + # Compatibility functions + canonical_version_string = version_string + version_string_with_vcs = release_string + + def cached_version_string(self, prefix=""): + """Generate an object which will expand in a string context to + the results of version_string(). 
We do this so that don't + call into pkg_resources every time we start up a program when + passing version information into the CONF constructor, but + rather only do the calculation when and if a version is requested + """ + if not self._cached_version: + self._cached_version = "%s%s" % (prefix, + self.version_string()) + return self._cached_version diff --git a/portas/portas/openstack/common/wsgi.py b/portas/portas/openstack/common/wsgi.py new file mode 100644 index 0000000..4dd43e6 --- /dev/null +++ b/portas/portas/openstack/common/wsgi.py @@ -0,0 +1,797 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
"""Utility methods for working with WSGI servers."""

import eventlet
# Only the socket module is green-patched here; the rest of the process
# keeps its normal (blocking) stdlib behavior.
eventlet.patcher.monkey_patch(all=False, socket=True)

import datetime
import errno
import socket
import sys
import time

import eventlet.wsgi
from oslo.config import cfg
import routes
import routes.middleware
import webob.dec
import webob.exc
from xml.dom import minidom
from xml.parsers import expat

from portas.openstack.common import exception
from portas.openstack.common.gettextutils import _
from portas.openstack.common import jsonutils
from portas.openstack.common import log as logging
from portas.openstack.common import service
from portas.openstack.common import sslutils
from portas.openstack.common import xmlutils

# Tunables for the listening socket, registered on the global CONF object.
socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help="Number of backlog requests to configure the socket with"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
]

CONF = cfg.CONF
CONF.register_opts(socket_opts)

LOG = logging.getLogger(__name__)


def run_server(application, port):
    """Run a WSGI server with the given application.

    Blocks forever serving `application` on all interfaces at `port`.
    """
    sock = eventlet.listen(('0.0.0.0', port))
    eventlet.wsgi.server(sock, application)


class Service(service.Service):
    """
    Provides a Service API for wsgi servers.

    This gives us the ability to launch wsgi servers with the
    Launcher classes in service.py.
    """

    def __init__(self, application, port,
                 host='0.0.0.0', backlog=4096, threads=1000):
        # A falsy backlog argument falls back to the CONF-registered value.
        self.application = application
        self._port = port
        self._host = host
        self._backlog = backlog if backlog else CONF.backlog
        super(Service, self).__init__(threads)

    def _get_socket(self, host, port, backlog):
        # Resolve host/port once and bind, retrying for up to 30 seconds
        # if the address is still in use (e.g. a previous instance in
        # TIME_WAIT).  Optionally wraps the socket in SSL.
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(host,
                                  port,
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if sslutils.is_enabled():
                    sock = sslutils.wrap(sock)

            except socket.error, err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                                 "after trying for 30 seconds") %
                               {'host': host, 'port': port})
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock

    def start(self):
        """Start serving this service using the provided server instance.

        :returns: None

        """
        super(Service, self).start()
        self._socket = self._get_socket(self._host, self._port, self._backlog)
        self.tg.add_thread(self._run, self.application, self._socket)

    @property
    def backlog(self):
        # Effective listen backlog chosen at construction time.
        return self._backlog

    @property
    def host(self):
        # Actual bound address once started; configured host before that.
        return self._socket.getsockname()[0] if self._socket else self._host

    @property
    def port(self):
        # Actual bound port once started; configured port before that.
        return self._socket.getsockname()[1] if self._socket else self._port

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        super(Service, self).stop()

    def _run(self, application, socket):
        """Start a WSGI server in a new green thread."""
        logger = logging.getLogger('eventlet.wsgi')
        eventlet.wsgi.server(socket,
                             application,
                             custom_pool=self.tg.pool,
                             log=logging.WritableLogger(logger))


class Middleware(object):
    """
    Base WSGI middleware wrapper. These classes require an application to be
    initialized that will be called next. By default the middleware will
    simply call its wrapped app, or you can override __call__ to customize its
    behavior.
    """

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """
        Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        response = self.process_request(req)
        if response:
            return response
        response = req.get_response(self.application)
        return self.process_response(response)


class Debug(Middleware):
    """
    Helper class that can be inserted into any WSGI application chain
    to get information about the request and response.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        # NOTE: Python 2 print statements; dumps the whole WSGI environ
        # and response headers to stdout for debugging.
        print ("*" * 40) + " REQUEST ENVIRON"
        for key, value in req.environ.items():
            print key, "=", value
        print
        resp = req.get_response(self.application)

        print ("*" * 40) + " RESPONSE HEADERS"
        for (key, value) in resp.headers.iteritems():
            print key, "=", value
        print

        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """
        Iterator that prints the contents of a wrapper string iterator
        when iterated.
        """
        print ("*" * 40) + " BODY"
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print


class Router(object):

    """
    WSGI middleware that maps incoming requests to WSGI apps.
    """

    def __init__(self, mapper):
        """
        Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, who will route
        the request to the action method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, "/svrlist", controller=sc, action="list")

          # Actions are all implicitly defined
          mapper.resource("server", "servers", controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    def __call__(self, req):
        """
        Route the incoming request to a controller based on self.map.
        If no match, return a 404.
        """
        # Returning a WSGI app from a wsgify-decorated method makes webob
        # invoke it (the RoutesMiddleware) to produce the actual response.
        return self._router

    @staticmethod
    @webob.dec.wsgify
    def _dispatch(req):
        """
        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        app = match['controller']
        return app


class Request(webob.Request):
    """Add some Openstack API-specific logic to the base webob.Request."""

    default_request_content_types = ('application/json', 'application/xml')
    default_accept_types = ('application/json', 'application/xml')
    default_accept_type = 'application/json'

    def best_match_content_type(self, supported_content_types=None):
        """Determine the requested response content-type.

        Based on the query extension then the Accept header.
        Defaults to default_accept_type if we don't find a preference

        """
        supported_content_types = (supported_content_types or
                                   self.default_accept_types)

        # A trailing path extension ('.json'/'.xml') wins over Accept.
        parts = self.path.rsplit('.', 1)
        if len(parts) > 1:
            ctype = 'application/{0}'.format(parts[1])
            if ctype in supported_content_types:
                return ctype

        bm = self.accept.best_match(supported_content_types)
        return bm or self.default_accept_type

    def get_content_type(self, allowed_content_types=None):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header

        """
        if "Content-Type" not in self.headers:
            return None

        content_type = self.content_type
        allowed_content_types = (allowed_content_types or
                                 self.default_request_content_types)

        if content_type not in allowed_content_types:
            raise exception.InvalidContentType(content_type=content_type)
        return content_type


class Resource(object):
    """
    WSGI app that handles (de)serialization and controller dispatch.

    Reads routing information supplied by RoutesMiddleware and calls
    the requested action method upon its deserializer, controller,
    and serializer. Those three objects may implement any of the basic
    controller action methods (create, update, show, index, delete)
    along with any that may be specified in the api router. A 'default'
    method may also be implemented to be used in place of any
    non-implemented actions. Deserializer methods must accept a request
    argument and return a dictionary. Controller methods must accept a
    request argument. Additionally, they must also accept keyword
    arguments that represent the keys returned by the Deserializer. They
    may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.
    """
    def __init__(self, controller, deserializer=None, serializer=None):
        """
        :param controller: object that implement methods created by routes lib
        :param deserializer: object that supports webob request deserialization
                             through controller-like actions
        :param serializer: object that supports webob response serialization
                           through controller-like actions
        """
        self.controller = controller
        self.serializer = serializer or ResponseSerializer()
        self.deserializer = deserializer or RequestDeserializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        try:
            action, action_args, accept = self.deserialize_request(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return webob.exc.HTTPUnsupportedMediaType(explanation=msg)
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return webob.exc.HTTPBadRequest(explanation=msg)

        action_result = self.execute_action(action, request, **action_args)
        try:
            return self.serialize_response(action, action_result, accept)
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result

    def deserialize_request(self, request):
        # Delegates to the configured RequestDeserializer.
        return self.deserializer.deserialize(request)

    def serialize_response(self, action, action_result, accept):
        # Delegates to the configured ResponseSerializer.
        return self.serializer.serialize(action_result, accept, action)

    def execute_action(self, action, request, **action_args):
        # Routes the named action to the controller.
        return self.dispatch(self.controller, action, request, **action_args)

    def dispatch(self, obj, action, *args, **kwargs):
        """Find action-specific method on self and call it."""
        try:
            method = getattr(obj, action)
        except AttributeError:
            # Fall back to the controller's 'default' method.
            method = getattr(obj, 'default')

        return method(*args, **kwargs)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        # 'controller' and 'format' are routing artifacts, not
        # controller keyword arguments.
        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args


class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Find and call local method."""
        action = kwargs.pop('action', 'default')
        action_method = getattr(self, str(action), self.default)
        return action_method(*args, **kwargs)

    def default(self, data):
        # Subclasses must provide a fallback implementation.
        raise NotImplementedError()


class DictSerializer(ActionDispatcher):
    """Default request body serialization"""

    def serialize(self, data, action='default'):
        return self.dispatch(data, action=action)

    def default(self, data):
        # Base serializer produces an empty body.
        return ""


class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization"""

    def default(self, data):
        def sanitizer(obj):
            # Datetimes are truncated to whole seconds and rendered in
            # ISO format; anything else json can't handle is stringified.
            if isinstance(obj, datetime.datetime):
                _dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
                return _dtime.isoformat()
            return unicode(obj)
        return jsonutils.dumps(data, default=sanitizer)
class XMLDictSerializer(DictSerializer):

    def __init__(self, metadata=None, xmlns=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized xml
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        self.xmlns = xmlns

    def default(self, data):
        # We expect data to contain a single key which is the XML root.
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])

        return self.to_xml_string(node)

    def to_xml_string(self, node, has_atom=False):
        self._add_xmlns(node, has_atom)
        return node.toprettyxml(indent='    ', encoding='UTF-8')

    #NOTE (ameade): the has_atom should be removed after all of the
    # xml serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, has_atom=False):
        if self.xmlns is not None:
            node.setAttribute('xmlns', self.xmlns)
        if has_atom:
            node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
        result = doc.createElement(nodename)

        # Set the xml namespace if one is specified
        # TODO(justinsb): We could also use prefixes on the keys
        xmlns = metadata.get('xmlns', None)
        if xmlns:
            result.setAttribute('xmlns', xmlns)

        #TODO(bcwaldon): accomplish this without a type-check
        if type(data) is list:
            # Lists either map through a declared 'list_collections' item
            # template or are rendered one child node per element, using
            # a singular form of the (plural) node name.
            collections = metadata.get('list_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for item in data:
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(item))
                    result.appendChild(node)
                return result
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        #TODO(bcwaldon): accomplish this without a type-check
        elif type(data) is dict:
            # Dicts either map through 'dict_collections' key/value item
            # templates, or each key becomes an attribute (if declared in
            # metadata 'attributes') or a child node.
            collections = metadata.get('dict_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for k, v in data.items():
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(k))
                    text = doc.createTextNode(str(v))
                    node.appendChild(text)
                    result.appendChild(node)
                return result
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in data.items():
                if k in attrs:
                    result.setAttribute(k, str(v))
                else:
                    node = self._to_xml_node(doc, metadata, k, v)
                    result.appendChild(node)
        else:
            # Type is atom
            node = doc.createTextNode(str(data))
            result.appendChild(node)
        return result

    def _create_link_nodes(self, xml_doc, links):
        # Build <atom:link rel=... href=... [type=...]> elements.
        link_nodes = []
        for link in links:
            link_node = xml_doc.createElement('atom:link')
            link_node.setAttribute('rel', link['rel'])
            link_node.setAttribute('href', link['href'])
            if 'type' in link:
                link_node.setAttribute('type', link['type'])
            link_nodes.append(link_node)
        return link_nodes


class ResponseHeadersSerializer(ActionDispatcher):
    """Default response headers serialization"""

    def serialize(self, response, data, action):
        self.dispatch(response, data, action=action)

    def default(self, response, data):
        # Plain 200 unless an action-specific serializer overrides it.
        response.status_int = 200


class ResponseSerializer(object):
    """Encode the necessary pieces into a response object"""

    def __init__(self, body_serializers=None, headers_serializer=None):
        self.body_serializers = {
            'application/xml': XMLDictSerializer(),
            'application/json': JSONDictSerializer(),
        }
        self.body_serializers.update(body_serializers or {})

        self.headers_serializer = (headers_serializer or
                                   ResponseHeadersSerializer())

    def serialize(self, response_data, content_type, action='default'):
        """Serialize a dict into a string and wrap in a wsgi.Request object.

        :param response_data: dict produced by the Controller
        :param content_type: expected mimetype of serialized response body

        """
        response = webob.Response()
        self.serialize_headers(response, response_data, action)
        self.serialize_body(response, response_data, content_type, action)
        return response

    def serialize_headers(self, response, data, action):
        self.headers_serializer.serialize(response, data, action)

    def serialize_body(self, response, data, content_type, action):
        response.headers['Content-Type'] = content_type
        if data is not None:
            serializer = self.get_body_serializer(content_type)
            response.body = serializer.serialize(data, action)

    def get_body_serializer(self, content_type):
        try:
            return self.body_serializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)


class RequestHeadersDeserializer(ActionDispatcher):
    """Default request headers deserializer"""

    def deserialize(self, request, action):
        return self.dispatch(request, action=action)

    def default(self, request):
        # No header-derived arguments by default.
        return {}


class RequestDeserializer(object):
    """Break up a Request object into more useful pieces."""

    def __init__(self, body_deserializers=None, headers_deserializer=None,
                 supported_content_types=None):

        self.supported_content_types = supported_content_types

        self.body_deserializers = {
            'application/xml': XMLDeserializer(),
            'application/json': JSONDeserializer(),
        }
        self.body_deserializers.update(body_deserializers or {})

        self.headers_deserializer = (headers_deserializer or
                                     RequestHeadersDeserializer())

    def deserialize(self, request):
        """Extract necessary pieces of the request.

        :param request: Request object
        :returns: tuple of (expected controller action name, dictionary of
                  keyword arguments to pass to the controller, the expected
                  content type of the response)

        """
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        action_args.update(self.deserialize_headers(request, action))
        action_args.update(self.deserialize_body(request, action))

        accept = self.get_expected_content_type(request)

        return (action, action_args, accept)

    def deserialize_headers(self, request, action):
        return self.headers_deserializer.deserialize(request, action)

    def deserialize_body(self, request, action):
        # Empty or untyped bodies deserialize to no extra arguments;
        # unknown content types propagate InvalidContentType to the caller.
        if not len(request.body) > 0:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            content_type = request.get_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            raise

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            LOG.debug(_("Unable to deserialize body as provided Content-Type"))
            raise

        return deserializer.deserialize(request.body, action)

    def get_body_deserializer(self, content_type):
        try:
            return self.body_deserializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def get_expected_content_type(self, request):
        return request.best_match_content_type(self.supported_content_types)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args


class TextDeserializer(ActionDispatcher):
    """Default request body deserialization"""

    def deserialize(self, datastring, action='default'):
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        # Base deserializer contributes no arguments.
        return {}


class JSONDeserializer(TextDeserializer):

    def _from_json(self, datastring):
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            msg = _("cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)

    def default(self, datastring):
        return {'body': self._from_json(datastring)}


class XMLDeserializer(TextDeserializer):

    def __init__(self, metadata=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}

    def _from_xml(self, datastring):
        plurals = set(self.metadata.get('plurals', {}))

        try:
            node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0]
            return {node.nodeName: self._from_xml_node(node, plurals)}
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param listnames: list of XML node names whose subnodes should
                          be considered list items.

        """

        # nodeType 3 is TEXT_NODE: a single text child means a leaf value.
        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
            return node.childNodes[0].nodeValue
        elif node.nodeName in listnames:
            return [self._from_xml_node(n, listnames) for n in node.childNodes]
        else:
            result = dict()
            for attr in node.attributes.keys():
                result[attr] = node.attributes[attr].nodeValue
            for child in node.childNodes:
                if child.nodeType != node.TEXT_NODE:
                    result[child.nodeName] = self._from_xml_node(child,
                                                                 listnames)
            return result

    def find_first_child_named(self, parent, name):
        """Search a nodes children for the first child with a given name"""
        for node in parent.childNodes:
            if node.nodeName == name:
                return node
        return None

    def find_children_named(self, parent, name):
        """Return all of a nodes children who have the given name"""
        for node in parent.childNodes:
            if node.nodeName == name:
                yield node

    def extract_text(self, node):
        """Get the text field contained by the given node"""
        if len(node.childNodes) == 1:
            child = node.childNodes[0]
            if child.nodeType == child.TEXT_NODE:
                return child.nodeValue
        return ""

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}
See the +# License for the specific language governing permissions and limitations +# under the License. + +from xml.dom import minidom +from xml.parsers import expat +from xml import sax +from xml.sax import expatreader + + +class ProtectedExpatParser(expatreader.ExpatParser): + """An expat parser which disables DTD's and entities by default.""" + + def __init__(self, forbid_dtd=True, forbid_entities=True, + *args, **kwargs): + # Python 2.x old style class + expatreader.ExpatParser.__init__(self, *args, **kwargs) + self.forbid_dtd = forbid_dtd + self.forbid_entities = forbid_entities + + def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): + raise ValueError("Inline DTD forbidden") + + def entity_decl(self, entityName, is_parameter_entity, value, base, + systemId, publicId, notationName): + raise ValueError(" entity declaration forbidden") + + def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): + # expat 1.2 + raise ValueError(" unparsed entity forbidden") + + def external_entity_ref(self, context, base, systemId, publicId): + raise ValueError(" external entity forbidden") + + def notation_decl(self, name, base, sysid, pubid): + raise ValueError(" notation forbidden") + + def reset(self): + expatreader.ExpatParser.reset(self) + if self.forbid_dtd: + self._parser.StartDoctypeDeclHandler = self.start_doctype_decl + self._parser.EndDoctypeDeclHandler = None + if self.forbid_entities: + self._parser.EntityDeclHandler = self.entity_decl + self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl + self._parser.ExternalEntityRefHandler = self.external_entity_ref + self._parser.NotationDeclHandler = self.notation_decl + try: + self._parser.SkippedEntityHandler = None + except AttributeError: + # some pyexpat versions do not support SkippedEntity + pass + + +def safe_minidom_parse_string(xml_string): + """Parse an XML string using minidom safely. 
+ + """ + try: + return minidom.parseString(xml_string, parser=ProtectedExpatParser()) + except sax.SAXParseException: + raise expat.ExpatError() diff --git a/portas/portas/schema.py b/portas/portas/schema.py new file mode 100644 index 0000000..b16c33f --- /dev/null +++ b/portas/portas/schema.py @@ -0,0 +1,107 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import jsonschema + +from portas.common import exception + + +class Schema(object): + + def __init__(self, name, properties=None, links=None): + self.name = name + if properties is None: + properties = {} + self.properties = properties + self.links = links + + def validate(self, obj): + try: + jsonschema.validate(obj, self.raw()) + except jsonschema.ValidationError as e: + raise exception.InvalidObject(schema=self.name, reason=str(e)) + + def filter(self, obj): + filtered = {} + for key, value in obj.iteritems(): + if self._filter_func(self.properties, key) and value is not None: + filtered[key] = value + return filtered + + @staticmethod + def _filter_func(properties, key): + return key in properties + + def merge_properties(self, properties): + # Ensure custom props aren't attempting to override base props + original_keys = set(self.properties.keys()) + new_keys = set(properties.keys()) + intersecting_keys = original_keys.intersection(new_keys) + conflicting_keys = [k for k in intersecting_keys + if self.properties[k] != properties[k]] + if 
len(conflicting_keys) > 0: + props = ', '.join(conflicting_keys) + reason = _("custom properties (%(props)s) conflict " + "with base properties") + raise exception.SchemaLoadError(reason=reason % {'props': props}) + + self.properties.update(properties) + + def raw(self): + raw = { + 'name': self.name, + 'properties': self.properties, + 'additionalProperties': False, + } + if self.links: + raw['links'] = self.links + return raw + + +class PermissiveSchema(Schema): + @staticmethod + def _filter_func(properties, key): + return True + + def raw(self): + raw = super(PermissiveSchema, self).raw() + raw['additionalProperties'] = {'type': 'string'} + return raw + + +class CollectionSchema(object): + + def __init__(self, name, item_schema): + self.name = name + self.item_schema = item_schema + + def raw(self): + return { + 'name': self.name, + 'properties': { + self.name: { + 'type': 'array', + 'items': self.item_schema.raw(), + }, + 'first': {'type': 'string'}, + 'next': {'type': 'string'}, + 'schema': {'type': 'string'}, + }, + 'links': [ + {'rel': 'first', 'href': '{first}'}, + {'rel': 'next', 'href': '{next}'}, + {'rel': 'describedby', 'href': '{schema}'}, + ], + } diff --git a/portas/portas/tests/__init__.py b/portas/portas/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/portas/portas/tests/api/__init__.py b/portas/portas/tests/api/__init__.py new file mode 100644 index 0000000..0a754b6 --- /dev/null +++ b/portas/portas/tests/api/__init__.py @@ -0,0 +1 @@ +__author__ = 'sad' diff --git a/portas/portas/tests/api/simple_test.py b/portas/portas/tests/api/simple_test.py new file mode 100644 index 0000000..bad8dde --- /dev/null +++ b/portas/portas/tests/api/simple_test.py @@ -0,0 +1,6 @@ +import unittest + + +class Test(unittest.TestCase): + def test(self): + assert False diff --git a/portas/portas/version.py b/portas/portas/version.py new file mode 100644 index 0000000..5a5db6a --- /dev/null +++ b/portas/portas/version.py @@ -0,0 +1,20 @@ +# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from portas.openstack.common import version as common_version + +version_info = common_version.VersionInfo('portas') diff --git a/portas/run_tests.sh b/portas/run_tests.sh new file mode 100755 index 0000000..f99249f --- /dev/null +++ b/portas/run_tests.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Portas's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " --unittests-only Run unit tests only, exclude functional tests." + echo " -p, --pep8 Just run pep8" + echo " -P, --no-pep8 Don't run static code checks" + echo " -h, --help Print this usage message" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
+ exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) let always_venv=1; let never_venv=0;; + -N|--no-virtual-env) let always_venv=0; let never_venv=1;; + -p|--pep8) let just_pep8=1;; + -P|--no-pep8) let no_pep8=1;; + -f|--force) let force=1;; + -u|--update) update=1;; + --unittests-only) noseopts="$noseopts --exclude-dir=portas/tests/functional";; + -c|--coverage) noseopts="$noseopts --with-coverage --cover-package=portas";; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseopts= +noseargs= +wrapper="" +just_pep8=0 +no_pep8=0 +update=0 + +export NOSE_WITH_OPENSTACK=1 +export NOSE_OPENSTACK_COLOR=1 +export NOSE_OPENSTACK_RED=0.05 +export NOSE_OPENSTACK_YELLOW=0.025 +export NOSE_OPENSTACK_SHOW_ELAPSED=1 +export NOSE_OPENSTACK_STDOUT=1 + +for arg in "$@"; do + process_option $arg +done + +function run_tests { + # Cleanup *pyc + ${wrapper} find . -type f -name "*.pyc" -delete + # Just run the test suites in current environment + ${wrapper} rm -f tests.sqlite + ${wrapper} $NOSETESTS +} + +function run_pep8 { + echo "Running pep8 ..." + PEP8_EXCLUDE=".venv,.tox,dist,doc,openstack" + PEP8_OPTIONS="--exclude=$PEP8_EXCLUDE --repeat" + PEP8_IGNORE="--ignore=E125,E126,E711,E712" + PEP8_INCLUDE=". bin/*" + + ${wrapper} pep8 $PEP8_OPTIONS $PEP8_INCLUDE $PEP8_IGNORE +} + + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one?
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +if [ $just_pep8 -eq 1 ]; then + run_pep8 + exit +fi + +run_tests || exit + +if [ -z "$noseargs" ]; then + if [ $no_pep8 -eq 0 ]; then + run_pep8 + fi +fi diff --git a/portas/setup.cfg b/portas/setup.cfg new file mode 100644 index 0000000..d53addc --- /dev/null +++ b/portas/setup.cfg @@ -0,0 +1,9 @@ +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 diff --git a/portas/setup.py b/portas/setup.py new file mode 100644 index 0000000..a0cfe4d --- /dev/null +++ b/portas/setup.py @@ -0,0 +1,48 @@ +#!/usr/bin/python +# Copyright (c) 2010 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import setuptools + +from portas.openstack.common import setup + +requires = setup.parse_requirements() +depend_links = setup.parse_dependency_links() +project = 'portas' + +setuptools.setup( + name=project, + version=setup.get_version(project, '2013.1'), + description='The Portas project provides a simple WSGI server for Windows Environment Management', + license='Apache License (2.0)', + author='OpenStack', + author_email='openstack@lists.launchpad.net', + url='http://portas.openstack.org/', + packages=setuptools.find_packages(exclude=['bin']), + test_suite='nose.collector', + cmdclass=setup.get_cmdclass(), + include_package_data=True, + install_requires=requires, + dependency_links=depend_links, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: POSIX :: Linux', + 'Programming Language :: Python :: 2.7', + 'Environment :: No Input/Output (Daemon)', + 'Environment :: OpenStack', + ], + scripts=['bin/portas-api'], + py_modules=[]) \ No newline at end of file diff --git a/portas/tools/install_venv.py b/portas/tools/install_venv.py new file mode 100644 index 0000000..ccaaa5e --- /dev/null +++ b/portas/tools/install_venv.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack LLC. +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Installation script for Portas's development virtualenv +""" + +import os +import subprocess +import sys + +import install_venv_common as install_venv + + +def print_help(): + help = """ + Portas development environment setup is complete. + + Portas development uses virtualenv to track and manage Python dependencies + while in development and testing. + + To activate the Portas virtualenv for the extent of your current shell session + you can run: + + $ source .venv/bin/activate + + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: + + $ tools/with_venv.sh + + Also, make test will automatically use the virtualenv. + """ + print help + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + venv = os.path.join(root, '.venv') + pip_requires = os.path.join(root, 'tools', 'pip-requires') + test_requires = os.path.join(root, 'tools', 'test-requires') + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'Portas' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + options = install.parse_args(argv) + install.check_python_version() + install.check_dependencies() + install.create_virtualenv(no_site_packages=options.no_site_packages) + install.install_dependencies() + install.run_command([os.path.join(venv, 'bin/python'), + 'setup.py', 'develop']) + install.post_process() + print_help() + +if __name__ == '__main__': + main(sys.argv) diff --git a/portas/tools/install_venv_common.py b/portas/tools/install_venv_common.py new file mode 100644 index 0000000..fd9076f --- /dev/null +++ b/portas/tools/install_venv_common.py @@ -0,0 +1,219 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack, LLC +# Copyright 2013 IBM Corp.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides methods needed by installation script for OpenStack development +virtual environments. + +Synced in from openstack-common +""" + +import argparse +import os +import subprocess +import sys + + +class InstallVenv(object): + + def __init__(self, root, venv, pip_requires, test_requires, py_version, + project): + self.root = root + self.venv = venv + self.pip_requires = pip_requires + self.test_requires = test_requires + self.py_version = py_version + self.project = project + + def die(self, message, *args): + print >> sys.stderr, message % args + sys.exit(1) + + def check_python_version(self): + if sys.version_info < (2, 6): + self.die("Need Python Version >= 2.6") + + def run_command_with_code(self, cmd, redirect_output=True, + check_exit_code=True): + """Runs a command in an out-of-process shell. + + Returns the output of that command. Working directory is self.root. 
+ """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora(self.root, self.venv, self.pip_requires, + self.test_requires, self.py_version, self.project) + else: + return Distro(self.root, self.venv, self.pip_requires, + self.test_requires, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. + + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print 'Creating venv...', + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print 'done.' + print 'Installing pip in venv...', + if not self.run_command(['tools/with_venv.sh', 'easy_install', + 'pip>1.0']).strip(): + self.die("Failed to install pip.") + print 'done.' + else: + print "venv already exists..." + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print 'Installing dependencies with pip (this can take a while)...' + + # First things first, make sure our venv has the latest pip and + # distribute. 
+ # NOTE: we keep pip at version 1.1 since the most recent version causes + # the .venv creation to fail. See: + # https://bugs.launchpad.net/nova/+bug/1047120 + self.pip_install('pip==1.1') + self.pip_install('distribute') + + # Install greenlet by hand - just listing it in the requires file does + # not + # get it installed in the right order + self.pip_install('greenlet') + + self.pip_install('-r', self.pip_requires) + self.pip_install('-r', self.test_requires) + + def post_process(self): + self.get_distro().post_process() + + def parse_args(self, argv): + """Parses command-line arguments.""" + parser = argparse.ArgumentParser() + parser.add_argument('-n', '--no-site-packages', + action='store_true', + help="Do not inherit packages from global Python " + "install") + return parser.parse_args(argv[1:]) + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print 'Installing virtualenv via easy_install...', + if self.run_command(['easy_install', 'virtualenv']): + print 'Succeeded' + return + else: + print 'Failed' + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + def post_process(self): + """Any distribution-specific post-processing gets done here. + + In particular, this is useful for applying patches to code inside + the venv. + """ + pass + + +class Fedora(Distro): + """This covers all Fedora-based distributions. 
+ + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def yum_install(self, pkg, **kwargs): + print "Attempting to install '%s' via yum" % pkg + self.run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs) + + def apply_patch(self, originalfile, patchfile): + self.run_command(['patch', originalfile, patchfile]) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.yum_install('python-virtualenv', check_exit_code=False) + + super(Fedora, self).install_virtualenv() + + def post_process(self): + """Workaround for a bug in eventlet. + + This currently affects RHEL6.1, but the fix can safely be + applied to all RHEL and Fedora distributions. + + This can be removed when the fix is applied upstream. + + Nova: https://bugs.launchpad.net/nova/+bug/884915 + Upstream: https://bitbucket.org/which_linden/eventlet/issue/89 + """ + + # Install "patch" program if it's not there + if not self.check_pkg('patch'): + self.yum_install('patch') + + # Apply the eventlet patch + self.apply_patch(os.path.join(self.venv, 'lib', self.py_version, + 'site-packages', + 'eventlet/green/subprocess.py'), + 'contrib/redhat-eventlet.patch') diff --git a/portas/tools/pip-requires b/portas/tools/pip-requires new file mode 100644 index 0000000..20109d9 --- /dev/null +++ b/portas/tools/pip-requires @@ -0,0 +1,23 @@ +# The greenlet package must be compiled with gcc and needs +# the Python.h headers. Make sure you install the python-dev +# package to get the right headers... 
+greenlet>=0.3.1 + +SQLAlchemy<=0.7.9 +anyjson +eventlet>=0.9.12 +PasteDeploy +Routes +webob==1.0.8 +wsgiref +argparse +sqlalchemy-migrate>=0.7.2 +httplib2 +kombu +iso8601>=0.1.4 +PyChef +# For paste.util.template used in keystone.common.template +Paste + +passlib +puka diff --git a/portas/tools/test-requires b/portas/tools/test-requires new file mode 100644 index 0000000..e69de29 diff --git a/portas/tools/with_venv.sh b/portas/tools/with_venv.sh new file mode 100755 index 0000000..c8d2940 --- /dev/null +++ b/portas/tools/with_venv.sh @@ -0,0 +1,4 @@ +#!/bin/bash +TOOLS=`dirname $0` +VENV=$TOOLS/../.venv +source $VENV/bin/activate && $@