diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 66c73ea..0000000 --- a/.coveragerc +++ /dev/null @@ -1,8 +0,0 @@ -[run] -branch = True -source = oslo_db -omit = oslo_db/tests/* - -[report] -ignore_errors = True -precision = 2 diff --git a/.gitignore b/.gitignore deleted file mode 100644 index a24c644..0000000 --- a/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -*~ -*.swp -*.pyc -*.log -.coverage -.venv -.tox -cover/ -.openstack-common-venv/ -skeleton.egg-info/ -build/ -dist/ -AUTHORS -.update-venv/ -ChangeLog -*.egg -.testrepository/ -.project -.pydevproject -oslo.db.egg-info/ -doc/source/api diff --git a/.gitreview b/.gitreview deleted file mode 100644 index aee0565..0000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/oslo.db.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index cc92f17..0000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# \ No newline at end of file diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index c9be815..0000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./oslo_db/tests $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index aa704f9..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,89 +0,0 @@ -================= -How to contribute -================= - -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - http://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/oslo.db - - -How to run unit tests -===================== - -oslo.db (as all OpenStack projects) uses tox to run unit tests. You can find -general information about OpenStack unit tests and testing with tox in wiki_. - -oslo.db tests use PyMySQL as the default MySQL DB API driver (which is true for -OpenStack), and psycopg2 for PostgreSQL. pip will build these libs in your -venv, so you must ensure that you have the required system packages installed -for psycopg2 (PyMySQL is a pure-Python implementation and so needs no -additional system packages). For Ubuntu/Debian they are python-dev, and -libpq-dev. For Fedora/CentOS - gcc, python-devel and postgresql-devel. -There is also a separate env for testing with MySQL-python. If you are suppose -to run these tests as well, you need to install libmysqlclient-dev on -Ubuntu/Debian or mysql-devel for Fedora/CentOS. - -The oslo.db unit tests system allows to run unittests on real databases. At the -moment it supports MySQL, PostgreSQL and SQLite. -For testing on a real database backend you need to set up a user -``openstack_citest`` with password ``openstack_citest`` on localhost (some -OpenStack projects require a database named 'openstack_citest' too). -Please note, that this user must have permissions to create and drop databases. 
-If the testing system is not able to connect to the backend, tests on it will -be skipped. - -For PostgreSQL on Ubuntu you can create a user in the following way:: - - sudo -u postgres psql - postgres=# create user openstack_citest with createdb login password - 'openstack_citest'; - -For MySQL you can use the following commands:: - - mysql -u root - mysql> CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY - 'openstack_citest'; - mysql> GRANT ALL PRIVILEGES ON * . * TO 'openstack_citest'@'localhost'; - mysql> FLUSH PRIVILEGES; - -Alternatively, you can use `pifpaf`_ to run the unit tests directly without -setting up the database yourself. You still need to have the database software -installed on your system. The following tox environments can be used:: - - tox -e py27-mysql - tox -e py27-postgresql - tox -e py34-mysql - tox -e py34-postgresql - tox -e py27-all - tox -e py34-all - -The database will be set up for you locally and temporarily on each run. - -Another way is to start `pifpaf` manually and use it to run the tests as you -wish:: - - $ eval `pifpaf -g OS_TEST_DBAPI_ADMIN_CONNECTION run postgresql` - $ echo $OS_TEST_DBAPI_ADMIN_CONNECTION - postgresql://localhost/postgres?host=/var/folders/7k/pwdhb_mj2cv4zyr0kyrlzjx40000gq/T/tmpMGqN8C&port=9824 - $ tox -e py27 - […] - $ tox -e py34 - […] - # Kill pifpaf once you're done - $ kill $PIFPAF_PID - -.. _wiki: https://wiki.openstack.org/wiki/Testing#Unit_Tests -.. _pifpaf: https://github.com/jd/pifpaf diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index cb07476..0000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -Style Commandments -================== - -Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/README.rst b/README.rst deleted file mode 100644 index ee40d27..0000000 --- a/README.rst +++ /dev/null @@ -1,20 +0,0 @@ -=============================================== - oslo.db -- OpenStack Database Pattern Library -=============================================== - -.. image:: https://img.shields.io/pypi/v/oslo.db.svg - :target: https://pypi.python.org/pypi/oslo.db/ - :alt: Latest Version - -.. image:: https://img.shields.io/pypi/dm/oslo.db.svg - :target: https://pypi.python.org/pypi/oslo.db/ - :alt: Downloads - -The oslo db (database) handling library, provides database -connectivity to different database backends and various other helper -utils. - -* Free software: Apache license -* Documentation: http://docs.openstack.org/developer/oslo.db -* Source: http://git.openstack.org/cgit/openstack/oslo.db -* Bugs: http://bugs.launchpad.net/oslo.db diff --git a/README.txt b/README.txt new file mode 100644 index 0000000..aeb923d --- /dev/null +++ b/README.txt @@ -0,0 +1,13 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +Use instead the project deb-python-oslo.db at +http://git.openstack.org/cgit/openstack/deb-python-oslo.db . + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab8..0000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index 00f65cb..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - #'sphinx.ext.intersphinx', - 'oslo_config.sphinxext', - 'oslosphinx', - 'stevedore.sphinxext' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# A list of glob-style patterns that should be excluded when looking for source -# files. -exclude_patterns = [ - 'api/setup.rst', # workaround for https://launchpad.net/bugs/1260495 - 'api/tests.*', # avoid of docs generation from tests -] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'oslo.db' -copyright = u'2014, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['oslo_db.'] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index ac7b6bc..0000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../CONTRIBUTING.rst diff --git a/doc/source/history.rst b/doc/source/history.rst deleted file mode 100644 index 69ed4fe..0000000 --- a/doc/source/history.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
include:: ../../ChangeLog diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index de4d909..0000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,31 +0,0 @@ -=============================================== - oslo.db -- OpenStack Database Pattern Library -=============================================== - -The oslo.db (database) handling library, provides database -connectivity to different database backends and various other helper -utils. - -.. toctree:: - :maxdepth: 2 - - installation - opts - usage - contributing - history - -API Documentation -================= - -.. toctree:: - :maxdepth: 1 - - api/autoindex - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/source/installation.rst b/doc/source/installation.rst deleted file mode 100644 index 1262160..0000000 --- a/doc/source/installation.rst +++ /dev/null @@ -1,49 +0,0 @@ -============ -Installation -============ - -At the command line:: - - $ pip install oslo.db - -You will also need to install at least one SQL backend:: - - $ pip install psycopg2 - -Or:: - - $ pip install PyMySQL - -Or:: - - $ pip install pysqlite - - -Using with PostgreSQL ---------------------- - -If you are using PostgreSQL make sure to install the PostgreSQL client -development package for your distro. On Ubuntu this is done as follows:: - - $ sudo apt-get install libpq-dev - $ pip install psycopg2 - -The installation of psycopg2 will fail if libpq-dev is not installed first. -Note that even in a virtual environment the libpq-dev will be installed -system wide. - - -Using with MySQL-python ------------------------ - -PyMySQL is a default MySQL DB API driver for oslo.db, as well as for the whole -OpenStack. But you still can use MySQL-python as an alternative DB API driver. -For MySQL-python you must install the MySQL client development package for -your distro. On Ubuntu this is done as follows:: - - $ sudo apt-get install libmysqlclient-dev - $ pip install MySQL-python - -The installation of MySQL-python will fail if libmysqlclient-dev is not -installed first. Note that even in a virtual environment the MySQL package will -be installed system wide. diff --git a/doc/source/opts.rst b/doc/source/opts.rst deleted file mode 100644 index aa6f145..0000000 --- a/doc/source/opts.rst +++ /dev/null @@ -1,9 +0,0 @@ -===================== -Configuration Options -===================== - -oslo.db uses oslo.config to define and manage configuration -options to allow the deployer to control how an application uses the -underlying database. - -.. show-options:: oslo.db diff --git a/doc/source/usage.rst b/doc/source/usage.rst deleted file mode 100644 index 448afaa..0000000 --- a/doc/source/usage.rst +++ /dev/null @@ -1,169 +0,0 @@ -======= - Usage -======= - -To use oslo.db in a project: - -Session Handling -================ - -Session handling is achieved using the :mod:`oslo_db.sqlalchemy.enginefacade` -system. This module presents a function decorator as well as a -context manager approach to delivering :class:`.Session` as well as -:class:`.Connection` objects to a function or block. - -Both calling styles require the use of a context object. This object may -be of any class, though when used with the decorator form, requires -special instrumentation. - -The context manager form is as follows: - -.. code:: python - - - from oslo_db.sqlalchemy import enginefacade - - - class MyContext(object): - "User-defined context class." 
- - - def some_reader_api_function(context): - with enginefacade.reader.using(context) as session: - return session.query(SomeClass).all() - - - def some_writer_api_function(context, x, y): - with enginefacade.writer.using(context) as session: - session.add(SomeClass(x, y)) - - - def run_some_database_calls(): - context = MyContext() - - results = some_reader_api_function(context) - some_writer_api_function(context, 5, 10) - - -The decorator form accesses attributes off the user-defined context -directly; the context must be decorated with the -:func:`oslo_db.sqlalchemy.enginefacade.transaction_context_provider` -decorator. Each function must receive the context argument: - -.. code:: python - - - from oslo_db.sqlalchemy import enginefacade - - @enginefacade.transaction_context_provider - class MyContext(object): - "User-defined context class." - - @enginefacade.reader - def some_reader_api_function(context): - return context.session.query(SomeClass).all() - - - @enginefacade.writer - def some_writer_api_function(context, x, y): - context.session.add(SomeClass(x, y)) - - - def run_some_database_calls(): - context = MyContext() - - results = some_reader_api_function(context) - some_writer_api_function(context, 5, 10) - -.. note:: The ``context.session`` and ``context.connection`` attributes - must be accessed within the scope of an appropriate writer/reader block - (either the decorator or contextmanager approach). An AttributeError is - raised otherwise. - - -The decorator form can also be used with class and instance methods which -implicitly receive the first positional argument: - -.. code:: python - - class DatabaseAccessLayer(object): - - @classmethod - @enginefacade.reader - def some_reader_api_function(cls, context): - return context.session.query(SomeClass).all() - - @enginefacade.writer - def some_writer_api_function(self, context, x, y): - context.session.add(SomeClass(x, y)) - -.. note:: Note that enginefacade decorators must be applied **before** - `classmethod`, otherwise you will get a ``TypeError`` at import time - (as enginefacade will try to use ``inspect.getargspec()`` on a descriptor, - not on a bound method, please refer to the `Data Model - `_ section - of the Python Language Reference for details). - - -The scope of transaction and connectivity for both approaches is managed -transparently. The configuration for the connection comes from the standard -:obj:`oslo_config.cfg.CONF` collection. Additional configurations can be -established for the enginefacade using the -:func:`oslo_db.sqlalchemy.enginefacade.configure` function, before any use of -the database begins: - -.. code:: python - - from oslo_db.sqlalchemy import enginefacade - - enginefacade.configure( - sqlite_fk=True, - max_retries=5, - mysql_sql_mode='ANSI' - ) - - -Base class for models usage -=========================== - -.. code:: python - - from oslo_db.sqlalchemy import models - - - class ProjectSomething(models.TimestampMixin, - models.ModelBase): - id = Column(Integer, primary_key=True) - ... - - -DB API backend support -====================== - -.. 
code:: python - - from oslo_config import cfg - from oslo_db import api as db_api - - - _BACKEND_MAPPING = {'sqlalchemy': 'project.db.sqlalchemy.api'} - - IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING) - - def get_engine(): - return IMPL.get_engine() - - def get_session(): - return IMPL.get_session() - - # DB-API method - def do_something(somethind_id): - return IMPL.do_something(somethind_id) - -DB migration extensions -======================= - -Available extensions for `oslo_db.migration`. - -.. list-plugins:: oslo_db.sqlalchemy.migration - :detailed: diff --git a/oslo_db/__init__.py b/oslo_db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_db/_i18n.py b/oslo_db/_i18n.py deleted file mode 100644 index 8bcb7ca..0000000 --- a/oslo_db/_i18n.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html . - -""" - -import oslo_i18n - - -_translators = oslo_i18n.TranslatorFactory(domain='oslo_db') - -# The primary translation function using the well-known name "_" -_ = _translators.primary - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical diff --git a/oslo_db/api.py b/oslo_db/api.py deleted file mode 100644 index 2f592e9..0000000 --- a/oslo_db/api.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -================================= -Multiple DB API backend support. -================================= - -A DB backend module should implement a method named 'get_backend' which -takes no arguments. The method can return any object that implements DB -API methods. -""" - -import logging -import threading -import time - -from debtcollector import removals -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import reflection -import six - -from oslo_db._i18n import _LE -from oslo_db import exception -from oslo_db import options - - -LOG = logging.getLogger(__name__) - - -def safe_for_db_retry(f): - """Indicate api method as safe for re-connection to database. - - Database connection retries will be enabled for the decorated api method. 
- Database connection failure can have many causes, which can be temporary. - In such cases retry may increase the likelihood of connection. - - Usage:: - - @safe_for_db_retry - def api_method(self): - self.engine.connect() - - - :param f: database api method. - :type f: function. - """ - f.__dict__['enable_retry_on_disconnect'] = True - return f - - -def retry_on_deadlock(f): - """Retry a DB API call if Deadlock was received. - - wrap_db_entry will be applied to all db.api functions marked with this - decorator. - """ - f.__dict__['enable_retry_on_deadlock'] = True - return f - - -def retry_on_request(f): - """Retry a DB API call if RetryRequest exception was received. - - wrap_db_entry will be applied to all db.api functions marked with this - decorator. - """ - f.__dict__['enable_retry_on_request'] = True - return f - - -class wrap_db_retry(object): - """Retry db.api methods, if db_error raised - - Retry decorated db.api methods. This decorator catches db_error and retries - function in a loop until it succeeds, or until maximum retries count - will be reached. - - Keyword arguments: - - :param retry_interval: seconds between transaction retries - :type retry_interval: int or float - - :param max_retries: max number of retries before an error is raised - :type max_retries: int - - :param inc_retry_interval: determine increase retry interval or not - :type inc_retry_interval: bool - - :param max_retry_interval: max interval value between retries - :type max_retry_interval: int or float - - :param exception_checker: checks if an exception should trigger a retry - :type exception_checker: callable - """ - - @removals.removed_kwarg("retry_on_request", - "Retry on request is always enabled") - def __init__(self, retry_interval=1, max_retries=20, - inc_retry_interval=True, - max_retry_interval=10, retry_on_disconnect=False, - retry_on_deadlock=False, retry_on_request=False, - exception_checker=lambda exc: False): - super(wrap_db_retry, self).__init__() - - self.db_error = (exception.RetryRequest, ) - # default is that we re-raise anything unexpected - self.exception_checker = exception_checker - if retry_on_disconnect: - self.db_error += (exception.DBConnectionError, ) - if retry_on_deadlock: - self.db_error += (exception.DBDeadlock, ) - self.retry_interval = retry_interval - self.max_retries = max_retries - self.inc_retry_interval = inc_retry_interval - self.max_retry_interval = max_retry_interval - - def __call__(self, f): - @six.wraps(f) - def wrapper(*args, **kwargs): - next_interval = self.retry_interval - remaining = self.max_retries - - while True: - try: - return f(*args, **kwargs) - except Exception as e: - with excutils.save_and_reraise_exception() as ectxt: - expected = self._is_exception_expected(e) - if remaining > 0: - ectxt.reraise = not expected - else: - if expected: - LOG.exception(_LE('DB exceeded retry limit.')) - # if it's a RetryRequest, we need to unpack it - if isinstance(e, exception.RetryRequest): - ectxt.type_ = type(e.inner_exc) - ectxt.value = e.inner_exc - LOG.debug("Performing DB retry for function %s", - reflection.get_callable_name(f)) - # NOTE(vsergeyev): We are using patched time module, so - # this effectively yields the execution - # context to another green thread. 
- time.sleep(next_interval) - if self.inc_retry_interval: - next_interval = min( - next_interval * 2, - self.max_retry_interval - ) - remaining -= 1 - - return wrapper - - def _is_exception_expected(self, exc): - if isinstance(exc, self.db_error): - # RetryRequest is application-initated exception - # and not an error condition in case retries are - # not exceeded - if not isinstance(exc, exception.RetryRequest): - LOG.debug('DB error: %s', exc) - return True - return self.exception_checker(exc) - - -class DBAPI(object): - """Initialize the chosen DB API backend. - - After initialization API methods is available as normal attributes of - ``DBAPI`` subclass. Database API methods are supposed to be called as - DBAPI instance methods. - - :param backend_name: name of the backend to load - :type backend_name: str - - :param backend_mapping: backend name -> module/class to load mapping - :type backend_mapping: dict - :default backend_mapping: None - - :param lazy: load the DB backend lazily on the first DB API method call - :type lazy: bool - :default lazy: False - - :keyword use_db_reconnect: retry DB transactions on disconnect or not - :type use_db_reconnect: bool - - :keyword retry_interval: seconds between transaction retries - :type retry_interval: int - - :keyword inc_retry_interval: increase retry interval or not - :type inc_retry_interval: bool - - :keyword max_retry_interval: max interval value between retries - :type max_retry_interval: int - - :keyword max_retries: max number of retries before an error is raised - :type max_retries: int - """ - - def __init__(self, backend_name, backend_mapping=None, lazy=False, - **kwargs): - - self._backend = None - self._backend_name = backend_name - self._backend_mapping = backend_mapping or {} - self._lock = threading.Lock() - - if not lazy: - self._load_backend() - - self.use_db_reconnect = kwargs.get('use_db_reconnect', False) - self._wrap_db_kwargs = {k: v for k, v in kwargs.items() - if k in ('retry_interval', - 'inc_retry_interval', - 'max_retry_interval', - 'max_retries')} - - def _load_backend(self): - with self._lock: - if not self._backend: - # Import the untranslated name if we don't have a mapping - backend_path = self._backend_mapping.get(self._backend_name, - self._backend_name) - LOG.debug('Loading backend %(name)r from %(path)r', - {'name': self._backend_name, - 'path': backend_path}) - backend_mod = importutils.import_module(backend_path) - self._backend = backend_mod.get_backend() - - def __getattr__(self, key): - if not self._backend: - self._load_backend() - - attr = getattr(self._backend, key) - if not hasattr(attr, '__call__'): - return attr - # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry - # DB API methods, decorated with @safe_for_db_retry - # on disconnect. - retry_on_disconnect = self.use_db_reconnect and attr.__dict__.get( - 'enable_retry_on_disconnect', False) - retry_on_deadlock = attr.__dict__.get('enable_retry_on_deadlock', - False) - retry_on_request = attr.__dict__.get('enable_retry_on_request', False) - - if retry_on_disconnect or retry_on_deadlock or retry_on_request: - attr = wrap_db_retry( - retry_on_disconnect=retry_on_disconnect, - retry_on_deadlock=retry_on_deadlock, - **self._wrap_db_kwargs)(attr) - - return attr - - @classmethod - def from_config(cls, conf, backend_mapping=None, lazy=False): - """Initialize DBAPI instance given a config instance. 
- - :param conf: oslo.config config instance - :type conf: oslo.config.cfg.ConfigOpts - - :param backend_mapping: backend name -> module/class to load mapping - :type backend_mapping: dict - - :param lazy: load the DB backend lazily on the first DB API method call - :type lazy: bool - - """ - - conf.register_opts(options.database_opts, 'database') - - return cls(backend_name=conf.database.backend, - backend_mapping=backend_mapping, - lazy=lazy, - use_db_reconnect=conf.database.use_db_reconnect, - retry_interval=conf.database.db_retry_interval, - inc_retry_interval=conf.database.db_inc_retry_interval, - max_retry_interval=conf.database.db_max_retry_interval, - max_retries=conf.database.db_max_retries) diff --git a/oslo_db/concurrency.py b/oslo_db/concurrency.py deleted file mode 100644 index 07ede0a..0000000 --- a/oslo_db/concurrency.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2014 Mirantis.inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import logging -import threading - -from oslo_config import cfg - -from oslo_db._i18n import _LE -from oslo_db import api - - -LOG = logging.getLogger(__name__) - -tpool_opts = [ - cfg.BoolOpt('use_tpool', - default=False, - deprecated_name='dbapi_use_tpool', - deprecated_group='DEFAULT', - help='Enable the experimental use of thread pooling for ' - 'all DB API calls'), -] - - -class TpoolDbapiWrapper(object): - """DB API wrapper class. - - This wraps the oslo DB API with an option to be able to use eventlet's - thread pooling. Since the CONF variable may not be loaded at the time - this class is instantiated, we must look at it on the first DB API call. - """ - - def __init__(self, conf, backend_mapping): - self._db_api = None - self._backend_mapping = backend_mapping - self._conf = conf - self._conf.register_opts(tpool_opts, 'database') - self._lock = threading.Lock() - - @property - def _api(self): - if not self._db_api: - with self._lock: - if not self._db_api: - db_api = api.DBAPI.from_config( - conf=self._conf, backend_mapping=self._backend_mapping) - if self._conf.database.use_tpool: - try: - from eventlet import tpool - except ImportError: - LOG.exception(_LE("'eventlet' is required for " - "TpoolDbapiWrapper.")) - raise - self._db_api = tpool.Proxy(db_api) - else: - self._db_api = db_api - return self._db_api - - def __getattr__(self, key): - return getattr(self._api, key) - - -def list_opts(): - """Returns a list of oslo.config options available in this module. - - :returns: a list of (group_name, opts) tuples - """ - return [('database', copy.deepcopy(tpool_opts))] diff --git a/oslo_db/exception.py b/oslo_db/exception.py deleted file mode 100644 index 2d118cd..0000000 --- a/oslo_db/exception.py +++ /dev/null @@ -1,328 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""DB related custom exceptions. - -Custom exceptions intended to determine the causes of specific database -errors. This module provides more generic exceptions than the database-specific -driver libraries, and so users of oslo.db can catch these no matter which -database the application is using. Most of the exceptions are wrappers. Wrapper -exceptions take an original exception as positional argument and keep it for -purposes of deeper debug. - -Example:: - - try: - statement(arg) - except sqlalchemy.exc.OperationalError as e: - raise DBDuplicateEntry(e) - - -This is useful to determine more specific error cases further at execution, -when you need to add some extra information to an error message. Wrapper -exceptions takes care about original error message displaying to not to loose -low level cause of an error. All the database api exceptions wrapped into -the specific exceptions provided belove. - - -Please use only database related custom exceptions with database manipulations -with `try/except` statement. This is required for consistent handling of -database errors. -""" - -import debtcollector.removals -import six - -from oslo_db._i18n import _ -from oslo_utils.excutils import CausedByException - - -class DBError(CausedByException): - - """Base exception for all custom database exceptions. - - :kwarg inner_exception: an original exception which was wrapped with - DBError or its subclasses. - """ - - def __init__(self, inner_exception=None, cause=None): - self.inner_exception = inner_exception - super(DBError, self).__init__(six.text_type(inner_exception), cause) - - -class DBDuplicateEntry(DBError): - """Duplicate entry at unique column error. - - Raised when made an attempt to write to a unique column the same entry as - existing one. :attr: `columns` available on an instance of the exception - and could be used at error handling:: - - try: - instance_type_ref.save() - except DBDuplicateEntry as e: - if 'colname' in e.columns: - # Handle error. - - :kwarg columns: a list of unique columns have been attempted to write a - duplicate entry. - :type columns: list - :kwarg value: a value which has been attempted to write. The value will - be None, if we can't extract it for a particular database backend. Only - MySQL and PostgreSQL 9.x are supported right now. - """ - def __init__(self, columns=None, inner_exception=None, value=None): - self.columns = columns or [] - self.value = value - super(DBDuplicateEntry, self).__init__(inner_exception) - - -class DBConstraintError(DBError): - """Check constraint fails for column error. - - Raised when made an attempt to write to a column a value that does not - satisfy a CHECK constraint. 
- - :kwarg table: the table name for which the check fails - :type table: str - :kwarg check_name: the table of the check that failed to be satisfied - :type check_name: str - """ - def __init__(self, table, check_name, inner_exception=None): - self.table = table - self.check_name = check_name - super(DBConstraintError, self).__init__(inner_exception) - - -class DBReferenceError(DBError): - """Foreign key violation error. - - :param table: a table name in which the reference is directed. - :type table: str - :param constraint: a problematic constraint name. - :type constraint: str - :param key: a broken reference key name. - :type key: str - :param key_table: a table name which contains the key. - :type key_table: str - """ - - def __init__(self, table, constraint, key, key_table, - inner_exception=None): - self.table = table - self.constraint = constraint - self.key = key - self.key_table = key_table - super(DBReferenceError, self).__init__(inner_exception) - - -class DBNonExistentConstraint(DBError): - """Constraint does not exist. - - :param table: table name - :type table: str - :param constraint: constraint name - :type table: str - """ - - def __init__(self, table, constraint, inner_exception=None): - self.table = table - self.constraint = constraint - super(DBNonExistentConstraint, self).__init__(inner_exception) - - -class DBNonExistentTable(DBError): - """Table does not exist. - - :param table: table name - :type table: str - """ - - def __init__(self, table, inner_exception=None): - self.table = table - super(DBNonExistentTable, self).__init__(inner_exception) - - -class DBDeadlock(DBError): - - """Database dead lock error. - - Deadlock is a situation that occurs when two or more different database - sessions have some data locked, and each database session requests a lock - on the data that another, different, session has already locked. - """ - - def __init__(self, inner_exception=None): - super(DBDeadlock, self).__init__(inner_exception) - - -class DBInvalidUnicodeParameter(Exception): - - """Database unicode error. - - Raised when unicode parameter is passed to a database - without encoding directive. - """ - - @debtcollector.removals.removed_property - def message(self): - # NOTE(rpodolyaka): provided for compatibility with python 3k, where - # exceptions do not have .message attribute, while we used to have one - # in this particular exception class. See LP #1542961 for details. - return str(self) - - def __init__(self): - super(DBInvalidUnicodeParameter, self).__init__( - _("Invalid Parameter: Encoding directive wasn't provided.")) - - -class DbMigrationError(DBError): - - """Wrapped migration specific exception. - - Raised when migrations couldn't be completed successfully. - """ - - def __init__(self, message=None): - super(DbMigrationError, self).__init__(message) - - -class DBMigrationError(DbMigrationError): - - """Wrapped migration specific exception. - - Raised when migrations couldn't be completed successfully. - """ - def __init__(self, message): - super(DBMigrationError, self).__init__(message) - - -debtcollector.removals.removed_class(DbMigrationError, - replacement=DBMigrationError) - - -class DBConnectionError(DBError): - - """Wrapped connection specific exception. - - Raised when database connection is failed. - """ - - pass - - -class DBDataError(DBError): - """Raised for errors that are due to problems with the processed data. - - E.g. 
division by zero, numeric value out of range, incorrect data type, etc - - """ - - -class InvalidSortKey(Exception): - """A sort key destined for database query usage is invalid.""" - - @debtcollector.removals.removed_property - def message(self): - # NOTE(rpodolyaka): provided for compatibility with python 3k, where - # exceptions do not have .message attribute, while we used to have one - # in this particular exception class. See LP #1542961 for details. - return str(self) - - def __init__(self, key=None): - super(InvalidSortKey, self).__init__( - _("Sort key supplied is invalid: %s") % key) - self.key = key - - -class ColumnError(Exception): - """Error raised when no column or an invalid column is found.""" - - -class BackendNotAvailable(Exception): - """Error raised when a particular database backend is not available - - within a test suite. - - """ - - -class RetryRequest(Exception): - """Error raised when DB operation needs to be retried. - - That could be intentionally raised by the code without any real DB errors. - """ - def __init__(self, inner_exc): - self.inner_exc = inner_exc - - -class NoEngineContextEstablished(AttributeError): - """Error raised for enginefacade attribute access with no context. - - - This applies to the ``session`` and ``connection`` attributes - of a user-defined context and/or RequestContext object, when they - are accessed *outside* of the scope of an enginefacade decorator - or context manager. - - The exception is a subclass of AttributeError so that - normal Python missing attribute behaviors are maintained, such - as support for ``getattr(context, 'session', None)``. - - - """ - - -class ContextNotRequestedError(AttributeError): - """Error raised when requesting a not-setup enginefacade attribute. - - This applies to the ``session`` and ``connection`` attributes - of a user-defined context and/or RequestContext object, when they - are accessed *within* the scope of an enginefacade decorator - or context manager, but the context has not requested that - attribute (e.g. like "with enginefacade.connection.using(context)" - and "context.session" is requested). - - """ - - -class CantStartEngineError(Exception): - """Error raised when the enginefacade cannot start up correctly.""" - - -class NotSupportedWarning(Warning): - """Warn that an argument or call that was passed is not supported. - - This subclasses Warning so that it can be filtered as a distinct - category. - - .. seealso:: - - https://docs.python.org/2/library/warnings.html - - """ - - -class OsloDBDeprecationWarning(DeprecationWarning): - """Issued per usage of a deprecated API. - - This subclasses DeprecationWarning so that it can be filtered as a distinct - category. - - .. seealso:: - - https://docs.python.org/2/library/warnings.html - - """ diff --git a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-error.po b/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-error.po deleted file mode 100644 index 3effdf8..0000000 --- a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-error.po +++ /dev/null @@ -1,46 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Andi Chandler , 2014-2015 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-03-19 07:04+0000\n" -"Last-Translator: Andi Chandler \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -msgid "'eventlet' is required for TpoolDbapiWrapper." -msgstr "'eventlet' is required for TpoolDbapiWrapper." - -msgid "DB exceeded retry limit." -msgstr "DB exceeded retry limit." - -msgid "DB exception wrapped." -msgstr "DB exception wrapped." - -#, python-format -msgid "DBAPIError exception wrapped from %s" -msgstr "DBAPIError exception wrapped from %s" - -#, python-format -msgid "Failed to migrate to version %(ver)s on engine %(eng)s" -msgstr "Failed to migrate to version %(ver)s on engine %(eng)s" - -msgid "" -"Migration number for migrate plugin must be valid integer or empty, if you " -"want to downgrade to initial state" -msgstr "" -"Migration number for migrate plugin must be valid integer or empty, if you " -"want to downgrade to initial state" diff --git a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-info.po b/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-info.po deleted file mode 100644 index 625378f..0000000 --- a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-info.po +++ /dev/null @@ -1,30 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Andi Chandler , 2014 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2014-11-26 01:04+0000\n" -"Last-Translator: Andi Chandler \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -#, python-format -msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" -msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" - -#, python-format -msgid "The %(dbapi)s backend is unavailable: %(err)s" -msgstr "The %(dbapi)s backend is unavailable: %(err)s" diff --git a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-warning.po b/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-warning.po deleted file mode 100644 index 3663129..0000000 --- a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db-log-warning.po +++ /dev/null @@ -1,46 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Andi Chandler , 2014-2015 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-03-19 07:04+0000\n" -"Last-Translator: Andi Chandler \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -msgid "Id not in sort_keys; is sort_keys unique?" -msgstr "Id not in sort_keys; is sort_keys unique?" - -#, python-format -msgid "" -"MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" -msgstr "" -"MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" - -#, python-format -msgid "" -"Parent process %(orig)s forked (%(newproc)s) with an open database " -"connection, which is being discarded and recreated." -msgstr "" -"Parent process %(orig)s forked (%(newproc)s) with an open database " -"connection, which is being discarded and recreated." - -#, python-format -msgid "SQL connection failed. %s attempts left." -msgstr "SQL connection failed. %s attempts left." - -msgid "Unable to detect effective SQL mode" -msgstr "Unable to detect effective SQL mode" diff --git a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db.po b/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db.po deleted file mode 100644 index bc5d79a..0000000 --- a/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db.po +++ /dev/null @@ -1,85 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Andi Chandler , 2014-2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev46\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-15 11:18+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-20 06:31+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language: en-GB\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: English (United Kingdom)\n" - -msgid "Invalid Parameter: Encoding directive wasn't provided." -msgstr "Invalid Parameter: Encoding directive wasn't provided." - -#, python-format -msgid "" -"Please specify column %s in col_name_col_instance param. It is required " -"because column has unsupported type by SQLite." -msgstr "" -"Please specify column %s in col_name_col_instance param. It is required " -"because column has unsupported type by SQLite." - -#, python-format -msgid "Sort key supplied is invalid: %s" -msgstr "Sort key supplied is invalid: %s" - -#, python-format -msgid "" -"Tables \"%s\" have non utf8 collation, please make sure all tables are " -"CHARSET=utf8" -msgstr "" -"Tables \"%s\" have non utf8 collation, please make sure all tables are " -"CHARSET=utf8" - -msgid "" -"The database is not under version control, but has tables. Please stamp the " -"current version of the schema manually." -msgstr "" -"The database is not under version control, but has tables. Please stamp the " -"current version of the schema manually." - -#, python-format -msgid "" -"There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " -"feature." 
-msgstr "" -"There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " -"feature." - -#, python-format -msgid "There is no `project_id` column in `%s` table." -msgstr "There is no `project_id` column in `%s` table." - -#, python-format -msgid "Unknown sort direction, must be one of: %s" -msgstr "Unknown sort direction, must be one of: %s" - -msgid "Unsupported id columns type" -msgstr "Unsupported id columns type" - -#, python-format -msgid "" -"col_name_col_instance param has wrong type of column instance for column %s " -"It should be instance of sqlalchemy.Column." -msgstr "" -"col_name_col_instance param has wrong type of column instance for column %s " -"It should be instance of sqlalchemy.Column." - -msgid "model should be a subclass of ModelBase" -msgstr "model should be a subclass of ModelBase" - -msgid "version should be an integer" -msgstr "version should be an integer" diff --git a/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-error.po b/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-error.po deleted file mode 100644 index 124f844..0000000 --- a/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-error.po +++ /dev/null @@ -1,46 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Adriana Chisco Landazábal , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-06-22 09:08+0000\n" -"Last-Translator: Adriana Chisco Landazábal \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -msgid "'eventlet' is required for TpoolDbapiWrapper." -msgstr "Se necesita 'eventlet' para TpoolDbapiWrapper." - -msgid "DB exceeded retry limit." -msgstr "La base de datos excedió el límite de intentos." - -msgid "DB exception wrapped." -msgstr "excepción de base de datos empaquetada." - -#, python-format -msgid "DBAPIError exception wrapped from %s" -msgstr "Excepción de error DBAPI empaquetada de %s" - -#, python-format -msgid "Failed to migrate to version %(ver)s on engine %(eng)s" -msgstr "Error al migrar a versión %(ver)s en motor %(eng)s" - -msgid "" -"Migration number for migrate plugin must be valid integer or empty, if you " -"want to downgrade to initial state" -msgstr "" -"Si desea volver al estado inicial de la versión anterior, el número de " -"migración para el complemento de migración debe ser un entero válido o vacío" diff --git a/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-info.po b/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-info.po deleted file mode 100644 index 977c4ef..0000000 --- a/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-info.po +++ /dev/null @@ -1,30 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Adriana Chisco Landazábal , 2015 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-06-22 09:10+0000\n" -"Last-Translator: Adriana Chisco Landazábal \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -#, python-format -msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" -msgstr "Eliminando fila duplicada con id: %(id)s de la tabla: %(table)s" - -#, python-format -msgid "The %(dbapi)s backend is unavailable: %(err)s" -msgstr "El %(dbapi)s backend ino está disponible: %(err)s" diff --git a/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-warning.po b/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-warning.po deleted file mode 100644 index 4d5ec75..0000000 --- a/oslo_db/locale/es/LC_MESSAGES/oslo_db-log-warning.po +++ /dev/null @@ -1,47 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Adriana Chisco Landazábal , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-06-22 09:13+0000\n" -"Last-Translator: Adriana Chisco Landazábal \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -msgid "Id not in sort_keys; is sort_keys unique?" -msgstr "Id no está en sort_keys; ¿sort_keys es exclusivo?" - -#, python-format -msgid "" -"MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" -msgstr "" -"El modo MySQL de SL es: '%s', considere habilitar TRADITIONAL o " -"STRICT_ALL_TABLES" - -#, python-format -msgid "" -"Parent process %(orig)s forked (%(newproc)s) with an open database " -"connection, which is being discarded and recreated." -msgstr "" -"El proceso padre %(orig)s bifurcó (%(newproc)s) con una conexión abierta de " -"base de datos, la cual se está descartando y creando de nuevo." - -#, python-format -msgid "SQL connection failed. %s attempts left." -msgstr "La conexión SQL ha fallado. Quedan %s intentos." - -msgid "Unable to detect effective SQL mode" -msgstr "No es posible detectar modo SQL efectivo" diff --git a/oslo_db/locale/es/LC_MESSAGES/oslo_db.po b/oslo_db/locale/es/LC_MESSAGES/oslo_db.po deleted file mode 100644 index e6ed95b..0000000 --- a/oslo_db/locale/es/LC_MESSAGES/oslo_db.po +++ /dev/null @@ -1,82 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Adriana Chisco Landazábal , 2015 -# Miriam Godinez , 2015 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-09-07 10:45+0000\n" -"Last-Translator: Miriam Godinez \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -msgid "Invalid Parameter: Encoding directive wasn't provided." -msgstr "Parámetro no válido: No se proporcionó directiva de codificación." - -#, python-format -msgid "" -"Please specify column %s in col_name_col_instance param. It is required " -"because column has unsupported type by SQLite." -msgstr "" -"Por favor especifique la columna %s en el parámetro col_name_col_instance. " -"Es necesario porque la columna tiene un tipo no soportado por SQLite." - -#, python-format -msgid "" -"Tables \"%s\" have non utf8 collation, please make sure all tables are " -"CHARSET=utf8" -msgstr "" -"Las tablas \"%s\" no tienen una colación utf8, por favor asegúrese de que " -"todas las tablas sean CHARSET=utf8" - -msgid "" -"The database is not under version control, but has tables. Please stamp the " -"current version of the schema manually." -msgstr "" -"La base de datos no está bajo el control de la versión, pero tiene tablas. " -"Por favor indique manualmente la versión actual del esquema." - -#, python-format -msgid "" -"There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " -"feature." -msgstr "" -"No existe la columna `deleted` en la tabla `%s`. El projecto no utiliza la " -"característica de eliminación suave." - -#, python-format -msgid "There is no `project_id` column in `%s` table." -msgstr "No existe la columna `project_id` en la tabla `%s`." - -#, python-format -msgid "Unknown sort direction, must be one of: %s" -msgstr "Clase de dirección desconocida, debe ser una de: %s" - -msgid "Unsupported id columns type" -msgstr "Tipo de identificador de columnas no soportado" - -#, python-format -msgid "" -"col_name_col_instance param has wrong type of column instance for column %s " -"It should be instance of sqlalchemy.Column." -msgstr "" -"El parámetro col_name_col_instance contiene el tipo incorrecto de instancia " -"de columna para la columna %s. Debe ser una instancia de sqlalchemy.Column." - -msgid "model should be a subclass of ModelBase" -msgstr "el modelo debe ser una subclase del ModelBase" - -msgid "version should be an integer" -msgstr "la versión debe ser un entero" diff --git a/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-error.po b/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-error.po deleted file mode 100644 index 996b2ba..0000000 --- a/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-error.po +++ /dev/null @@ -1,47 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Maxime COQUEREL , 2014-2015 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-02-18 02:50+0000\n" -"Last-Translator: Maxime COQUEREL \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: French\n" - -msgid "'eventlet' is required for TpoolDbapiWrapper." -msgstr "'eventlet' est requis pour poolDbapiWrapper." - -msgid "DB exceeded retry limit." -msgstr "DB limite de tentatives dépassé." - -msgid "DB exception wrapped." -msgstr "DB exception enveloppé." - -#, python-format -msgid "DBAPIError exception wrapped from %s" -msgstr "Exception DBAPIError enveloppé depuis %s" - -#, python-format -msgid "Failed to migrate to version %(ver)s on engine %(eng)s" -msgstr "Échec de migration de la version %(ver)s sur le moteur %(eng)s" - -msgid "" -"Migration number for migrate plugin must be valid integer or empty, if you " -"want to downgrade to initial state" -msgstr "" -"Le numéro de migration pour la migration de plugin doit être un entier " -"valide ou un champs nul, si vous souhaitez revenir vers l'état initial de la " -"précédente version" diff --git a/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-info.po b/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-info.po deleted file mode 100644 index 74a17a4..0000000 --- a/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-info.po +++ /dev/null @@ -1,32 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Maxime COQUEREL , 2014 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2014-11-23 06:38+0000\n" -"Last-Translator: Maxime COQUEREL \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: French\n" - -#, python-format -msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" -msgstr "" -"Suppression de la ligne dupliquée avec l'id: %(id)s depuis la table: " -"%(table)s" - -#, python-format -msgid "The %(dbapi)s backend is unavailable: %(err)s" -msgstr "%(dbapi)s backend n'est pas disponible: %(err)s" diff --git a/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-warning.po b/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-warning.po deleted file mode 100644 index a2203db..0000000 --- a/oslo_db/locale/fr/LC_MESSAGES/oslo_db-log-warning.po +++ /dev/null @@ -1,46 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Lucas Mascaro , 2015 -# Maxime COQUEREL , 2014 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-17 11:08+0000\n" -"Last-Translator: Lucas Mascaro \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: French\n" - -msgid "Id not in sort_keys; is sort_keys unique?" -msgstr "Id n'est pas dans sort_keys; sort_keys est unique ?" - -#, python-format -msgid "" -"MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" -msgstr "MySQL SQL mode est '%s', TRADITIONAL ou STRICT_ALL_TABLES" - -#, python-format -msgid "" -"Parent process %(orig)s forked (%(newproc)s) with an open database " -"connection, which is being discarded and recreated." -msgstr "" -"Le processus parent %(orig)s s'est séparé (%(newproc)s) avec une connection " -"a la base de donnée ouverte, elle sera rejetée et recréée." - -#, python-format -msgid "SQL connection failed. %s attempts left." -msgstr "Echec de connexion SQL. %s tentatives échouées" - -msgid "Unable to detect effective SQL mode" -msgstr "Impossible de détecter le mode SQL" diff --git a/oslo_db/locale/fr/LC_MESSAGES/oslo_db.po b/oslo_db/locale/fr/LC_MESSAGES/oslo_db.po deleted file mode 100644 index 14dc4b4..0000000 --- a/oslo_db/locale/fr/LC_MESSAGES/oslo_db.po +++ /dev/null @@ -1,83 +0,0 @@ -# Translations template for oslo.db. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the oslo.db project. -# -# Translators: -# Lucas Mascaro , 2015 -# Maxime COQUEREL , 2014-2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db 4.6.1.dev19\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-19 04:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-07 04:24+0000\n" -"Last-Translator: Lucas Mascaro \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: French\n" - -msgid "Invalid Parameter: Encoding directive wasn't provided." -msgstr "Paramètre non valide : La directive encodage n'a pas été fourni." - -#, python-format -msgid "" -"Please specify column %s in col_name_col_instance param. It is required " -"because column has unsupported type by SQLite." -msgstr "" -"Spécifiez la colonne %s dans le paramètre col_name_col_instance. Ceci est " -"obligatoire car la colonne a un type non pris en charge dans SQLite." - -#, python-format -msgid "" -"Tables \"%s\" have non utf8 collation, please make sure all tables are " -"CHARSET=utf8" -msgstr "" -"Les tables \"%s\" ont une collation non utf8, assurez-vous que pour toutes " -"les tables CHARSET=utf8." - -msgid "" -"The database is not under version control, but has tables. Please stamp the " -"current version of the schema manually." -msgstr "" -"La base de données n'est pas versionnée, mais contient des tables. Veuillez " -"indiquer manuellement la version courante du schéma." - -#, python-format -msgid "" -"There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " -"feature." -msgstr "" -"Il n'y a aucune colonne `deleted` dans la table `%s`. 
Le projet ne peut pas " -"utiliser cette fonctionnalité." - -#, python-format -msgid "There is no `project_id` column in `%s` table." -msgstr "Il n'y a pas de colonne `project_id` dans la table `%s`." - -#, python-format -msgid "Unknown sort direction, must be one of: %s" -msgstr "Ordre de tris inconnu, il doit être un de: %s" - -msgid "Unsupported id columns type" -msgstr "Type de colonnes id non pris en charge" - -#, python-format -msgid "" -"col_name_col_instance param has wrong type of column instance for column %s " -"It should be instance of sqlalchemy.Column." -msgstr "" -"Le paramètre col_name_col_instance contient un type d'instance de colonne " -"incorrect pour la colonne %s. Il devrait être une instance de sqlalchemy." -"Column." - -msgid "model should be a subclass of ModelBase" -msgstr "model doit etre une sous-classe de ModelBase" - -msgid "version should be an integer" -msgstr "version doit être un entier" diff --git a/oslo_db/options.py b/oslo_db/options.py deleted file mode 100644 index d5d15cf..0000000 --- a/oslo_db/options.py +++ /dev/null @@ -1,229 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from debtcollector import removals -from oslo_config import cfg - - -database_opts = [ - cfg.StrOpt('sqlite_db', - deprecated_for_removal=True, - deprecated_reason='Should use config option connection or ' - 'slave_connection to connect the database.', - deprecated_group='DEFAULT', - default='oslo.sqlite', - help='The file name to use with SQLite.'), - cfg.BoolOpt('sqlite_synchronous', - deprecated_group='DEFAULT', - default=True, - help='If True, SQLite uses synchronous mode.'), - cfg.StrOpt('backend', - default='sqlalchemy', - deprecated_name='db_backend', - deprecated_group='DEFAULT', - help='The back end to use for the database.'), - cfg.StrOpt('connection', - help='The SQLAlchemy connection string to use to connect to ' - 'the database.', - secret=True, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_connection', - group='DATABASE'), - cfg.DeprecatedOpt('connection', - group='sql'), ]), - cfg.StrOpt('slave_connection', - secret=True, - help='The SQLAlchemy connection string to use to connect to the' - ' slave database.'), - cfg.StrOpt('mysql_sql_mode', - default='TRADITIONAL', - help='The SQL mode to be used for MySQL sessions. ' - 'This option, including the default, overrides any ' - 'server-set SQL mode. To use whatever SQL mode ' - 'is set by the server configuration, ' - 'set this to no value. 
Example: mysql_sql_mode='), - cfg.IntOpt('idle_timeout', - default=3600, - deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_idle_timeout', - group='DATABASE'), - cfg.DeprecatedOpt('idle_timeout', - group='sql')], - help='Timeout before idle SQL connections are reaped.'), - cfg.IntOpt('min_pool_size', - default=1, - deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_min_pool_size', - group='DATABASE')], - help='Minimum number of SQL connections to keep open in a ' - 'pool.'), - cfg.IntOpt('max_pool_size', - default=5, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_max_pool_size', - group='DATABASE')], - help='Maximum number of SQL connections to keep open in a ' - 'pool. Setting a value of 0 indicates no limit.'), - cfg.IntOpt('max_retries', - default=10, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_max_retries', - group='DATABASE')], - help='Maximum number of database connection retries ' - 'during startup. Set to -1 to specify an infinite ' - 'retry count.'), - cfg.IntOpt('retry_interval', - default=10, - deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', - group='DEFAULT'), - cfg.DeprecatedOpt('reconnect_interval', - group='DATABASE')], - help='Interval between retries of opening a SQL connection.'), - cfg.IntOpt('max_overflow', - default=50, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', - group='DEFAULT'), - cfg.DeprecatedOpt('sqlalchemy_max_overflow', - group='DATABASE')], - help='If set, use this value for max_overflow with ' - 'SQLAlchemy.'), - cfg.IntOpt('connection_debug', - default=0, - min=0, max=100, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', - group='DEFAULT')], - help='Verbosity of SQL debugging information: 0=None, ' - '100=Everything.'), - cfg.BoolOpt('connection_trace', - default=False, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', - group='DEFAULT')], - help='Add Python stack traces to SQL as comment strings.'), - cfg.IntOpt('pool_timeout', - deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', - group='DATABASE')], - help='If set, use this value for pool_timeout with ' - 'SQLAlchemy.'), - cfg.BoolOpt('use_db_reconnect', - default=False, - help='Enable the experimental use of database reconnect ' - 'on connection lost.'), - cfg.IntOpt('db_retry_interval', - default=1, - help='Seconds between retries of a database transaction.'), - cfg.BoolOpt('db_inc_retry_interval', - default=True, - help='If True, increases the interval between retries ' - 'of a database operation up to db_max_retry_interval.'), - cfg.IntOpt('db_max_retry_interval', - default=10, - help='If db_inc_retry_interval is set, the ' - 'maximum seconds between retries of a ' - 'database operation.'), - cfg.IntOpt('db_max_retries', - default=20, - help='Maximum retries in case of connection error or deadlock ' - 'error before error is ' - 'raised. Set to -1 to specify an infinite retry ' - 'count.'), -] - - -@removals.removed_kwarg("sqlite_db", - "Config option sqlite_db is deprecated for removal," - "please use option `connection`.") -def set_defaults(conf, connection=None, sqlite_db=None, - max_pool_size=None, max_overflow=None, - pool_timeout=None): - """Set defaults for configuration variables. - - Overrides default options values. - - :param conf: Config instance specified to set default options in it. 
Using - instances instead of a global config object prevents conflicts between - option declarations. - :type conf: oslo.config.cfg.ConfigOpts instance. - - :keyword connection: SQL connection string. - Valid SQLite URL forms are: - * sqlite:///:memory: (or, sqlite://) - * sqlite:///relative/path/to/file.db - * sqlite:////absolute/path/to/file.db - :type connection: str - - :keyword sqlite_db: path to SQLite database file. - :type sqlite_db: str - - :keyword max_pool_size: maximum connections pool size. The size of the pool - to be maintained, defaults to 5. This is the largest number of connections - that will be kept persistently in the pool. Note that the pool begins with - no connections; once this number of connections is requested, that number - of connections will remain. - :type max_pool_size: int - :default max_pool_size: 5 - - :keyword max_overflow: The maximum overflow size of the pool. When the - number of checked-out connections reaches the size set in pool_size, - additional connections will be returned up to this limit. When those - additional connections are returned to the pool, they are disconnected and - discarded. It follows then that the total number of simultaneous - connections the pool will allow is pool_size + max_overflow, and the total - number of "sleeping" connections the pool will allow is pool_size. - max_overflow can be set to -1 to indicate no overflow limit; no limit will - be placed on the total number of concurrent connections. Defaults to 10; - this value will be used if the parameter is `None`. - :type max_overflow: int - :default max_overflow: None - - :keyword pool_timeout: The number of seconds to wait before giving up on - returning a connection. Defaults to 30; this value will be used if the - parameter is `None`. - :type pool_timeout: int - :default pool_timeout: None - """ - - conf.register_opts(database_opts, group='database') - - if connection is not None: - conf.set_default('connection', connection, group='database') - if sqlite_db is not None: - conf.set_default('sqlite_db', sqlite_db, group='database') - if max_pool_size is not None: - conf.set_default('max_pool_size', max_pool_size, group='database') - if max_overflow is not None: - conf.set_default('max_overflow', max_overflow, group='database') - if pool_timeout is not None: - conf.set_default('pool_timeout', pool_timeout, group='database') - - -def list_opts(): - """Returns a list of oslo.config options available in the library. - - The returned list includes all oslo.config options which may be registered - at runtime by the library. - - Each element of the list is a tuple. The first element is the name of the - group under which the list of elements in the second element will be - registered. A group name of None corresponds to the [DEFAULT] group in - config files. - - The purpose of this is to allow tools like the Oslo sample config file - generator to discover the options exposed to users by this library.
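For reference, a minimal sketch of how a consuming service drives this module: it registers the ``[database]`` options on its config object, applies service-local defaults, and can use ``list_opts()`` to see the same options the sample-config generator would discover. The connection URL and pool numbers below are purely illustrative; values loaded from the service's configuration files still take precedence over these defaults::

    from oslo_config import cfg

    from oslo_db import options

    CONF = cfg.CONF

    # Register the [database] options and apply service-local defaults.
    # Anything set in the service's configuration files overrides these.
    options.set_defaults(
        CONF,
        connection='sqlite:///:memory:',  # illustrative URL only
        max_pool_size=10,
        max_overflow=20,
        pool_timeout=60,
    )

    # list_opts() is what sample-config tooling calls to discover the
    # options registered above.
    for group, opts in options.list_opts():
        print(group, [opt.name for opt in opts])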
- - :returns: a list of (group_name, opts) tuples - """ - return [('database', database_opts)] diff --git a/oslo_db/sqlalchemy/__init__.py b/oslo_db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_db/sqlalchemy/compat/__init__.py b/oslo_db/sqlalchemy/compat/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_db/sqlalchemy/compat/utils.py b/oslo_db/sqlalchemy/compat/utils.py deleted file mode 100644 index 8ebffcc..0000000 --- a/oslo_db/sqlalchemy/compat/utils.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import re - -import sqlalchemy - - -SQLA_VERSION = tuple( - int(num) if re.match(r'^\d+$', num) else num - for num in sqlalchemy.__version__.split(".") -) - -sqla_100 = SQLA_VERSION >= (1, 0, 0) -sqla_097 = SQLA_VERSION >= (0, 9, 7) -sqla_094 = SQLA_VERSION >= (0, 9, 4) -sqla_090 = SQLA_VERSION >= (0, 9, 0) -sqla_08 = SQLA_VERSION >= (0, 8) - - -def get_postgresql_enums(conn): - """Return a list of ENUM type names on a Postgresql backend. - - For SQLAlchemy 0.9 and lower, makes use of the semi-private - _load_enums() method of the Postgresql dialect. In SQLAlchemy - 1.0 this feature is supported using get_enums(). - - This function may only be called when the given connection - is against the Postgresql backend. It will fail for other - kinds of backends. - - """ - if sqla_100: - return [e['name'] for e in sqlalchemy.inspect(conn).get_enums()] - else: - return conn.dialect._load_enums(conn).keys() diff --git a/oslo_db/sqlalchemy/enginefacade.py b/oslo_db/sqlalchemy/enginefacade.py deleted file mode 100644 index cd5d74b..0000000 --- a/oslo_db/sqlalchemy/enginefacade.py +++ /dev/null @@ -1,1171 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import contextlib -import functools -import inspect -import operator -import threading -import warnings - -from oslo_config import cfg - -from oslo_db import exception -from oslo_db import options -from oslo_db.sqlalchemy import engines -from oslo_db.sqlalchemy import orm - - -class _symbol(object): - """represent a fixed symbol.""" - - __slots__ = 'name', - - def __init__(self, name): - self.name = name - - def __repr__(self): - return "symbol(%r)" % self.name - - -_ASYNC_READER = _symbol('ASYNC_READER') -"""Represent the transaction state of "async reader". - -This state indicates that the transaction is a read-only and is -safe to use on an asynchronously updated slave database. 
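As a brief illustration of the compat helper above: the parsed version tuple allows plain tuple comparisons to gate version-specific code paths, and ``get_postgresql_enums()`` can be called once a PostgreSQL connection is available. The connection URL is a placeholder and assumes a reachable server with the psycopg2 driver installed::

    import sqlalchemy

    from oslo_db.sqlalchemy.compat import utils as compat_utils

    # SQLA_VERSION parses "1.0.14" into (1, 0, 14), so flags such as
    # compat_utils.sqla_100 are simple tuple comparisons.
    print(compat_utils.SQLA_VERSION, compat_utils.sqla_100)

    # Only valid against a PostgreSQL backend; the URL is a placeholder.
    engine = sqlalchemy.create_engine(
        'postgresql://user:secret@localhost/mydb')
    with engine.connect() as conn:
        print(compat_utils.get_postgresql_enums(conn))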
- -""" - -_READER = _symbol('READER') -"""Represent the transaction state of "reader". - -This state indicates that the transaction is a read-only and is -only safe to use on a synchronously updated slave database; otherwise -the master database should be used. - -""" - - -_WRITER = _symbol('WRITER') -"""Represent the transaction state of "writer". - -This state indicates that the transaction writes data and -should be directed at the master database. - -""" - - -class _Default(object): - """Mark a value as a default value. - - A value in the local configuration dictionary wrapped with - _Default() will not take precedence over a value that is specified - in cfg.CONF. Values that are set after the fact using configure() - will supersede those in cfg.CONF. - - """ - - __slots__ = 'value', - - _notset = _symbol("NOTSET") - - def __init__(self, value=_notset): - self.value = value - - @classmethod - def resolve(cls, value): - if isinstance(value, _Default): - v = value.value - if v is cls._notset: - return None - else: - return v - else: - return value - - @classmethod - def resolve_w_conf(cls, value, conf_namespace, key): - if isinstance(value, _Default): - v = getattr(conf_namespace, key, value.value) - if v is cls._notset: - return None - else: - return v - else: - return value - - @classmethod - def is_set(cls, value): - return not isinstance(value, _Default) or \ - value.value is not cls._notset - - @classmethod - def is_set_w_conf(cls, value, conf_namespace, key): - return not isinstance(value, _Default) or \ - value.value is not cls._notset or \ - hasattr(conf_namespace, key) - - -class _TransactionFactory(object): - """A factory for :class:`._TransactionContext` objects. - - By default, there is just one of these, set up - based on CONF, however instance-level :class:`._TransactionFactory` - objects can be made, as is the case with the - :class:`._TestTransactionFactory` subclass used by the oslo.db test suite. - - """ - def __init__(self): - self._url_cfg = { - 'connection': _Default(), - 'slave_connection': _Default(), - } - self._engine_cfg = { - 'sqlite_fk': _Default(False), - 'mysql_sql_mode': _Default('TRADITIONAL'), - 'idle_timeout': _Default(3600), - 'connection_debug': _Default(0), - 'max_pool_size': _Default(), - 'max_overflow': _Default(), - 'pool_timeout': _Default(), - 'sqlite_synchronous': _Default(True), - 'connection_trace': _Default(False), - 'max_retries': _Default(10), - 'retry_interval': _Default(10), - 'thread_checkin': _Default(True), - 'json_serializer': _Default(None), - 'json_deserializer': _Default(None), - 'logging_name': _Default(None) - } - self._maker_cfg = { - 'expire_on_commit': _Default(False), - '__autocommit': True - } - self._transaction_ctx_cfg = { - 'rollback_reader_sessions': False, - } - self._facade_cfg = { - 'synchronous_reader': True, - 'on_engine_create': [], - } - - # other options that are defined in oslo.db.options.database_opts - # but do not apply to the standard enginefacade arguments - # (most seem to apply to api.DBAPI). - self._ignored_cfg = dict( - (k, _Default(None)) for k in [ - 'db_max_retries', 'db_inc_retry_interval', - 'use_db_reconnect', - 'db_retry_interval', 'min_pool_size', - 'db_max_retry_interval', - 'sqlite_db', 'backend']) - - self._started = False - self._legacy_facade = None - self._start_lock = threading.Lock() - - def configure_defaults(self, **kw): - """Apply default configurational options. - - This method can only be called before any specific - transaction-beginning methods have been called. 
- - Configurational options are within a fixed set of keys, and fall - under three categories: URL configuration, engine configuration, - and session configuration. Each key given will be tested against - these three configuration sets to see which one is applicable; if - it is not applicable to any set, an exception is raised. - - The configurational options given here act as **defaults** - when the :class:`._TransactionFactory` is configured using - a :class:`oslo_config.cfg.ConfigOpts` object; the options - present within the :class:`oslo_config.cfg.ConfigOpts` **take - precedence** versus the arguments passed here. By default, - the :class:`._TransactionFactory` loads in the configuration from - :data:`oslo_config.cfg.CONF`, after applying the - :data:`oslo_db.options.database_opts` configurational defaults to it. - - .. seealso:: - - :meth:`._TransactionFactory.configure` - - """ - self._configure(True, kw) - - def configure(self, **kw): - """Apply configurational options. - - This method can only be called before any specific - transaction-beginning methods have been called. - - Behavior here is the same as that of - :meth:`._TransactionFactory.configure_defaults`, - with the exception that values specified here will **supersede** those - setup in the :class:`oslo_config.cfg.ConfigOpts` options. - - .. seealso:: - - :meth:`._TransactionFactory.configure_defaults` - - """ - self._configure(False, kw) - - def _configure(self, as_defaults, kw): - - if self._started: - raise TypeError("this TransactionFactory is already started") - not_supported = [] - for k, v in kw.items(): - for dict_ in ( - self._url_cfg, self._engine_cfg, - self._maker_cfg, self._ignored_cfg, - self._facade_cfg, self._transaction_ctx_cfg): - if k in dict_: - dict_[k] = _Default(v) if as_defaults else v - break - else: - not_supported.append(k) - - if not_supported: - # would like to raise ValueError here, but there are just - # too many unrecognized (obsolete?) configuration options - # coming in from projects - warnings.warn( - "Configuration option(s) %r not supported" % - sorted(not_supported), - exception.NotSupportedWarning - ) - - def get_legacy_facade(self): - """Return a :class:`.LegacyEngineFacade` for this factory. - - This facade will make use of the same engine and sessionmaker - as this factory, however will not share the same transaction context; - the legacy facade continues to work the old way of returning - a new Session each time get_session() is called. - - """ - if not self._legacy_facade: - self._legacy_facade = LegacyEngineFacade(None, _factory=self) - if not self._started: - self._start() - - return self._legacy_facade - - def _create_connection(self, mode): - if not self._started: - self._start() - if mode is _WRITER: - return self._writer_engine.connect() - elif self.synchronous_reader or mode is _ASYNC_READER: - return self._reader_engine.connect() - else: - return self._writer_engine.connect() - - def _create_session(self, mode, bind=None): - if not self._started: - self._start() - kw = {} - # don't pass 'bind' if bind is None; the sessionmaker - # already has a bind to the engine. 
- if bind: - kw['bind'] = bind - if mode is _WRITER: - return self._writer_maker(**kw) - elif self.synchronous_reader or mode is _ASYNC_READER: - return self._reader_maker(**kw) - else: - return self._writer_maker(**kw) - - def _create_factory_copy(self): - factory = _TransactionFactory() - factory._url_cfg.update(self._url_cfg) - factory._engine_cfg.update(self._engine_cfg) - factory._maker_cfg.update(self._maker_cfg) - factory._transaction_ctx_cfg.update(self._transaction_ctx_cfg) - factory._facade_cfg.update(self._facade_cfg) - return factory - - def _args_for_conf(self, default_cfg, conf): - if conf is None: - return dict( - (key, _Default.resolve(value)) - for key, value in default_cfg.items() - if _Default.is_set(value) - ) - else: - return dict( - (key, _Default.resolve_w_conf(value, conf.database, key)) - for key, value in default_cfg.items() - if _Default.is_set_w_conf(value, conf.database, key) - ) - - def _url_args_for_conf(self, conf): - return self._args_for_conf(self._url_cfg, conf) - - def _engine_args_for_conf(self, conf): - return self._args_for_conf(self._engine_cfg, conf) - - def _maker_args_for_conf(self, conf): - maker_args = self._args_for_conf(self._maker_cfg, conf) - maker_args['autocommit'] = maker_args.pop('__autocommit') - return maker_args - - def dispose_pool(self): - """Call engine.pool.dispose() on underlying Engine objects.""" - with self._start_lock: - if not self._started: - return - - self._writer_engine.pool.dispose() - if self._reader_engine is not self._writer_engine: - self._reader_engine.pool.dispose() - - def _start(self, conf=False, connection=None, slave_connection=None): - with self._start_lock: - # self._started has been checked on the outside - # when _start() was called. Within the lock, - # check the flag once more to detect the case where - # the start process proceeded while this thread was waiting - # for the lock. - if self._started: - return - if conf is False: - conf = cfg.CONF - - # perform register_opts() local to actually using - # the cfg.CONF to maintain exact compatibility with - # the EngineFacade design. This can be changed if needed. 
- if conf is not None: - conf.register_opts(options.database_opts, 'database') - - url_args = self._url_args_for_conf(conf) - if connection: - url_args['connection'] = connection - if slave_connection: - url_args['slave_connection'] = slave_connection - engine_args = self._engine_args_for_conf(conf) - maker_args = self._maker_args_for_conf(conf) - - self._writer_engine, self._writer_maker = \ - self._setup_for_connection( - url_args['connection'], - engine_args, maker_args) - - if url_args.get('slave_connection'): - self._reader_engine, self._reader_maker = \ - self._setup_for_connection( - url_args['slave_connection'], - engine_args, maker_args) - else: - self._reader_engine, self._reader_maker = \ - self._writer_engine, self._writer_maker - - self.synchronous_reader = self._facade_cfg['synchronous_reader'] - - # set up _started last, so that in case of exceptions - # we try the whole thing again and report errors - # correctly - self._started = True - - def _setup_for_connection( - self, sql_connection, engine_kwargs, maker_kwargs): - if sql_connection is None: - raise exception.CantStartEngineError( - "No sql_connection parameter is established") - engine = engines.create_engine( - sql_connection=sql_connection, **engine_kwargs) - for hook in self._facade_cfg['on_engine_create']: - hook(engine) - sessionmaker = orm.get_maker(engine=engine, **maker_kwargs) - return engine, sessionmaker - - -class _TestTransactionFactory(_TransactionFactory): - """A :class:`._TransactionFactory` used by test suites. - - This is a :class:`._TransactionFactory` that can be directly injected - with an existing engine and sessionmaker. - - Note that while this is used by oslo.db's own tests of - the enginefacade system, it is also exported for use by - the test suites of other projects, first as an element of the - oslo_db.sqlalchemy.test_base module, and secondly may be used by - external test suites directly. - - Includes a feature to inject itself temporarily as the factory - within the global :class:`._TransactionContextManager`. - - """ - def __init__(self, engine, maker, apply_global, synchronous_reader): - self._reader_engine = self._writer_engine = engine - self._reader_maker = self._writer_maker = maker - self._started = True - self._legacy_facade = None - self.synchronous_reader = synchronous_reader - - self._facade_cfg = _context_manager._factory._facade_cfg - self._transaction_ctx_cfg = \ - _context_manager._factory._transaction_ctx_cfg - if apply_global: - self.existing_factory = _context_manager._factory - _context_manager._root_factory = self - - def dispose_global(self): - _context_manager._root_factory = self.existing_factory - - -class _TransactionContext(object): - """Represent a single database transaction in progress.""" - - def __init__( - self, factory, global_factory=None, - rollback_reader_sessions=False): - """Construct a new :class:`.TransactionContext`. - - :param factory: the :class:`.TransactionFactory` which will - serve as a source of connectivity. - - :param global_factory: the "global" factory which will be used - by the global ``_context_manager`` for new ``_TransactionContext`` - objects created under this one. When left as None the actual - "global" factory is used. 
- - :param rollback_reader_sessions: if True, a :class:`.Session` object - will have its :meth:`.Session.rollback` method invoked at the end - of a ``@reader`` block, actively rolling back the transaction and - expiring the objects within, before the :class:`.Session` moves - on to be closed, which has the effect of releasing connection - resources back to the connection pool and detaching all objects. - If False, the :class:`.Session` is - not affected at the end of a ``@reader`` block; the underlying - connection referred to by this :class:`.Session` will still - be released in the enclosing context via the :meth:`.Session.close` - method, which still ensures that the DBAPI connection is rolled - back, however the objects associated with the :class:`.Session` - retain their database-persisted contents after they are detached. - - .. seealso:: - - http://docs.sqlalchemy.org/en/rel_0_9/glossary.html#term-released\ - SQLAlchemy documentation on what "releasing resources" means. - - """ - self.factory = factory - self.global_factory = global_factory - self.mode = None - self.session = None - self.connection = None - self.transaction = None - kw = self.factory._transaction_ctx_cfg - self.rollback_reader_sessions = kw['rollback_reader_sessions'] - - @contextlib.contextmanager - def _connection(self, savepoint=False): - if self.connection is None: - try: - if self.session is not None: - # use existing session, which is outer to us - self.connection = self.session.connection() - if savepoint: - with self.connection.begin_nested(): - yield self.connection - else: - yield self.connection - else: - # is outermost - self.connection = self.factory._create_connection( - mode=self.mode) - self.transaction = self.connection.begin() - try: - yield self.connection - self._end_connection_transaction(self.transaction) - except Exception: - self.transaction.rollback() - # TODO(zzzeek) do we need save_and_reraise() here, - # or do newer eventlets not have issues? we are using - # raw "raise" in many other places in oslo.db already - # (and one six.reraise()). - raise - finally: - self.transaction = None - self.connection.close() - finally: - self.connection = None - - else: - # use existing connection, which is outer to us - if savepoint: - with self.connection.begin_nested(): - yield self.connection - else: - yield self.connection - - @contextlib.contextmanager - def _session(self, savepoint=False): - if self.session is None: - self.session = self.factory._create_session( - bind=self.connection, mode=self.mode) - try: - self.session.begin() - yield self.session - self._end_session_transaction(self.session) - except Exception: - self.session.rollback() - # TODO(zzzeek) do we need save_and_reraise() here, - # or do newer eventlets not have issues? we are using - # raw "raise" in many other places in oslo.db already - # (and one six.reraise()). - raise - finally: - self.session.close() - self.session = None - else: - # use existing session, which is outer to us - if savepoint: - with self.session.begin_nested(): - yield self.session - else: - yield self.session - - def _end_session_transaction(self, session): - if self.mode is _WRITER: - session.commit() - elif self.rollback_reader_sessions: - session.rollback() - # In the absence of calling session.rollback(), - # the next call is session.close(). This releases all - # objects from the session into the detached state, and - # releases the connection as well; the connection when returned - # to the pool is either rolled back in any case, or closed fully. 
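A small sketch of opting into the reader-rollback behaviour described above: ``configure()`` accepts ``rollback_reader_sessions`` along with the other factory settings, and has to be called before the first transaction is begun::

    from oslo_db.sqlalchemy import enginefacade

    # Roll back (and expire) reader sessions explicitly at the end of each
    # @reader block instead of relying on Session.close() alone.
    enginefacade.configure(rollback_reader_sessions=True)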
- - def _end_connection_transaction(self, transaction): - if self.mode is _WRITER: - transaction.commit() - else: - transaction.rollback() - - def _produce_block(self, mode, connection, savepoint, allow_async=False): - if mode is _WRITER: - self._writer() - elif mode is _ASYNC_READER: - self._async_reader() - else: - self._reader(allow_async) - if connection: - return self._connection(savepoint) - else: - return self._session(savepoint) - - def _writer(self): - if self.mode is None: - self.mode = _WRITER - elif self.mode is _READER: - raise TypeError( - "Can't upgrade a READER transaction " - "to a WRITER mid-transaction") - elif self.mode is _ASYNC_READER: - raise TypeError( - "Can't upgrade an ASYNC_READER transaction " - "to a WRITER mid-transaction") - - def _reader(self, allow_async=False): - if self.mode is None: - self.mode = _READER - elif self.mode is _ASYNC_READER and not allow_async: - raise TypeError( - "Can't upgrade an ASYNC_READER transaction " - "to a READER mid-transaction") - - def _async_reader(self): - if self.mode is None: - self.mode = _ASYNC_READER - - -class _TransactionContextTLocal(threading.local): - def __deepcopy__(self, memo): - return self - - def __reduce__(self): - return _TransactionContextTLocal, () - - -class _TransactionContextManager(object): - """Provide context-management and decorator patterns for transactions. - - This object integrates user-defined "context" objects with the - :class:`._TransactionContext` class, on behalf of a - contained :class:`._TransactionFactory`. - - """ - - def __init__( - self, root=None, - mode=None, - independent=False, - savepoint=False, - connection=False, - replace_global_factory=None, - _is_global_manager=False, - allow_async=False): - - if root is None: - self._root = self - self._root_factory = _TransactionFactory() - else: - self._root = root - - self._replace_global_factory = replace_global_factory - self._is_global_manager = _is_global_manager - self._mode = mode - self._independent = independent - self._savepoint = savepoint - if self._savepoint and self._independent: - raise TypeError( - "setting savepoint and independent makes no sense.") - self._connection = connection - self._allow_async = allow_async - - @property - def _factory(self): - """The :class:`._TransactionFactory` associated with this context.""" - return self._root._root_factory - - def configure(self, **kw): - """Apply configurational options to the factory. - - This method can only be called before any specific - transaction-beginning methods have been called. - - - """ - self._factory.configure(**kw) - - def append_on_engine_create(self, fn): - """Append a listener function to _facade_cfg["on_engine_create"]""" - self._factory._facade_cfg['on_engine_create'].append(fn) - - def get_legacy_facade(self): - """Return a :class:`.LegacyEngineFacade` for factory from this context. - - This facade will make use of the same engine and sessionmaker - as this factory, however will not share the same transaction context; - the legacy facade continues to work the old way of returning - a new Session each time get_session() is called. - """ - - return self._factory.get_legacy_facade() - - def dispose_pool(self): - """Call engine.pool.dispose() on underlying Engine objects.""" - self._factory.dispose_pool() - - def make_new_manager(self): - """Create a new, independent _TransactionContextManager from this one. - - Copies the underlying _TransactionFactory to a new one, so that - it can be further configured with new options. 
- - Used for test environments where the application-wide - _TransactionContextManager may be used as a factory for test-local - managers. - - """ - new = self._clone() - new._root = new - new._root_factory = self._root_factory._create_factory_copy() - assert not new._factory._started - return new - - def patch_factory(self, factory_or_manager): - """Patch a _TransactionFactory into this manager. - - Replaces this manager's factory with the given one, and returns - a callable that will reset the factory back to what we - started with. - - Only works for root factories. Is intended for test suites - that need to patch in alternate database configurations. - - The given argument may be a _TransactionContextManager or a - _TransactionFactory. - - """ - - if isinstance(factory_or_manager, _TransactionContextManager): - factory = factory_or_manager._factory - elif isinstance(factory_or_manager, _TransactionFactory): - factory = factory_or_manager - else: - raise ValueError( - "_TransactionContextManager or " - "_TransactionFactory expected.") - assert self._root is self - existing_factory = self._root_factory - self._root_factory = factory - - def reset(): - self._root_factory = existing_factory - - return reset - - def patch_engine(self, engine): - """Patch an Engine into this manager. - - Replaces this manager's factory with a _TestTransactionFactory - that will use the given Engine, and returns - a callable that will reset the factory back to what we - started with. - - Only works for root factories. Is intended for test suites - that need to patch in alternate database configurations. - - """ - - existing_factory = self._factory - maker = existing_factory._writer_maker - maker_kwargs = existing_factory._maker_args_for_conf(cfg.CONF) - maker = orm.get_maker(engine=engine, **maker_kwargs) - - factory = _TestTransactionFactory( - engine, maker, - apply_global=False, - synchronous_reader=existing_factory. - _facade_cfg['synchronous_reader'] - ) - return self.patch_factory(factory) - - @property - def replace(self): - """Modifier to replace the global transaction factory with this one.""" - return self._clone(replace_global_factory=self._factory) - - @property - def writer(self): - """Modifier to set the transaction to WRITER.""" - return self._clone(mode=_WRITER) - - @property - def reader(self): - """Modifier to set the transaction to READER.""" - return self._clone(mode=_READER) - - @property - def allow_async(self): - """Modifier to allow async operations - - Allows async operations if asynchronous session is already - started in this context. Marking DB API methods with READER would make - it impossible to use them in ASYNC_READER transactions, and marking - them with ASYNC_READER would require a modification of all the places - these DB API methods are called to force READER mode, where the latest - DB state is required. - - In Nova DB API methods should have a 'safe' default (i.e. READER), - so that they can start sessions on their own, but it would also be - useful for them to be able to participate in an existing ASYNC_READER - session, if one was started up the stack. 
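For illustration, a DB API method written in the spirit described above. It assumes ``context`` is an instance of a class decorated with ``transaction_context_provider`` (defined later in this module) and that a ``widget`` table exists; both are placeholders::

    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.reader.allow_async
    def widget_count(context):
        # Runs in READER mode when called on its own, but may also join an
        # ASYNC_READER transaction started further up the stack.
        return context.session.execute(
            "SELECT COUNT(*) FROM widget").scalar()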
- """ - - if self._mode is _WRITER: - raise TypeError("Setting async on a WRITER makes no sense") - return self._clone(allow_async=True) - - @property - def independent(self): - """Modifier to start a transaction independent from any enclosing.""" - return self._clone(independent=True) - - @property - def savepoint(self): - """Modifier to start a SAVEPOINT if a transaction already exists.""" - return self._clone(savepoint=True) - - @property - def connection(self): - """Modifier to return a core Connection object instead of Session.""" - return self._clone(connection=True) - - @property - def async(self): - """Modifier to set a READER operation to ASYNC_READER.""" - - if self._mode is _WRITER: - raise TypeError("Setting async on a WRITER makes no sense") - return self._clone(mode=_ASYNC_READER) - - def using(self, context): - """Provide a context manager block that will use the given context.""" - return self._transaction_scope(context) - - def __call__(self, fn): - """Decorate a function.""" - argspec = inspect.getargspec(fn) - if argspec.args[0] == 'self' or argspec.args[0] == 'cls': - context_index = 1 - else: - context_index = 0 - - @functools.wraps(fn) - def wrapper(*args, **kwargs): - context = args[context_index] - - with self._transaction_scope(context): - return fn(*args, **kwargs) - - return wrapper - - def _clone(self, **kw): - default_kw = { - "independent": self._independent, - "mode": self._mode, - "connection": self._connection - } - default_kw.update(kw) - return _TransactionContextManager(root=self._root, **default_kw) - - @contextlib.contextmanager - def _transaction_scope(self, context): - new_transaction = self._independent - transaction_contexts_by_thread = \ - _transaction_contexts_by_thread(context) - - current = restore = getattr( - transaction_contexts_by_thread, "current", None) - - use_factory = self._factory - global_factory = None - - if self._replace_global_factory: - use_factory = global_factory = self._replace_global_factory - elif current is not None and current.global_factory: - global_factory = current.global_factory - - if self._root._is_global_manager: - use_factory = global_factory - - if current is not None and ( - new_transaction or current.factory is not use_factory - ): - current = None - - if current is None: - current = transaction_contexts_by_thread.current = \ - _TransactionContext( - use_factory, global_factory=global_factory, - **use_factory._transaction_ctx_cfg) - - try: - if self._mode is not None: - with current._produce_block( - mode=self._mode, - connection=self._connection, - savepoint=self._savepoint, - allow_async=self._allow_async) as resource: - yield resource - else: - yield - finally: - if restore is None: - del transaction_contexts_by_thread.current - elif current is not restore: - transaction_contexts_by_thread.current = restore - - -def _context_descriptor(attr=None): - getter = operator.attrgetter(attr) - - def _property_for_context(context): - try: - transaction_context = context.transaction_ctx - except exception.NoEngineContextEstablished: - raise exception.NoEngineContextEstablished( - "No TransactionContext is established for " - "this %s object within the current thread; " - "the %r attribute is unavailable." - % (context, attr) - ) - else: - result = getter(transaction_context) - if result is None: - raise exception.ContextNotRequestedError( - "The '%s' context attribute was requested but " - "it has not been established for this context." 
% attr - ) - return result - return property(_property_for_context) - - -def _transaction_ctx_for_context(context): - by_thread = _transaction_contexts_by_thread(context) - try: - return by_thread.current - except AttributeError: - raise exception.NoEngineContextEstablished( - "No TransactionContext is established for " - "this %s object within the current thread. " - % context - ) - - -def _transaction_contexts_by_thread(context): - transaction_contexts_by_thread = getattr( - context, '_enginefacade_context', None) - if transaction_contexts_by_thread is None: - transaction_contexts_by_thread = \ - context._enginefacade_context = _TransactionContextTLocal() - - return transaction_contexts_by_thread - - -def transaction_context_provider(klass): - """Decorate a class with ``session`` and ``connection`` attributes.""" - - setattr( - klass, - 'transaction_ctx', - property(_transaction_ctx_for_context)) - - # Graft transaction context attributes as context properties - for attr in ('session', 'connection', 'transaction'): - setattr(klass, attr, _context_descriptor(attr)) - - return klass - - -_context_manager = _TransactionContextManager(_is_global_manager=True) -"""default context manager.""" - - -def transaction_context(): - """Construct a local transaction context. - - """ - return _TransactionContextManager() - - -def configure(**kw): - """Apply configurational options to the global factory. - - This method can only be called before any specific transaction-beginning - methods have been called. - - .. seealso:: - - :meth:`._TransactionFactory.configure` - - """ - _context_manager._factory.configure(**kw) - - -def get_legacy_facade(): - """Return a :class:`.LegacyEngineFacade` for the global factory. - - This facade will make use of the same engine and sessionmaker - as this factory, however will not share the same transaction context; - the legacy facade continues to work the old way of returning - a new Session each time get_session() is called. - - """ - return _context_manager.get_legacy_facade() - - -reader = _context_manager.reader -"""The global 'reader' starting point.""" - - -writer = _context_manager.writer -"""The global 'writer' starting point.""" - - -class LegacyEngineFacade(object): - """A helper class for removing of global engine instances from oslo.db. - - .. deprecated:: 1.12.0 - Please use :mod:`oslo_db.sqlalchemy.enginefacade` for new development. - - As a library, oslo.db can't decide where to store/when to create engine - and sessionmaker instances, so this must be left for a target application. - - On the other hand, in order to simplify the adoption of oslo.db changes, - we'll provide a helper class, which creates engine and sessionmaker - on its instantiation and provides get_engine()/get_session() methods - that are compatible with corresponding utility functions that currently - exist in target projects, e.g. in Nova. - - engine/sessionmaker instances will still be global (and they are meant to - be global), but they will be stored in the app context, rather that in the - oslo.db context. - - Two important things to remember: - - 1. An Engine instance is effectively a pool of DB connections, so it's - meant to be shared (and it's thread-safe). - 2. A Session instance is not meant to be shared and represents a DB - transactional context (i.e. it's not thread-safe). sessionmaker is - a factory of sessions. 
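Putting the pieces above together, a minimal, self-contained sketch of the modern API against an in-memory SQLite database: a context class decorated with ``transaction_context_provider`` carries the per-thread transaction state, and the global ``reader``/``writer`` starting points act as decorators or context managers. The table name and values are illustrative only::

    from oslo_db.sqlalchemy import enginefacade

    # Point the default factory at a throwaway SQLite database so the
    # sketch is self-contained; real services get this from [database].
    enginefacade.configure(connection='sqlite://')

    @enginefacade.transaction_context_provider
    class RequestContext(object):
        """Carries the per-thread transaction state for enginefacade."""

    @enginefacade.writer
    def add_widget(context, name):
        # The decorator opens context.session and commits it on success.
        context.session.execute(
            "CREATE TABLE IF NOT EXISTS widget (name TEXT)")
        context.session.execute(
            "INSERT INTO widget (name) VALUES (:name)", {'name': name})

    @enginefacade.reader
    def list_widgets(context):
        return [row[0] for row in
                context.session.execute("SELECT name FROM widget")]

    ctx = RequestContext()
    add_widget(ctx, 'spare')
    print(list_widgets(ctx))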
- - :param sql_connection: the connection string for the database to use - :type sql_connection: string - - :param slave_connection: the connection string for the 'slave' database - to use. If not provided, the master database - will be used for all operations. Note: this - is meant to be used for offloading of read - operations to asynchronously replicated slaves - to reduce the load on the master database. - :type slave_connection: string - - :param sqlite_fk: enable foreign keys in SQLite - :type sqlite_fk: bool - - :param autocommit: use autocommit mode for created Session instances - :type autocommit: bool - - :param expire_on_commit: expire session objects on commit - :type expire_on_commit: bool - - Keyword arguments: - - :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions. - (defaults to TRADITIONAL) - :keyword idle_timeout: timeout before idle sql connections are reaped - (defaults to 3600) - :keyword connection_debug: verbosity of SQL debugging information. - -1=Off, 0=None, 100=Everything (defaults - to 0) - :keyword max_pool_size: maximum number of SQL connections to keep open - in a pool (defaults to SQLAlchemy settings) - :keyword max_overflow: if set, use this value for max_overflow with - sqlalchemy (defaults to SQLAlchemy settings) - :keyword pool_timeout: if set, use this value for pool_timeout with - sqlalchemy (defaults to SQLAlchemy settings) - :keyword sqlite_synchronous: if True, SQLite uses synchronous mode - (defaults to True) - :keyword connection_trace: add python stack traces to SQL as comment - strings (defaults to False) - :keyword max_retries: maximum db connection retries during startup. - (setting -1 implies an infinite retry count) - (defaults to 10) - :keyword retry_interval: interval between retries of opening a sql - connection (defaults to 10) - :keyword thread_checkin: boolean that indicates that between each - engine checkin event a sleep(0) will occur to - allow other greenthreads to run (defaults to - True) - - """ - def __init__(self, sql_connection, slave_connection=None, - sqlite_fk=False, autocommit=True, - expire_on_commit=False, _conf=None, _factory=None, **kwargs): - warnings.warn( - "EngineFacade is deprecated; please use " - "oslo_db.sqlalchemy.enginefacade", - exception.OsloDBDeprecationWarning, - stacklevel=2) - - if _factory: - self._factory = _factory - else: - self._factory = _TransactionFactory() - - self._factory.configure( - sqlite_fk=sqlite_fk, - __autocommit=autocommit, - expire_on_commit=expire_on_commit, - **kwargs - ) - # make sure passed-in urls are favored over that - # of config - self._factory._start( - _conf, connection=sql_connection, - slave_connection=slave_connection) - - def _check_factory_started(self): - if not self._factory._started: - self._factory._start() - - def get_engine(self, use_slave=False): - """Get the engine instance (note, that it's shared). - - :param use_slave: if possible, use 'slave' database for this engine. - If the connection string for the slave database - wasn't provided, 'master' engine will be returned. - (defaults to False) - :type use_slave: bool - - """ - self._check_factory_started() - if use_slave: - return self._factory._reader_engine - else: - return self._factory._writer_engine - - def get_session(self, use_slave=False, **kwargs): - """Get a Session instance. - - :param use_slave: if possible, use 'slave' database connection for - this session. 
If the connection string for the - slave database wasn't provided, a session bound - to the 'master' engine will be returned. - (defaults to False) - :type use_slave: bool - - Keyword arguments will be passed to a sessionmaker instance as is (if - passed, they will override the ones used when the sessionmaker instance - was created). See SQLAlchemy Session docs for details. - - """ - self._check_factory_started() - if use_slave: - return self._factory._reader_maker(**kwargs) - else: - return self._factory._writer_maker(**kwargs) - - def get_sessionmaker(self, use_slave=False): - """Get the sessionmaker instance used to create a Session. - - This can be called for those cases where the sessionmaker() is to - be temporarily injected with some state such as a specific connection. - - """ - self._check_factory_started() - if use_slave: - return self._factory._reader_maker - else: - return self._factory._writer_maker - - @classmethod - def from_config(cls, conf, - sqlite_fk=False, autocommit=True, expire_on_commit=False): - """Initialize EngineFacade using oslo.config config instance options. - - :param conf: oslo.config config instance - :type conf: oslo_config.cfg.ConfigOpts - - :param sqlite_fk: enable foreign keys in SQLite - :type sqlite_fk: bool - - :param autocommit: use autocommit mode for created Session instances - :type autocommit: bool - - :param expire_on_commit: expire session objects on commit - :type expire_on_commit: bool - - """ - - return cls( - None, - sqlite_fk=sqlite_fk, - autocommit=autocommit, - expire_on_commit=expire_on_commit, _conf=conf) diff --git a/oslo_db/sqlalchemy/engines.py b/oslo_db/sqlalchemy/engines.py deleted file mode 100644 index 3e8bb0a..0000000 --- a/oslo_db/sqlalchemy/engines.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Core SQLAlchemy connectivity routines. -""" - -import itertools -import logging -import os -import re -import time - -import six -import sqlalchemy -from sqlalchemy import event -from sqlalchemy import exc -from sqlalchemy import pool -from sqlalchemy.sql.expression import select - -from oslo_db._i18n import _LW -from oslo_db import exception - -from oslo_db.sqlalchemy import exc_filters -from oslo_db.sqlalchemy import utils - -LOG = logging.getLogger(__name__) - - -def _thread_yield(dbapi_con, con_record): - """Ensure other greenthreads get a chance to be executed. - - If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will - execute instead of time.sleep(0). - Force a context switch. With common database backends (eg MySQLdb and - sqlite), there is no implicit yield caused by network I/O since they are - implemented by C libraries that eventlet cannot monkey patch. - """ - time.sleep(0) - - -def _connect_ping_listener(connection, branch): - """Ping the server at connection startup. 
- - Ping the server at transaction begin and transparently reconnect - if a disconnect exception occurs. - """ - if branch: - return - - # turn off "close with result". This can also be accomplished - # by branching the connection, however just setting the flag is - # more performant and also doesn't get involved with some - # connection-invalidation awkardness that occurs (see - # https://bitbucket.org/zzzeek/sqlalchemy/issue/3215/) - save_should_close_with_result = connection.should_close_with_result - connection.should_close_with_result = False - try: - # run a SELECT 1. use a core select() so that - # any details like that needed by Oracle, DB2 etc. are handled. - connection.scalar(select([1])) - except exception.DBConnectionError: - # catch DBConnectionError, which is raised by the filter - # system. - # disconnect detected. The connection is now - # "invalid", but the pool should be ready to return - # new connections assuming they are good now. - # run the select again to re-validate the Connection. - connection.scalar(select([1])) - finally: - connection.should_close_with_result = save_should_close_with_result - - -def _setup_logging(connection_debug=0): - """setup_logging function maps SQL debug level to Python log level. - - Connection_debug is a verbosity of SQL debugging information. - 0=None(default value), - 1=Processed only messages with WARNING level or higher - 50=Processed only messages with INFO level or higher - 100=Processed only messages with DEBUG level - """ - if connection_debug >= 0: - logger = logging.getLogger('sqlalchemy.engine') - if connection_debug == 100: - logger.setLevel(logging.DEBUG) - elif connection_debug >= 50: - logger.setLevel(logging.INFO) - else: - logger.setLevel(logging.WARNING) - - -def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, - idle_timeout=3600, - connection_debug=0, max_pool_size=None, max_overflow=None, - pool_timeout=None, sqlite_synchronous=True, - connection_trace=False, max_retries=10, retry_interval=10, - thread_checkin=True, logging_name=None, - json_serializer=None, - json_deserializer=None): - """Return a new SQLAlchemy engine.""" - - url = sqlalchemy.engine.url.make_url(sql_connection) - - engine_args = { - "pool_recycle": idle_timeout, - 'convert_unicode': True, - 'connect_args': {}, - 'logging_name': logging_name - } - - _setup_logging(connection_debug) - - _init_connection_args( - url, engine_args, - max_pool_size=max_pool_size, - max_overflow=max_overflow, - pool_timeout=pool_timeout, - json_serializer=json_serializer, - json_deserializer=json_deserializer, - ) - - engine = sqlalchemy.create_engine(url, **engine_args) - - _init_events( - engine, - mysql_sql_mode=mysql_sql_mode, - sqlite_synchronous=sqlite_synchronous, - sqlite_fk=sqlite_fk, - thread_checkin=thread_checkin, - connection_trace=connection_trace - ) - - # register alternate exception handler - exc_filters.register_engine(engine) - - # register engine connect handler - event.listen(engine, "engine_connect", _connect_ping_listener) - - # initial connect + test - # NOTE(viktors): the current implementation of _test_connection() - # does nothing, if max_retries == 0, so we can skip it - if max_retries: - test_conn = _test_connection(engine, max_retries, retry_interval) - test_conn.close() - - return engine - - -@utils.dispatch_for_dialect('*', multiple=True) -def _init_connection_args( - url, engine_args, - max_pool_size=None, max_overflow=None, pool_timeout=None, **kw): - - pool_class = url.get_dialect().get_pool_class(url) - if 
issubclass(pool_class, pool.QueuePool): - if max_pool_size is not None: - engine_args['pool_size'] = max_pool_size - if max_overflow is not None: - engine_args['max_overflow'] = max_overflow - if pool_timeout is not None: - engine_args['pool_timeout'] = pool_timeout - - -@_init_connection_args.dispatch_for("sqlite") -def _init_connection_args(url, engine_args, **kw): - pool_class = url.get_dialect().get_pool_class(url) - # singletonthreadpool is used for :memory: connections; - # replace it with StaticPool. - if issubclass(pool_class, pool.SingletonThreadPool): - engine_args["poolclass"] = pool.StaticPool - engine_args['connect_args']['check_same_thread'] = False - - -@_init_connection_args.dispatch_for("postgresql") -def _init_connection_args(url, engine_args, **kw): - if 'client_encoding' not in url.query: - # Set encoding using engine_args instead of connect_args since - # it's supported for PostgreSQL 8.*. More details at: - # http://docs.sqlalchemy.org/en/rel_0_9/dialects/postgresql.html - engine_args['client_encoding'] = 'utf8' - engine_args['json_serializer'] = kw.get('json_serializer') - engine_args['json_deserializer'] = kw.get('json_deserializer') - - -@_init_connection_args.dispatch_for("mysql") -def _init_connection_args(url, engine_args, **kw): - if 'charset' not in url.query: - engine_args['connect_args']['charset'] = 'utf8' - - -@_init_connection_args.dispatch_for("mysql+mysqlconnector") -def _init_connection_args(url, engine_args, **kw): - # mysqlconnector engine (<1.0) incorrectly defaults to - # raise_on_warnings=True - # https://bitbucket.org/zzzeek/sqlalchemy/issue/2515 - if 'raise_on_warnings' not in url.query: - engine_args['connect_args']['raise_on_warnings'] = False - - -@_init_connection_args.dispatch_for("mysql+mysqldb") -@_init_connection_args.dispatch_for("mysql+oursql") -def _init_connection_args(url, engine_args, **kw): - # Those drivers require use_unicode=0 to avoid performance drop due - # to internal usage of Python unicode objects in the driver - # http://docs.sqlalchemy.org/en/rel_0_9/dialects/mysql.html - if 'use_unicode' not in url.query: - if six.PY3: - engine_args['connect_args']['use_unicode'] = 1 - else: - engine_args['connect_args']['use_unicode'] = 0 - - -@utils.dispatch_for_dialect('*', multiple=True) -def _init_events(engine, thread_checkin=True, connection_trace=False, **kw): - """Set up event listeners for all database backends.""" - - _add_process_guards(engine) - - if connection_trace: - _add_trace_comments(engine) - - if thread_checkin: - sqlalchemy.event.listen(engine, 'checkin', _thread_yield) - - -@_init_events.dispatch_for("mysql") -def _init_events(engine, mysql_sql_mode=None, **kw): - """Set up event listeners for MySQL.""" - - if mysql_sql_mode is not None: - @sqlalchemy.event.listens_for(engine, "connect") - def _set_session_sql_mode(dbapi_con, connection_rec): - cursor = dbapi_con.cursor() - cursor.execute("SET SESSION sql_mode = %s", [mysql_sql_mode]) - - @sqlalchemy.event.listens_for(engine, "first_connect") - def _check_effective_sql_mode(dbapi_con, connection_rec): - if mysql_sql_mode is not None: - _set_session_sql_mode(dbapi_con, connection_rec) - - cursor = dbapi_con.cursor() - cursor.execute("SHOW VARIABLES LIKE 'sql_mode'") - realmode = cursor.fetchone() - - if realmode is None: - LOG.warning(_LW('Unable to detect effective SQL mode')) - else: - realmode = realmode[1] - LOG.debug('MySQL server mode set to %s', realmode) - if 'TRADITIONAL' not in realmode.upper() and \ - 'STRICT_ALL_TABLES' not in realmode.upper(): - 
LOG.warning( - _LW( - "MySQL SQL mode is '%s', " - "consider enabling TRADITIONAL or STRICT_ALL_TABLES"), - realmode) - - -@_init_events.dispatch_for("sqlite") -def _init_events(engine, sqlite_synchronous=True, sqlite_fk=False, **kw): - """Set up event listeners for SQLite. - - This includes several settings made on connections as they are - created, as well as transactional control extensions. - - """ - - def regexp(expr, item): - reg = re.compile(expr) - return reg.search(six.text_type(item)) is not None - - @sqlalchemy.event.listens_for(engine, "connect") - def _sqlite_connect_events(dbapi_con, con_record): - - # Add REGEXP functionality on SQLite connections - dbapi_con.create_function('regexp', 2, regexp) - - if not sqlite_synchronous: - # Switch sqlite connections to non-synchronous mode - dbapi_con.execute("PRAGMA synchronous = OFF") - - # Disable pysqlite's emitting of the BEGIN statement entirely. - # Also stops it from emitting COMMIT before any DDL. - # below, we emit BEGIN ourselves. - # see http://docs.sqlalchemy.org/en/rel_0_9/dialects/\ - # sqlite.html#serializable-isolation-savepoints-transactional-ddl - dbapi_con.isolation_level = None - - if sqlite_fk: - # Ensures that the foreign key constraints are enforced in SQLite. - dbapi_con.execute('pragma foreign_keys=ON') - - @sqlalchemy.event.listens_for(engine, "begin") - def _sqlite_emit_begin(conn): - # emit our own BEGIN, checking for existing - # transactional state - if 'in_transaction' not in conn.info: - conn.execute("BEGIN") - conn.info['in_transaction'] = True - - @sqlalchemy.event.listens_for(engine, "rollback") - @sqlalchemy.event.listens_for(engine, "commit") - def _sqlite_end_transaction(conn): - # remove transactional marker - conn.info.pop('in_transaction', None) - - -def _test_connection(engine, max_retries, retry_interval): - if max_retries == -1: - attempts = itertools.count() - else: - attempts = six.moves.range(max_retries) - # See: http://legacy.python.org/dev/peps/pep-3110/#semantic-changes for - # why we are not using 'de' directly (it can be removed from the local - # scope). - de_ref = None - for attempt in attempts: - try: - return engine.connect() - except exception.DBConnectionError as de: - msg = _LW('SQL connection failed. %s attempts left.') - LOG.warning(msg, max_retries - attempt) - time.sleep(retry_interval) - de_ref = de - else: - if de_ref is not None: - six.reraise(type(de_ref), de_ref) - - -def _add_process_guards(engine): - """Add multiprocessing guards. - - Forces a connection to be reconnected if it is detected - as having been shared to a sub-process. - - """ - - @sqlalchemy.event.listens_for(engine, "connect") - def connect(dbapi_connection, connection_record): - connection_record.info['pid'] = os.getpid() - - @sqlalchemy.event.listens_for(engine, "checkout") - def checkout(dbapi_connection, connection_record, connection_proxy): - pid = os.getpid() - if connection_record.info['pid'] != pid: - LOG.debug(_LW( - "Parent process %(orig)s forked (%(newproc)s) with an open " - "database connection, " - "which is being discarded and recreated."), - {"newproc": pid, "orig": connection_record.info['pid']}) - connection_record.connection = connection_proxy.connection = None - raise exc.DisconnectionError( - "Connection record belongs to pid %s, " - "attempting to check out in pid %s" % - (connection_record.info['pid'], pid) - ) - - -def _add_trace_comments(engine): - """Add trace comments. - - Augment statements with a trace of the immediate calling code - for a given statement. 
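-
-    For illustration, an augmented statement ends up looking roughly like
-    the following (the file name, line number and function name shown are
-    hypothetical)::
-
-        SELECT * FROM instances -- File: /opt/stack/nova/api.py (1024) get_all; ...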
- """ - - import os - import sys - import traceback - target_paths = set([ - os.path.dirname(sys.modules['oslo_db'].__file__), - os.path.dirname(sys.modules['sqlalchemy'].__file__) - ]) - try: - skip_paths = set([ - os.path.dirname(sys.modules['oslo_db.tests'].__file__), - ]) - except KeyError: - skip_paths = set() - - @sqlalchemy.event.listens_for(engine, "before_cursor_execute", retval=True) - def before_cursor_execute(conn, cursor, statement, parameters, context, - executemany): - - # NOTE(zzzeek) - if different steps per DB dialect are desirable - # here, switch out on engine.name for now. - stack = traceback.extract_stack() - our_line = None - - for idx, (filename, line, method, function) in enumerate(stack): - for tgt in skip_paths: - if filename.startswith(tgt): - break - else: - for tgt in target_paths: - if filename.startswith(tgt): - our_line = idx - break - if our_line: - break - - if our_line: - trace = "; ".join( - "File: %s (%s) %s" % ( - line[0], line[1], line[2] - ) - # include three lines of context. - for line in stack[our_line - 3:our_line] - - ) - statement = "%s -- %s" % (statement, trace) - - return statement, parameters diff --git a/oslo_db/sqlalchemy/exc_filters.py b/oslo_db/sqlalchemy/exc_filters.py deleted file mode 100644 index 051d1f8..0000000 --- a/oslo_db/sqlalchemy/exc_filters.py +++ /dev/null @@ -1,477 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Define exception redefinitions for SQLAlchemy DBAPI exceptions.""" - -import collections -import logging -import re -import sys - -from sqlalchemy import event -from sqlalchemy import exc as sqla_exc - -from oslo_db._i18n import _LE -from oslo_db import exception - - -LOG = logging.getLogger(__name__) - - -_registry = collections.defaultdict( - lambda: collections.defaultdict( - list - ) -) - - -def filters(dbname, exception_type, regex): - """Mark a function as receiving a filtered exception. - - :param dbname: string database name, e.g. 'mysql' - :param exception_type: a SQLAlchemy database exception class, which - extends from :class:`sqlalchemy.exc.DBAPIError`. - :param regex: a string, or a tuple of strings, that will be processed - as matching regular expressions. - - """ - def _receive(fn): - _registry[dbname][exception_type].extend( - (fn, re.compile(reg)) - for reg in - ((regex,) if not isinstance(regex, tuple) else regex) - ) - return fn - return _receive - - -# NOTE(zzzeek) - for Postgresql, catch both OperationalError, as the -# actual error is -# psycopg2.extensions.TransactionRollbackError(OperationalError), -# as well as sqlalchemy.exc.DBAPIError, as SQLAlchemy will reraise it -# as this until issue #3075 is fixed. 
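-
-# For illustration only: a new rule is registered simply by decorating a
-# callable with ``filters``; e.g. a hypothetical rule routing MySQL
-# "too many connections" errors (error code 1040) to DBConnectionError
-# could look like:
-#
-#     @filters("mysql", sqla_exc.OperationalError,
-#              r".*\b1040\b.*Too many connections.*")
-#     def _too_many_connections(error, match, engine_name, is_disconnect):
-#         raise exception.DBConnectionError(error)
-#
-# The module's actual rules follow.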
-@filters("mysql", sqla_exc.OperationalError, r"^.*\b1213\b.*Deadlock found.*") -@filters("mysql", sqla_exc.DatabaseError, - r"^.*\b1205\b.*Lock wait timeout exceeded.*") -@filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*Deadlock found.*") -@filters("postgresql", sqla_exc.OperationalError, r"^.*deadlock detected.*") -@filters("postgresql", sqla_exc.DBAPIError, r"^.*deadlock detected.*") -@filters("ibm_db_sa", sqla_exc.DBAPIError, r"^.*SQL0911N.*") -def _deadlock_error(operational_error, match, engine_name, is_disconnect): - """Filter for MySQL or Postgresql deadlock error. - - NOTE(comstud): In current versions of DB backends, Deadlock violation - messages follow the structure: - - mysql+mysqldb: - (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' - 'restarting transaction') - - mysql+mysqlconnector: - (InternalError) 1213 (40001): Deadlock found when trying to get lock; try - restarting transaction - - postgresql: - (TransactionRollbackError) deadlock detected - - - ibm_db_sa: - SQL0911N The current transaction has been rolled back because of a - deadlock or timeout - - """ - raise exception.DBDeadlock(operational_error) - - -@filters("mysql", sqla_exc.IntegrityError, - r"^.*\b1062\b.*Duplicate entry '(?P.*)'" - r" for key '(?P[^']+)'.*$") -# NOTE(jd) For binary types -@filters("mysql", sqla_exc.IntegrityError, - r"^.*\b1062\b.*Duplicate entry \\'(?P.*)\\'" - r" for key \\'(?P.+)\\'.*$") -# NOTE(pkholkin): the first regex is suitable only for PostgreSQL 9.x versions -# the second regex is suitable for PostgreSQL 8.x versions -@filters("postgresql", sqla_exc.IntegrityError, - (r'^.*duplicate\s+key.*"(?P[^"]+)"\s*\n.*' - r'Key\s+\((?P.*)\)=\((?P.*)\)\s+already\s+exists.*$', - r"^.*duplicate\s+key.*\"(?P[^\"]+)\"\s*\n.*$")) -def _default_dupe_key_error(integrity_error, match, engine_name, - is_disconnect): - """Filter for MySQL or Postgresql duplicate key error. - - note(boris-42): In current versions of DB backends unique constraint - violation messages follow the structure: - - postgres: - 1 column - (IntegrityError) duplicate key value violates unique - constraint "users_c1_key" - N columns - (IntegrityError) duplicate key value violates unique - constraint "name_of_our_constraint" - - mysql+mysqldb: - 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key - 'c1'") - N columns - (IntegrityError) (1062, "Duplicate entry 'values joined - with -' for key 'name_of_our_constraint'") - - mysql+mysqlconnector: - 1 column - (IntegrityError) 1062 (23000): Duplicate entry 'value_of_c1' for - key 'c1' - N columns - (IntegrityError) 1062 (23000): Duplicate entry 'values - joined with -' for key 'name_of_our_constraint' - - - - """ - - columns = match.group('columns') - - # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" - # where `t` it is table name and columns `c1`, `c2` - # are in UniqueConstraint. 
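-    # For illustration, a hypothetical name "uniq_user0name0deleted" is
-    # parsed below by stripping the "uniq_" prefix, splitting on "0" and
-    # dropping the table name, yielding the columns ['name', 'deleted'].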
- uniqbase = "uniq_" - if not columns.startswith(uniqbase): - if engine_name == "postgresql": - columns = [columns[columns.index("_") + 1:columns.rindex("_")]] - else: - columns = [columns] - else: - columns = columns[len(uniqbase):].split("0")[1:] - - value = match.groupdict().get('value') - - raise exception.DBDuplicateEntry(columns, integrity_error, value) - - -@filters("sqlite", sqla_exc.IntegrityError, - (r"^.*columns?(?P[^)]+)(is|are)\s+not\s+unique$", - r"^.*UNIQUE\s+constraint\s+failed:\s+(?P.+)$", - r"^.*PRIMARY\s+KEY\s+must\s+be\s+unique.*$")) -def _sqlite_dupe_key_error(integrity_error, match, engine_name, is_disconnect): - """Filter for SQLite duplicate key error. - - note(boris-42): In current versions of DB backends unique constraint - violation messages follow the structure: - - sqlite: - 1 column - (IntegrityError) column c1 is not unique - N columns - (IntegrityError) column c1, c2, ..., N are not unique - - sqlite since 3.7.16: - 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1 - N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2 - - sqlite since 3.8.2: - (IntegrityError) PRIMARY KEY must be unique - - """ - columns = [] - # NOTE(ochuprykov): We can get here by last filter in which there are no - # groups. Trying to access the substring that matched by - # the group will lead to IndexError. In this case just - # pass empty list to exception.DBDuplicateEntry - try: - columns = match.group('columns') - columns = [c.split('.')[-1] for c in columns.strip().split(", ")] - except IndexError: - pass - - raise exception.DBDuplicateEntry(columns, integrity_error) - - -@filters("sqlite", sqla_exc.IntegrityError, - r"(?i).*foreign key constraint failed") -@filters("postgresql", sqla_exc.IntegrityError, - r".*on table \"(?P[^\"]+)\" violates " - "foreign key constraint \"(?P[^\"]+)\".*\n" - "DETAIL: Key \((?P.+)\)=\(.+\) " - "is (not present in|still referenced from) table " - "\"(?P[^\"]+)\".") -@filters("mysql", sqla_exc.IntegrityError, - r".* u?'Cannot (add|delete) or update a (child|parent) row: " - 'a foreign key constraint fails \([`"].+[`"]\.[`"](?P
.+)[`"], ' - 'CONSTRAINT [`"](?P.+)[`"] FOREIGN KEY ' - '\([`"](?P.+)[`"]\) REFERENCES [`"](?P.+)[`"] ') -def _foreign_key_error(integrity_error, match, engine_name, is_disconnect): - """Filter for foreign key errors.""" - - try: - table = match.group("table") - except IndexError: - table = None - try: - constraint = match.group("constraint") - except IndexError: - constraint = None - try: - key = match.group("key") - except IndexError: - key = None - try: - key_table = match.group("key_table") - except IndexError: - key_table = None - - raise exception.DBReferenceError(table, constraint, key, key_table, - integrity_error) - - -@filters("postgresql", sqla_exc.IntegrityError, - r".*new row for relation \"(?P
.+)\" " - "violates check constraint " - "\"(?P.+)\"") -def _check_constraint_error( - integrity_error, match, engine_name, is_disconnect): - """Filter for check constraint errors.""" - - try: - table = match.group("table") - except IndexError: - table = None - try: - check_name = match.group("check_name") - except IndexError: - check_name = None - - raise exception.DBConstraintError(table, check_name, integrity_error) - - -@filters("postgresql", sqla_exc.ProgrammingError, - r".* constraint \"(?P.+)\" " - "of relation " - "\"(?P.+)\" does not exist") -@filters("mysql", sqla_exc.InternalError, - r".*1091,.*Can't DROP '(?P.+)'; " - "check that column/key exists") -@filters("mysql", sqla_exc.InternalError, - r".*1025,.*Error on rename of '.+/(?P.+)' to ") -def _check_constraint_non_existing( - programming_error, match, engine_name, is_disconnect): - """Filter for constraint non existing errors.""" - - try: - relation = match.group("relation") - except IndexError: - relation = None - - try: - constraint = match.group("constraint") - except IndexError: - constraint = None - - raise exception.DBNonExistentConstraint(relation, - constraint, - programming_error) - - -@filters("sqlite", sqla_exc.OperationalError, - r".* no such table: (?P
.+)") -@filters("mysql", sqla_exc.InternalError, - r".*1051,.*\"Unknown table '(.+\.)?(?P
.+)'\"") -@filters("postgresql", sqla_exc.ProgrammingError, - r".* table \"(?P
.+)\" does not exist") -def _check_table_non_existing( - programming_error, match, engine_name, is_disconnect): - """Filter for table non existing errors.""" - raise exception.DBNonExistentTable(match.group("table"), programming_error) - - -@filters("ibm_db_sa", sqla_exc.IntegrityError, r"^.*SQL0803N.*$") -def _db2_dupe_key_error(integrity_error, match, engine_name, is_disconnect): - """Filter for DB2 duplicate key errors. - - N columns - (IntegrityError) SQL0803N One or more values in the INSERT - statement, UPDATE statement, or foreign key update caused by a - DELETE statement are not valid because the primary key, unique - constraint or unique index identified by "2" constrains table - "NOVA.KEY_PAIRS" from having duplicate values for the index - key. - - """ - - # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the - # columns so we have to omit that from the DBDuplicateEntry error. - raise exception.DBDuplicateEntry([], integrity_error) - - -@filters("mysql", sqla_exc.DBAPIError, r".*\b1146\b") -def _raise_mysql_table_doesnt_exist_asis( - error, match, engine_name, is_disconnect): - """Raise MySQL error 1146 as is. - - Raise MySQL error 1146 as is, so that it does not conflict with - the MySQL dialect's checking a table not existing. - """ - - raise error - - -@filters("mysql", sqla_exc.OperationalError, - r".*(1292|1366).*Incorrect \w+ value.*") -@filters("mysql", sqla_exc.DataError, - r".*1265.*Data truncated for column.*") -@filters("mysql", sqla_exc.DataError, - r".*1264.*Out of range value for column.*") -@filters("mysql", sqla_exc.InternalError, - r"^.*1366.*Incorrect string value:*") -@filters("sqlite", sqla_exc.ProgrammingError, - r"(?i).*You must not use 8-bit bytestrings*") -def _raise_data_error(error, match, engine_name, is_disconnect): - """Raise DBDataError exception for different data errors.""" - - raise exception.DBDataError(error) - - -@filters("*", sqla_exc.OperationalError, r".*") -def _raise_operational_errors_directly_filter(operational_error, - match, engine_name, - is_disconnect): - """Filter for all remaining OperationalError classes and apply. - - Filter for all remaining OperationalError classes and apply - special rules. - """ - if is_disconnect: - # operational errors that represent disconnect - # should be wrapped - raise exception.DBConnectionError(operational_error) - else: - # NOTE(comstud): A lot of code is checking for OperationalError - # so let's not wrap it for now. - raise operational_error - - -@filters("mysql", sqla_exc.OperationalError, r".*\(.*(?:2002|2003|2006|2013|1047)") # noqa -@filters("postgresql", sqla_exc.OperationalError, r".*could not connect to server") # noqa -@filters("ibm_db_sa", sqla_exc.OperationalError, r".*(?:30081)") -def _is_db_connection_error(operational_error, match, engine_name, - is_disconnect): - """Detect the exception as indicating a recoverable error on connect.""" - raise exception.DBConnectionError(operational_error) - - -@filters("*", sqla_exc.DBAPIError, r".*") -def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect): - """Filter for remaining DBAPIErrors. - - Filter for remaining DBAPIErrors and wrap if they represent - a disconnect error. 
- """ - if is_disconnect: - raise exception.DBConnectionError(error) - else: - LOG.exception( - _LE('DBAPIError exception wrapped from %s') % error) - raise exception.DBError(error) - - -@filters('*', UnicodeEncodeError, r".*") -def _raise_for_unicode_encode(error, match, engine_name, is_disconnect): - raise exception.DBInvalidUnicodeParameter() - - -@filters("*", Exception, r".*") -def _raise_for_all_others(error, match, engine_name, is_disconnect): - LOG.exception(_LE('DB exception wrapped.')) - raise exception.DBError(error) - -ROLLBACK_CAUSE_KEY = 'oslo.db.sp_rollback_cause' - - -def handler(context): - """Iterate through available filters and invoke those which match. - - The first one which raises wins. The order in which the filters - are attempted is sorted by specificity - dialect name or "*", - exception class per method resolution order (``__mro__``). - Method resolution order is used so that filter rules indicating a - more specific exception class are attempted first. - - """ - def _dialect_registries(engine): - if engine.dialect.name in _registry: - yield _registry[engine.dialect.name] - if '*' in _registry: - yield _registry['*'] - - for per_dialect in _dialect_registries(context.engine): - for exc in ( - context.sqlalchemy_exception, - context.original_exception): - for super_ in exc.__class__.__mro__: - if super_ in per_dialect: - regexp_reg = per_dialect[super_] - for fn, regexp in regexp_reg: - match = regexp.match(exc.args[0]) - if match: - try: - fn( - exc, - match, - context.engine.dialect.name, - context.is_disconnect) - except exception.DBError as dbe: - if ( - context.connection is not None and - not context.connection.closed and - not context.connection.invalidated and - ROLLBACK_CAUSE_KEY - in context.connection.info - ): - dbe.cause = \ - context.connection.info.pop( - ROLLBACK_CAUSE_KEY) - - if isinstance( - dbe, exception.DBConnectionError): - context.is_disconnect = True - raise - - -def register_engine(engine): - event.listen(engine, "handle_error", handler) - - @event.listens_for(engine, "rollback_savepoint") - def rollback_savepoint(conn, name, context): - exc_info = sys.exc_info() - if exc_info[1]: - conn.info[ROLLBACK_CAUSE_KEY] = exc_info[1] - # NOTE(zzzeek) this eliminates a reference cycle between tracebacks - # that would occur in Python 3 only, which has been shown to occur if - # this function were in fact part of the traceback. That's not the - # case here however this is left as a defensive measure. - del exc_info - - # try to clear the "cause" ASAP outside of savepoints, - # by grabbing the end of transaction events... - @event.listens_for(engine, "rollback") - @event.listens_for(engine, "commit") - def pop_exc_tx(conn): - conn.info.pop(ROLLBACK_CAUSE_KEY, None) - - # .. as well as connection pool checkin (just in case). - # the .info dictionary lasts as long as the DBAPI connection itself - # and is cleared out when the connection is recycled or closed - # due to invalidate etc. - @event.listens_for(engine, "checkin") - def pop_exc_checkin(dbapi_conn, connection_record): - connection_record.info.pop(ROLLBACK_CAUSE_KEY, None) - - -def handle_connect_error(engine): - """Connect to the engine, including handle_error handlers. - - The compat library now builds this into the engine.connect() - system as per SQLAlchemy 1.0's behavior. 
- - """ - return engine.connect() diff --git a/oslo_db/sqlalchemy/migration.py b/oslo_db/sqlalchemy/migration.py deleted file mode 100644 index cbc8b28..0000000 --- a/oslo_db/sqlalchemy/migration.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding=utf-8 - -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Base on code in migrate/changeset/databases/sqlite.py which is under -# the following license: -# -# The MIT License -# -# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -import os - -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository -import sqlalchemy - -from oslo_db._i18n import _ -from oslo_db import exception - - -def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True): - """Upgrade or downgrade a database. - - Function runs the upgrade() or downgrade() functions in change scripts. - - :param engine: SQLAlchemy engine instance for a given database - :param abs_path: Absolute path to migrate repository. - :param version: Database will upgrade/downgrade until this version. - If None - database will update to the latest - available version. 
- :param init_version: Initial database version - :param sanity_check: Require schema sanity checking for all tables - """ - - if version is not None: - try: - version = int(version) - except ValueError: - raise exception.DBMigrationError(_("version should be an integer")) - - current_version = db_version(engine, abs_path, init_version) - repository = _find_migrate_repo(abs_path) - if sanity_check: - _db_schema_sanity_check(engine) - if version is None or version > current_version: - migration = versioning_api.upgrade(engine, repository, version) - else: - migration = versioning_api.downgrade(engine, repository, - version) - if sanity_check: - _db_schema_sanity_check(engine) - - return migration - - -def _db_schema_sanity_check(engine): - """Ensure all database tables were created with required parameters. - - :param engine: SQLAlchemy engine instance for a given database - - """ - - if engine.name == 'mysql': - onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' - 'from information_schema.TABLES ' - 'where TABLE_SCHEMA=%s and ' - 'TABLE_COLLATION NOT LIKE \'%%utf8%%\'') - - # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic - # versioning tables from the tables we need to verify utf8 status on. - # Non-standard table names are not supported. - EXCLUDED_TABLES = ['migrate_version', 'alembic_version'] - - table_names = [res[0] for res in - engine.execute(onlyutf8_sql, engine.url.database) if - res[0].lower() not in EXCLUDED_TABLES] - - if len(table_names) > 0: - raise ValueError(_('Tables "%s" have non utf8 collation, ' - 'please make sure all tables are CHARSET=utf8' - ) % ','.join(table_names)) - - -def db_version(engine, abs_path, init_version): - """Show the current version of the repository. - - :param engine: SQLAlchemy engine instance for a given database - :param abs_path: Absolute path to migrate repository - :param init_version: Initial database version - """ - repository = _find_migrate_repo(abs_path) - try: - return versioning_api.db_version(engine, repository) - except versioning_exceptions.DatabaseNotControlledError: - meta = sqlalchemy.MetaData() - meta.reflect(bind=engine) - tables = meta.tables - if len(tables) == 0 or 'alembic_version' in tables: - db_version_control(engine, abs_path, version=init_version) - return versioning_api.db_version(engine, repository) - else: - raise exception.DBMigrationError( - _("The database is not under version control, but has " - "tables. Please stamp the current version of the schema " - "manually.")) - - -def db_version_control(engine, abs_path, version=None): - """Mark a database as under this repository's version control. - - Once a database is under version control, schema changes should - only be done via change scripts in this repository. 
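-
-    A minimal illustrative call (the repository path is hypothetical)::
-
-        migration.db_version_control(engine, '/path/to/migrate_repo',
-                                     version=0)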
- - :param engine: SQLAlchemy engine instance for a given database - :param abs_path: Absolute path to migrate repository - :param version: Initial database version - """ - repository = _find_migrate_repo(abs_path) - - try: - versioning_api.version_control(engine, repository, version) - except versioning_exceptions.InvalidVersionError as ex: - raise exception.DBMigrationError("Invalid version : %s" % ex) - except versioning_exceptions.DatabaseAlreadyControlledError: - raise exception.DBMigrationError("Database is already controlled.") - - return version - - -def _find_migrate_repo(abs_path): - """Get the project's change script repository - - :param abs_path: Absolute path to migrate repository - """ - if not os.path.exists(abs_path): - raise exception.DBMigrationError("Path %s not found" % abs_path) - return Repository(abs_path) diff --git a/oslo_db/sqlalchemy/migration_cli/README.rst b/oslo_db/sqlalchemy/migration_cli/README.rst deleted file mode 100644 index ebbbdcb..0000000 --- a/oslo_db/sqlalchemy/migration_cli/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -This module could be used either for: -1. Smooth transition from migrate tool to alembic -2. As standalone alembic tool - -Core points: -1. Upgrade/downgrade database with usage of alembic/migrate migrations -or both -2. Compatibility with oslo.config -3. The way to autogenerate new revisions or stamps diff --git a/oslo_db/sqlalchemy/migration_cli/__init__.py b/oslo_db/sqlalchemy/migration_cli/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/oslo_db/sqlalchemy/migration_cli/ext_alembic.py b/oslo_db/sqlalchemy/migration_cli/ext_alembic.py deleted file mode 100644 index e44fbc8..0000000 --- a/oslo_db/sqlalchemy/migration_cli/ext_alembic.py +++ /dev/null @@ -1,112 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import alembic -from alembic import config as alembic_config -import alembic.migration as alembic_migration -from alembic import script as alembic_script - -from oslo_db.sqlalchemy.migration_cli import ext_base - - -class AlembicExtension(ext_base.MigrationExtensionBase): - """Extension to provide alembic features. - - :param engine: SQLAlchemy engine instance for a given database - :type engine: sqlalchemy.engine.Engine - :param migration_config: Stores specific configuration for migrations - :type migration_config: dict - """ - - order = 2 - - @property - def enabled(self): - return os.path.exists(self.alembic_ini_path) - - def __init__(self, engine, migration_config): - self.alembic_ini_path = migration_config.get('alembic_ini_path', '') - self.config = alembic_config.Config(self.alembic_ini_path) - # TODO(viktors): Remove this, when we will use Alembic 0.7.5 or - # higher, because the ``attributes`` dictionary was - # added to Alembic in version 0.7.5. 
- if not hasattr(self.config, 'attributes'): - self.config.attributes = {} - # option should be used if script is not in default directory - repo_path = migration_config.get('alembic_repo_path') - if repo_path: - self.config.set_main_option('script_location', repo_path) - self.engine = engine - - def upgrade(self, version): - with self.engine.begin() as connection: - self.config.attributes['connection'] = connection - return alembic.command.upgrade(self.config, version or 'head') - - def downgrade(self, version): - if isinstance(version, int) or version is None or version.isdigit(): - version = 'base' - with self.engine.begin() as connection: - self.config.attributes['connection'] = connection - return alembic.command.downgrade(self.config, version) - - def version(self): - with self.engine.connect() as conn: - context = alembic_migration.MigrationContext.configure(conn) - return context.get_current_revision() - - def revision(self, message='', autogenerate=False): - """Creates template for migration. - - :param message: Text that will be used for migration title - :type message: string - :param autogenerate: If True - generates diff based on current database - state - :type autogenerate: bool - """ - with self.engine.begin() as connection: - self.config.attributes['connection'] = connection - return alembic.command.revision(self.config, message=message, - autogenerate=autogenerate) - - def stamp(self, revision): - """Stamps database with provided revision. - - :param revision: Should match one from repository or head - to stamp - database with most recent revision - :type revision: string - """ - with self.engine.begin() as connection: - self.config.attributes['connection'] = connection - return alembic.command.stamp(self.config, revision=revision) - - def has_revision(self, rev_id): - if rev_id in ['base', 'head']: - return True - - # Although alembic supports relative upgrades and downgrades, - # get_revision always returns False for relative revisions. - # Since only alembic supports relative revisions, assume the - # revision belongs to this plugin. - if rev_id: # rev_id can be None, so the check is required - if '-' in rev_id or '+' in rev_id: - return True - - script = alembic_script.ScriptDirectory( - self.config.get_main_option('script_location')) - try: - script.get_revision(rev_id) - return True - except alembic.util.CommandError: - return False diff --git a/oslo_db/sqlalchemy/migration_cli/ext_base.py b/oslo_db/sqlalchemy/migration_cli/ext_base.py deleted file mode 100644 index 184003d..0000000 --- a/oslo_db/sqlalchemy/migration_cli/ext_base.py +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class MigrationExtensionBase(object): - - # used to sort migration in logical order - order = 0 - - @property - def enabled(self): - """Used for availability verification of a plugin. 
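-
-        For instance, the alembic extension reports itself as enabled only
-        when its configured ``alembic.ini`` file exists, and the migrate
-        extension only when its migration repository path exists.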
- - :rtype: bool - """ - return False - - @abc.abstractmethod - def upgrade(self, version): - """Used for upgrading database. - - :param version: Desired database version - :type version: string - """ - - @abc.abstractmethod - def downgrade(self, version): - """Used for downgrading database. - - :param version: Desired database version - :type version: string - """ - - @abc.abstractmethod - def version(self): - """Current database version. - - :returns: Databse version - :rtype: string - """ - - def revision(self, *args, **kwargs): - """Used to generate migration script. - - In migration engines that support this feature, it should generate - new migration script. - - Accept arbitrary set of arguments. - """ - raise NotImplementedError() - - def stamp(self, *args, **kwargs): - """Stamps database based on plugin features. - - Accept arbitrary set of arguments. - """ - raise NotImplementedError() - - def has_revision(self, rev_id): - """Checks whether the repo contains a revision - - :param rev_id: Revision to check - :returns: Whether the revision is in the repo - :rtype: bool - """ - raise NotImplementedError() - - def __cmp__(self, other): - """Used for definition of plugin order. - - :param other: MigrationExtensionBase instance - :rtype: bool - """ - return self.order > other.order diff --git a/oslo_db/sqlalchemy/migration_cli/ext_migrate.py b/oslo_db/sqlalchemy/migration_cli/ext_migrate.py deleted file mode 100644 index 4002972..0000000 --- a/oslo_db/sqlalchemy/migration_cli/ext_migrate.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os - -from migrate.versioning import version as migrate_version - -from oslo_db._i18n import _LE -from oslo_db.sqlalchemy import migration -from oslo_db.sqlalchemy.migration_cli import ext_base - - -LOG = logging.getLogger(__name__) - - -class MigrateExtension(ext_base.MigrationExtensionBase): - """Extension to provide sqlalchemy-migrate features. 
- - :param migration_config: Stores specific configuration for migrations - :type migration_config: dict - """ - - order = 1 - - def __init__(self, engine, migration_config): - self.engine = engine - self.repository = migration_config.get('migration_repo_path', '') - self.init_version = migration_config.get('init_version', 0) - - @property - def enabled(self): - return os.path.exists(self.repository) - - def upgrade(self, version): - version = None if version == 'head' else version - return migration.db_sync( - self.engine, self.repository, version, - init_version=self.init_version) - - def downgrade(self, version): - try: - # version for migrate should be valid int - else skip - if version in ('base', None): - version = self.init_version - version = int(version) - return migration.db_sync( - self.engine, self.repository, version, - init_version=self.init_version) - except ValueError: - LOG.error( - _LE('Migration number for migrate plugin must be valid ' - 'integer or empty, if you want to downgrade ' - 'to initial state') - ) - raise - - def version(self): - return migration.db_version( - self.engine, self.repository, init_version=self.init_version) - - def has_revision(self, rev_id): - collection = migrate_version.Collection(self.repository) - try: - collection.version(rev_id) - return True - except (KeyError, ValueError): - # NOTE(breton): migrate raises KeyError if an int is passed but not - # found in the list of revisions and ValueError if non-int is - # passed. Both mean there is no requested revision. - return False diff --git a/oslo_db/sqlalchemy/migration_cli/manager.py b/oslo_db/sqlalchemy/migration_cli/manager.py deleted file mode 100644 index 4268331..0000000 --- a/oslo_db/sqlalchemy/migration_cli/manager.py +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy -from stevedore import enabled - -from oslo_db import exception - - -MIGRATION_NAMESPACE = 'oslo.db.migration' - - -def check_plugin_enabled(ext): - """Used for EnabledExtensionManager.""" - return ext.obj.enabled - - -class MigrationManager(object): - - def __init__(self, migration_config, engine=None): - if engine is None: - if migration_config.get('db_url'): - engine = sqlalchemy.create_engine( - migration_config['db_url'], - poolclass=sqlalchemy.pool.NullPool, - ) - else: - raise ValueError('Either database url or engine' - ' must be provided.') - - self._manager = enabled.EnabledExtensionManager( - MIGRATION_NAMESPACE, - check_plugin_enabled, - invoke_args=(engine, migration_config), - invoke_on_load=True - ) - if not self._plugins: - raise ValueError('There must be at least one plugin active.') - - @property - def _plugins(self): - return sorted(ext.obj for ext in self._manager.extensions) - - def upgrade(self, revision): - """Upgrade database with all available backends.""" - # a revision exists only in a single plugin. Until we reached it, we - # should upgrade to the plugins' heads. - # revision=None is a special case meaning latest revision. 
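-        # For illustration (plugin names are examples only): with the plugins
-        # ordered [migrate, alembic] and an alembic revision requested, the
-        # loop below first upgrades the migrate plugin to its head, then
-        # upgrades alembic to the requested revision and stops.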
- rev_in_plugins = [p.has_revision(revision) for p in self._plugins] - if not any(rev_in_plugins) and revision is not None: - raise exception.DBMigrationError('Revision does not exist') - - results = [] - for plugin, has_revision in zip(self._plugins, rev_in_plugins): - if not has_revision or revision is None: - results.append(plugin.upgrade(None)) - else: - results.append(plugin.upgrade(revision)) - break - return results - - def downgrade(self, revision): - """Downgrade database with available backends.""" - # a revision exists only in a single plugin. Until we reached it, we - # should upgrade to the plugins' first revision. - # revision=None is a special case meaning initial revision. - rev_in_plugins = [p.has_revision(revision) for p in self._plugins] - if not any(rev_in_plugins) and revision is not None: - raise exception.DBMigrationError('Revision does not exist') - - # downgrading should be performed in reversed order - results = [] - for plugin, has_revision in zip(reversed(self._plugins), - reversed(rev_in_plugins)): - if not has_revision or revision is None: - results.append(plugin.downgrade(None)) - else: - results.append(plugin.downgrade(revision)) - break - return results - - def version(self): - """Return last version of db.""" - last = None - for plugin in self._plugins: - version = plugin.version() - if version is not None: - last = version - return last - - def revision(self, message, autogenerate): - """Generate template or autogenerated revision.""" - # revision should be done only by last plugin - return self._plugins[-1].revision(message, autogenerate) - - def stamp(self, revision): - """Create stamp for a given revision.""" - return self._plugins[-1].stamp(revision) diff --git a/oslo_db/sqlalchemy/models.py b/oslo_db/sqlalchemy/models.py deleted file mode 100644 index 0c4e0de..0000000 --- a/oslo_db/sqlalchemy/models.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Piston Cloud Computing, Inc. -# Copyright 2012 Cloudscaling Group, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -SQLAlchemy models. -""" - -import six - -from oslo_utils import timeutils -from sqlalchemy import Column, Integer -from sqlalchemy import DateTime -from sqlalchemy.orm import object_mapper - - -class ModelBase(six.Iterator): - """Base class for models.""" - __table_initialized__ = False - - def save(self, session): - """Save this object.""" - - # NOTE(boris-42): This part of code should be look like: - # session.add(self) - # session.flush() - # But there is a bug in sqlalchemy and eventlet that - # raises NoneType exception if there is no running - # transaction and rollback is called. As long as - # sqlalchemy has this bug we have to create transaction - # explicitly. 
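-        # For illustration: save() may also be called from within an
-        # enclosing session.begin() block; subtransactions=True makes the
-        # begin() below join that outer transaction instead of raising.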
- with session.begin(subtransactions=True): - session.add(self) - session.flush() - - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, key): - return getattr(self, key) - - def __contains__(self, key): - # Don't use hasattr() because hasattr() catches any exception, not only - # AttributeError. We want to passthrough SQLAlchemy exceptions - # (ex: sqlalchemy.orm.exc.DetachedInstanceError). - try: - getattr(self, key) - except AttributeError: - return False - else: - return True - - def get(self, key, default=None): - return getattr(self, key, default) - - @property - def _extra_keys(self): - """Specifies custom fields - - Subclasses can override this property to return a list - of custom fields that should be included in their dict - representation. - - For reference check tests/db/sqlalchemy/test_models.py - """ - return [] - - def __iter__(self): - columns = list(dict(object_mapper(self).columns).keys()) - # NOTE(russellb): Allow models to specify other keys that can be looked - # up, beyond the actual db columns. An example would be the 'name' - # property for an Instance. - columns.extend(self._extra_keys) - - return ModelIterator(self, iter(columns)) - - def update(self, values): - """Make the model object behave like a dict.""" - for k, v in six.iteritems(values): - setattr(self, k, v) - - def _as_dict(self): - """Make the model object behave like a dict. - - Includes attributes from joins. - """ - local = dict((key, value) for key, value in self) - joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) - if not k[0] == '_']) - local.update(joined) - return local - - def iteritems(self): - """Make the model object behave like a dict.""" - return six.iteritems(self._as_dict()) - - def items(self): - """Make the model object behave like a dict.""" - return self._as_dict().items() - - def keys(self): - """Make the model object behave like a dict.""" - return [key for key, value in self.iteritems()] - - -class ModelIterator(six.Iterator): - - def __init__(self, model, columns): - self.model = model - self.i = columns - - def __iter__(self): - return self - - # In Python 3, __next__() has replaced next(). - def __next__(self): - n = six.advance_iterator(self.i) - return n, getattr(self.model, n) - - -class TimestampMixin(object): - created_at = Column(DateTime, default=lambda: timeutils.utcnow()) - updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) - - -class SoftDeleteMixin(object): - deleted_at = Column(DateTime) - deleted = Column(Integer, default=0) - - def soft_delete(self, session): - """Mark this object as deleted.""" - self.deleted = self.id - self.deleted_at = timeutils.utcnow() - self.save(session=session) diff --git a/oslo_db/sqlalchemy/orm.py b/oslo_db/sqlalchemy/orm.py deleted file mode 100644 index decd8c8..0000000 --- a/oslo_db/sqlalchemy/orm.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""SQLAlchemy ORM connectivity and query structures. -""" - -from oslo_utils import timeutils -import sqlalchemy.orm -from sqlalchemy.sql.expression import literal_column - -from oslo_db.sqlalchemy import update_match - - -class Query(sqlalchemy.orm.query.Query): - """Subclass of sqlalchemy.query with soft_delete() method.""" - def soft_delete(self, synchronize_session='evaluate'): - return self.update({'deleted': literal_column('id'), - 'updated_at': literal_column('updated_at'), - 'deleted_at': timeutils.utcnow()}, - synchronize_session=synchronize_session) - - def update_returning_pk(self, values, surrogate_key): - """Perform an UPDATE, returning the primary key of the matched row. - - This is a method-version of - oslo_db.sqlalchemy.update_match.update_returning_pk(); see that - function for usage details. - - """ - return update_match.update_returning_pk(self, values, surrogate_key) - - def update_on_match(self, specimen, surrogate_key, values, **kw): - """Emit an UPDATE statement matching the given specimen. - - This is a method-version of - oslo_db.sqlalchemy.update_match.update_on_match(); see that function - for usage details. - - """ - return update_match.update_on_match( - self, specimen, surrogate_key, values, **kw) - - -class Session(sqlalchemy.orm.session.Session): - """oslo.db-specific Session subclass.""" - - -def get_maker(engine, autocommit=True, expire_on_commit=False): - """Return a SQLAlchemy sessionmaker using the given engine.""" - return sqlalchemy.orm.sessionmaker(bind=engine, - class_=Session, - autocommit=autocommit, - expire_on_commit=expire_on_commit, - query_cls=Query) diff --git a/oslo_db/sqlalchemy/provision.py b/oslo_db/sqlalchemy/provision.py deleted file mode 100644 index 7a1eaee..0000000 --- a/oslo_db/sqlalchemy/provision.py +++ /dev/null @@ -1,582 +0,0 @@ -# Copyright 2013 Mirantis.inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Provision test environment for specific DB backends""" - -import abc -import logging -import os -import random -import re -import string - -import six -from six import moves -import sqlalchemy -from sqlalchemy.engine import url as sa_url -from sqlalchemy import schema -import testresources - -from oslo_db._i18n import _LI -from oslo_db import exception -from oslo_db.sqlalchemy import session -from oslo_db.sqlalchemy import utils - -LOG = logging.getLogger(__name__) - - -class ProvisionedDatabase(object): - pass - - -class BackendResource(testresources.TestResourceManager): - def __init__(self, database_type): - super(BackendResource, self).__init__() - self.database_type = database_type - self.backend = Backend.backend_for_database_type(self.database_type) - - def make(self, dependency_resources): - return self.backend - - def isDirty(self): - return False - - -class DatabaseResource(testresources.TestResourceManager): - - def __init__(self, database_type): - super(DatabaseResource, self).__init__() - self.database_type = database_type - self.resources = [ - ('backend', BackendResource(database_type)) - ] - - def make(self, dependency_resources): - dependency_resources['db_token'] = db_token = _random_ident() - backend = dependency_resources['backend'] - LOG.info( - "CREATE BACKEND %s TOKEN %s", backend.engine.url, db_token) - backend.create_named_database(db_token, conditional=True) - dependency_resources['engine'] = \ - backend.provisioned_engine(db_token) - return ProvisionedDatabase() - - def clean(self, resource): - resource.engine.dispose() - LOG.info( - "DROP BACKEND %s TOKEN %s", - resource.backend.engine, resource.db_token) - resource.backend.drop_named_database(resource.db_token) - - def isDirty(self): - return False - - -class TransactionResource(testresources.TestResourceManager): - - def __init__(self, database_resource, schema_resource): - super(TransactionResource, self).__init__() - self.resources = [ - ('database', database_resource), - ('schema', schema_resource) - ] - - def clean(self, resource): - resource._dispose() - - def make(self, dependency_resources): - conn = dependency_resources['database'].engine.connect() - return utils.NonCommittingEngine(conn) - - def isDirty(self): - return True - - -class Schema(object): - pass - - -class SchemaResource(testresources.TestResourceManager): - - def __init__(self, database_resource, generate_schema, teardown=False): - super(SchemaResource, self).__init__() - self.generate_schema = generate_schema - self.teardown = teardown - self.resources = [ - ('database', database_resource) - ] - - def clean(self, resource): - LOG.info( - "DROP ALL OBJECTS, BACKEND %s", - resource.database.engine.url) - resource.database.backend.drop_all_objects( - resource.database.engine) - - def make(self, dependency_resources): - if self.generate_schema: - self.generate_schema(dependency_resources['database'].engine) - return Schema() - - def isDirty(self): - if self.teardown: - return True - else: - return False - - -class Backend(object): - """Represent a particular database backend that may be provisionable. - - The ``Backend`` object maintains a database type (e.g. database without - specific driver type, such as "sqlite", "postgresql", etc.), - a target URL, a base ``Engine`` for that URL object that can be used - to provision databases and a ``BackendImpl`` which knows how to perform - operations against this type of ``Engine``. 
- - """ - - backends_by_database_type = {} - - def __init__(self, database_type, url): - self.database_type = database_type - self.url = url - self.verified = False - self.engine = None - self.impl = BackendImpl.impl(database_type) - self.current_dbs = set() - - @classmethod - def backend_for_database_type(cls, database_type): - """Return the ``Backend`` for the given database type. - - """ - try: - backend = cls.backends_by_database_type[database_type] - except KeyError: - raise exception.BackendNotAvailable( - "Backend '%s' is unavailable: No such backend" % database_type) - else: - return backend._verify() - - @classmethod - def all_viable_backends(cls): - """Return an iterator of all ``Backend`` objects that are present - - and provisionable. - - """ - - for backend in cls.backends_by_database_type.values(): - try: - yield backend._verify() - except exception.BackendNotAvailable: - pass - - def _verify(self): - """Verify that this ``Backend`` is available and provisionable. - - :return: this ``Backend`` - - :raises: ``BackendNotAvailable`` if the backend is not available. - - """ - - if not self.verified: - try: - eng = self._ensure_backend_available(self.url) - except exception.BackendNotAvailable as bne: - self._no_engine_reason = str(bne) - raise - else: - self.engine = eng - finally: - self.verified = True - if self.engine is None: - raise exception.BackendNotAvailable(self._no_engine_reason) - return self - - @classmethod - def _ensure_backend_available(cls, url): - url = sa_url.make_url(str(url)) - try: - eng = sqlalchemy.create_engine(url) - except ImportError as i_e: - # SQLAlchemy performs an "import" of the DBAPI module - # within create_engine(). So if ibm_db_sa, cx_oracle etc. - # isn't installed, we get an ImportError here. - LOG.info( - _LI("The %(dbapi)s backend is unavailable: %(err)s"), - dict(dbapi=url.drivername, err=i_e)) - raise exception.BackendNotAvailable( - "Backend '%s' is unavailable: No DBAPI installed" % - url.drivername) - else: - try: - conn = eng.connect() - except sqlalchemy.exc.DBAPIError as d_e: - # upon connect, SQLAlchemy calls dbapi.connect(). This - # usually raises OperationalError and should always at - # least raise a SQLAlchemy-wrapped DBAPI Error. - LOG.info( - _LI("The %(dbapi)s backend is unavailable: %(err)s"), - dict(dbapi=url.drivername, err=d_e) - ) - raise exception.BackendNotAvailable( - "Backend '%s' is unavailable: Could not connect" % - url.drivername) - else: - conn.close() - return eng - - def create_named_database(self, ident, conditional=False): - """Create a database with the given name.""" - - if not conditional or ident not in self.current_dbs: - self.current_dbs.add(ident) - self.impl.create_named_database( - self.engine, ident, conditional=conditional) - - def drop_named_database(self, ident, conditional=False): - """Drop a database with the given name.""" - - self.impl.drop_named_database( - self.engine, ident, - conditional=conditional) - self.current_dbs.discard(ident) - - def drop_all_objects(self, engine): - """Drop all database objects. - - Drops all database objects remaining on the default schema of the - given engine. 
- - """ - self.impl.drop_all_objects(engine) - - def database_exists(self, ident): - """Return True if a database of the given name exists.""" - - return self.impl.database_exists(self.engine, ident) - - def provisioned_engine(self, ident): - """Given the URL of a particular database backend and the string - - name of a particular 'database' within that backend, return - an Engine instance whose connections will refer directly to the - named database. - - For hostname-based URLs, this typically involves switching just the - 'database' portion of the URL with the given name and creating - an engine. - - For URLs that instead deal with DSNs, the rules may be more custom; - for example, the engine may need to connect to the root URL and - then emit a command to switch to the named database. - - """ - return self.impl.provisioned_engine(self.url, ident) - - @classmethod - def _setup(cls): - """Initial startup feature will scan the environment for configured - - URLs and place them into the list of URLs we will use for provisioning. - - This searches through OS_TEST_DBAPI_ADMIN_CONNECTION for URLs. If - not present, we set up URLs based on the "opportunstic" convention, - e.g. username+password = "openstack_citest". - - The provisioning system will then use or discard these URLs as they - are requested, based on whether or not the target database is actually - found to be available. - - """ - configured_urls = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', None) - if configured_urls: - configured_urls = configured_urls.split(";") - else: - configured_urls = [ - impl.create_opportunistic_driver_url() - for impl in BackendImpl.all_impls() - ] - - for url_str in configured_urls: - url = sa_url.make_url(url_str) - m = re.match(r'([^+]+?)(?:\+(.+))?$', url.drivername) - database_type = m.group(1) - Backend.backends_by_database_type[database_type] = \ - Backend(database_type, url) - - -@six.add_metaclass(abc.ABCMeta) -class BackendImpl(object): - """Provide database-specific implementations of key provisioning - - functions. - - ``BackendImpl`` is owned by a ``Backend`` instance which delegates - to it for all database-specific features. - - """ - - default_engine_kwargs = {} - - supports_drop_fk = True - - @classmethod - def all_impls(cls): - """Return an iterator of all possible BackendImpl objects. - - These are BackendImpls that are implemented, but not - necessarily provisionable. - - """ - for database_type in cls.impl.reg: - if database_type == '*': - continue - yield BackendImpl.impl(database_type) - - @utils.dispatch_for_dialect("*") - def impl(drivername): - """Return a ``BackendImpl`` instance corresponding to the - - given driver name. - - This is a dispatched method which will refer to the constructor - of implementing subclasses. - - """ - raise NotImplementedError( - "No provision impl available for driver: %s" % drivername) - - def __init__(self, drivername): - self.drivername = drivername - - @abc.abstractmethod - def create_opportunistic_driver_url(self): - """Produce a string url known as the 'opportunistic' URL. - - This URL is one that corresponds to an established Openstack - convention for a pre-established database login, which, when - detected as available in the local environment, is automatically - used as a test platform for a specific type of driver. 
- - """ - - @abc.abstractmethod - def create_named_database(self, engine, ident, conditional=False): - """Create a database with the given name.""" - - @abc.abstractmethod - def drop_named_database(self, engine, ident, conditional=False): - """Drop a database with the given name.""" - - def drop_all_objects(self, engine): - """Drop all database objects. - - Drops all database objects remaining on the default schema of the - given engine. - - Per-db implementations will also need to drop items specific to those - systems, such as sequences, custom types (e.g. pg ENUM), etc. - - """ - - with engine.begin() as conn: - inspector = sqlalchemy.inspect(engine) - metadata = schema.MetaData() - tbs = [] - all_fks = [] - - for table_name in inspector.get_table_names(): - fks = [] - for fk in inspector.get_foreign_keys(table_name): - # note that SQLite reflection does not have names - # for foreign keys until SQLAlchemy 1.0 - if not fk['name']: - continue - fks.append( - schema.ForeignKeyConstraint((), (), name=fk['name']) - ) - table = schema.Table(table_name, metadata, *fks) - tbs.append(table) - all_fks.extend(fks) - - if self.supports_drop_fk: - for fkc in all_fks: - conn.execute(schema.DropConstraint(fkc)) - - for table in tbs: - conn.execute(schema.DropTable(table)) - - self.drop_additional_objects(conn) - - def drop_additional_objects(self, conn): - pass - - def provisioned_engine(self, base_url, ident): - """Return a provisioned engine. - - Given the URL of a particular database backend and the string - name of a particular 'database' within that backend, return - an Engine instance whose connections will refer directly to the - named database. - - For hostname-based URLs, this typically involves switching just the - 'database' portion of the URL with the given name and creating - an engine. - - For URLs that instead deal with DSNs, the rules may be more custom; - for example, the engine may need to connect to the root URL and - then emit a command to switch to the named database. 
- - """ - - url = sa_url.make_url(str(base_url)) - url.database = ident - return session.create_engine( - url, - logging_name="%s@%s" % (self.drivername, ident), - **self.default_engine_kwargs - ) - - -@BackendImpl.impl.dispatch_for("mysql") -class MySQLBackendImpl(BackendImpl): - - default_engine_kwargs = {'mysql_sql_mode': 'TRADITIONAL'} - - def create_opportunistic_driver_url(self): - return "mysql+pymysql://openstack_citest:openstack_citest@localhost/" - - def create_named_database(self, engine, ident, conditional=False): - with engine.connect() as conn: - if not conditional or not self.database_exists(conn, ident): - conn.execute("CREATE DATABASE %s" % ident) - - def drop_named_database(self, engine, ident, conditional=False): - with engine.connect() as conn: - if not conditional or self.database_exists(conn, ident): - conn.execute("DROP DATABASE %s" % ident) - - def database_exists(self, engine, ident): - return bool(engine.scalar("SHOW DATABASES LIKE '%s'" % ident)) - - -@BackendImpl.impl.dispatch_for("sqlite") -class SQLiteBackendImpl(BackendImpl): - - supports_drop_fk = False - - def create_opportunistic_driver_url(self): - return "sqlite://" - - def create_named_database(self, engine, ident, conditional=False): - url = self._provisioned_database_url(engine.url, ident) - filename = url.database - if filename and (not conditional or not os.access(filename, os.F_OK)): - eng = sqlalchemy.create_engine(url) - eng.connect().close() - - def provisioned_engine(self, base_url, ident): - return session.create_engine( - self._provisioned_database_url(base_url, ident)) - - def drop_named_database(self, engine, ident, conditional=False): - url = self._provisioned_database_url(engine.url, ident) - filename = url.database - if filename and (not conditional or os.access(filename, os.F_OK)): - os.remove(filename) - - def database_exists(self, engine, ident): - url = self._provisioned_database_url(engine.url, ident) - filename = url.database - return not filename or os.access(filename, os.F_OK) - - def _provisioned_database_url(self, base_url, ident): - if base_url.database: - return sa_url.make_url("sqlite:////tmp/%s.db" % ident) - else: - return base_url - - -@BackendImpl.impl.dispatch_for("postgresql") -class PostgresqlBackendImpl(BackendImpl): - def create_opportunistic_driver_url(self): - return "postgresql://openstack_citest:openstack_citest"\ - "@localhost/postgres" - - def create_named_database(self, engine, ident, conditional=False): - with engine.connect().execution_options( - isolation_level="AUTOCOMMIT") as conn: - if not conditional or not self.database_exists(conn, ident): - conn.execute("CREATE DATABASE %s" % ident) - - def drop_named_database(self, engine, ident, conditional=False): - with engine.connect().execution_options( - isolation_level="AUTOCOMMIT") as conn: - self._close_out_database_users(conn, ident) - if conditional: - conn.execute("DROP DATABASE IF EXISTS %s" % ident) - else: - conn.execute("DROP DATABASE %s" % ident) - - def drop_additional_objects(self, conn): - enums = [e['name'] for e in sqlalchemy.inspect(conn).get_enums()] - - for e in enums: - conn.execute("DROP TYPE %s" % e) - - def database_exists(self, engine, ident): - return bool( - engine.scalar( - sqlalchemy.text( - "select datname from pg_database " - "where datname=:name"), name=ident) - ) - - def _close_out_database_users(self, conn, ident): - """Attempt to guarantee a database can be dropped. 
- - Optional feature which guarantees no connections with our - username are attached to the DB we're going to drop. - - This method has caveats; for one, the 'pid' column was named - 'procpid' prior to Postgresql 9.2. But more critically, - prior to 9.2 this operation required superuser permissions, - even if the connections we're closing are under the same username - as us. In more recent versions this restriction has been - lifted for same-user connections. - - """ - if conn.dialect.server_version_info >= (9, 2): - conn.execute( - sqlalchemy.text( - "select pg_terminate_backend(pid) " - "from pg_stat_activity " - "where usename=current_user and " - "pid != pg_backend_pid() " - "and datname=:dname" - ), dname=ident) - - -def _random_ident(): - return ''.join( - random.choice(string.ascii_lowercase) - for i in moves.range(10)) - - -Backend._setup() diff --git a/oslo_db/sqlalchemy/session.py b/oslo_db/sqlalchemy/session.py deleted file mode 100644 index bb3616f..0000000 --- a/oslo_db/sqlalchemy/session.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Session Handling for SQLAlchemy backend. - -Recommended ways to use sessions within this framework: - -* Use the ``enginefacade`` system for connectivity, session and - transaction management: - - .. code-block:: python - - from oslo_db.sqlalchemy import enginefacade - - @enginefacade.reader - def get_foo(context, foo): - return (model_query(models.Foo, context.session). - filter_by(foo=foo). - first()) - - @enginefacade.writer - def update_foo(context, id, newfoo): - (model_query(models.Foo, context.session). - filter_by(id=id). - update({'foo': newfoo})) - - @enginefacade.writer - def create_foo(context, values): - foo_ref = models.Foo() - foo_ref.update(values) - foo_ref.save(context.session) - return foo_ref - - In the above system, transactions are committed automatically, and - are shared among all dependent database methods. Ensure - that methods which "write" data are enclosed within @writer blocks. - - .. note:: Statements in the session scope will not be automatically retried. - -* If you create models within the session, they need to be added, but you - do not need to call `model.save()`: - - .. code-block:: python - - @enginefacade.writer - def create_many_foo(context, foos): - for foo in foos: - foo_ref = models.Foo() - foo_ref.update(foo) - context.session.add(foo_ref) - - @enginefacade.writer - def update_bar(context, foo_id, newbar): - foo_ref = (model_query(models.Foo, context.session). - filter_by(id=foo_id). - first()) - (model_query(models.Bar, context.session). - filter_by(id=foo_ref['bar_id']). - update({'bar': newbar})) - - The two queries in `update_bar` can alternatively be expressed using - a single query, which may be more efficient depending on scenario: - - .. 
code-block:: python - - @enginefacade.writer - def update_bar(context, foo_id, newbar): - subq = (model_query(models.Foo.id, context.session). - filter_by(id=foo_id). - limit(1). - subquery()) - (model_query(models.Bar, context.session). - filter_by(id=subq.as_scalar()). - update({'bar': newbar})) - - For reference, this emits approximately the following SQL statement: - - .. code-block:: sql - - UPDATE bar SET bar = ${newbar} - WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); - - .. note:: `create_duplicate_foo` is a trivially simple example of catching an - exception while using a savepoint. Here we create two duplicate - instances with same primary key, must catch the exception out of context - managed by a single session: - - .. code-block:: python - - @enginefacade.writer - def create_duplicate_foo(context): - foo1 = models.Foo() - foo2 = models.Foo() - foo1.id = foo2.id = 1 - try: - with context.session.begin_nested(): - session.add(foo1) - session.add(foo2) - except exception.DBDuplicateEntry as e: - handle_error(e) - -* The enginefacade system eliminates the need to decide when sessions need - to be passed between methods. All methods should instead share a common - context object; the enginefacade system will maintain the transaction - across method calls. - - .. code-block:: python - - @enginefacade.writer - def myfunc(context, foo): - # do some database things - bar = _private_func(context, foo) - return bar - - def _private_func(context, foo): - with enginefacade.using_writer(context) as session: - # do some other database things - session.add(SomeObject()) - return bar - - -* Avoid ``with_lockmode('UPDATE')`` when possible. - - FOR UPDATE is not compatible with MySQL/Galera. Instead, an "opportunistic" - approach should be used, such that if an UPDATE fails, the entire - transaction should be retried. The @wrap_db_retry decorator is one - such system that can be used to achieve this. - -Enabling soft deletes: - -* To use/enable soft-deletes, `SoftDeleteMixin` may be used. For example: - - .. code-block:: python - - class NovaBase(models.SoftDeleteMixin, models.ModelBase): - pass - - -Efficient use of soft deletes: - -* While there is a ``model.soft_delete()`` method, prefer - ``query.soft_delete()``. Some examples: - - .. code-block:: python - - @enginefacade.writer - def soft_delete_bar(context): - # synchronize_session=False will prevent the ORM from attempting - # to search the Session for instances matching the DELETE; - # this is typically not necessary for small operations. - count = model_query(BarModel, context.session).\\ - find(some_condition).soft_delete(synchronize_session=False) - if count == 0: - raise Exception("0 entries were soft deleted") - - @enginefacade.writer - def complex_soft_delete_with_synchronization_bar(context): - # use synchronize_session='evaluate' when you'd like to attempt - # to update the state of the Session to match that of the DELETE. - # This is potentially helpful if the operation is complex and - # continues to work with instances that were loaded, though - # not usually needed. - count = (model_query(BarModel, context.session). - find(some_condition). 
- soft_delete(synchronize_session='evaulate')) - if count == 0: - raise Exception("0 entries were soft deleted") - - -""" - -from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import engines -from oslo_db.sqlalchemy import orm - -EngineFacade = enginefacade.LegacyEngineFacade -create_engine = engines.create_engine -get_maker = orm.get_maker -Query = orm.Query -Session = orm.Session - - -__all__ = ["EngineFacade", "create_engine", "get_maker", "Query", "Session"] diff --git a/oslo_db/sqlalchemy/test_base.py b/oslo_db/sqlalchemy/test_base.py deleted file mode 100644 index f25d266..0000000 --- a/oslo_db/sqlalchemy/test_base.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import testresources -import testscenarios - -try: - from oslotest import base as test_base -except ImportError: - raise NameError('Oslotest is not installed. Please add oslotest in your' - ' test-requirements') - - -import os - -from oslo_utils import reflection -import six - -from oslo_db import exception -from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import provision -from oslo_db.sqlalchemy import session - - -class DbFixture(fixtures.Fixture): - """Basic database fixture. - - Allows to run tests on various db backends, such as SQLite, MySQL and - PostgreSQL. By default use sqlite backend. To override default backend - uri set env variable OS_TEST_DBAPI_ADMIN_CONNECTION with database admin - credentials for specific backend. - """ - - DRIVER = "sqlite" - - # these names are deprecated, and are not used by DbFixture. - # they are here for backwards compatibility with test suites that - # are referring to them directly. 
- DBNAME = PASSWORD = USERNAME = 'openstack_citest' - - def __init__(self, test, skip_on_unavailable_db=True): - super(DbFixture, self).__init__() - self.test = test - self.skip_on_unavailable_db = skip_on_unavailable_db - - def setUp(self): - super(DbFixture, self).setUp() - - testresources.setUpResources( - self.test, self.test.resources, testresources._get_result()) - self.addCleanup( - testresources.tearDownResources, - self.test, self.test.resources, testresources._get_result() - ) - - if not self.test._has_db_resource(): - msg = self.test._get_db_resource_not_available_reason() - if self.test.SKIP_ON_UNAVAILABLE_DB: - self.test.skip(msg) - else: - self.test.fail(msg) - - if self.test.SCHEMA_SCOPE: - self.test.engine = self.test.transaction_engine - self.test.sessionmaker = session.get_maker( - self.test.transaction_engine) - else: - self.test.engine = self.test.db.engine - self.test.sessionmaker = session.get_maker(self.test.engine) - - self.addCleanup(setattr, self.test, 'sessionmaker', None) - self.addCleanup(setattr, self.test, 'engine', None) - - self.test.enginefacade = enginefacade._TestTransactionFactory( - self.test.engine, self.test.sessionmaker, apply_global=True, - synchronous_reader=True) - self.addCleanup(self.test.enginefacade.dispose_global) - - -class DbTestCase(test_base.BaseTestCase): - """Base class for testing of DB code. - - """ - - FIXTURE = DbFixture - SCHEMA_SCOPE = None - SKIP_ON_UNAVAILABLE_DB = True - - _db_not_available = {} - _schema_resources = {} - _database_resources = {} - - def _get_db_resource_not_available_reason(self): - return self._db_not_available.get(self.FIXTURE.DRIVER, None) - - def _has_db_resource(self): - return self._database_resources.get( - self.FIXTURE.DRIVER, None) is not None - - def _resources_for_driver(self, driver, schema_scope, generate_schema): - # testresources relies on the identity and state of the - # TestResourceManager objects in play to correctly manage - # resources, and it also hardcodes to looking at the - # ".resources" attribute on the test object, even though the - # setUpResources() function passes the list of resources in, - # so we have to code the TestResourceManager logic into the - # .resources attribute and ensure that the same set of test - # variables always produces the same TestResourceManager objects. 
- - if driver not in self._database_resources: - try: - self._database_resources[driver] = \ - provision.DatabaseResource(driver) - except exception.BackendNotAvailable as bne: - self._database_resources[driver] = None - self._db_not_available[driver] = str(bne) - - database_resource = self._database_resources[driver] - if database_resource is None: - return [] - - if schema_scope: - key = (driver, schema_scope) - if key not in self._schema_resources: - schema_resource = provision.SchemaResource( - database_resource, generate_schema) - - transaction_resource = provision.TransactionResource( - database_resource, schema_resource) - - self._schema_resources[key] = \ - transaction_resource - - transaction_resource = self._schema_resources[key] - - return [ - ('transaction_engine', transaction_resource), - ('db', database_resource), - ] - else: - key = (driver, None) - if key not in self._schema_resources: - self._schema_resources[key] = provision.SchemaResource( - database_resource, generate_schema, teardown=True) - - schema_resource = self._schema_resources[key] - return [ - ('schema', schema_resource), - ('db', database_resource) - ] - - @property - def resources(self): - return self._resources_for_driver( - self.FIXTURE.DRIVER, self.SCHEMA_SCOPE, self.generate_schema) - - def setUp(self): - super(DbTestCase, self).setUp() - self.useFixture( - self.FIXTURE( - self, skip_on_unavailable_db=self.SKIP_ON_UNAVAILABLE_DB)) - - def generate_schema(self, engine): - """Generate schema objects to be used within a test. - - The function is separate from the setUp() case as the scope - of this method is controlled by the provisioning system. A - test that specifies SCHEMA_SCOPE may not call this method - for each test, as the schema may be maintained from a previous run. - - """ - if self.SCHEMA_SCOPE: - # if SCHEMA_SCOPE is set, then this method definitely - # has to be implemented. This is a guard against a test - # that inadvertently does schema setup within setUp(). - raise NotImplementedError( - "This test requires schema-level setup to be " - "implemented within generate_schema().") - - -class OpportunisticTestCase(DbTestCase): - """Placeholder for backwards compatibility.""" - -ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] - - -def backend_specific(*dialects): - """Decorator to skip backend specific tests on inappropriate engines. - - ::dialects: list of dialects names under which the test will be launched. - """ - def wrap(f): - @six.wraps(f) - def ins_wrap(self): - if not set(dialects).issubset(ALLOWED_DIALECTS): - raise ValueError( - "Please use allowed dialects: %s" % ALLOWED_DIALECTS) - if self.engine.name not in dialects: - msg = ('The test "%s" can be run ' - 'only on %s. Current engine is %s.') - args = (reflection.get_callable_name(f), ', '.join(dialects), - self.engine.name) - self.skip(msg % args) - else: - return f(self) - return ins_wrap - return wrap - - -class MySQLOpportunisticFixture(DbFixture): - DRIVER = 'mysql' - - -class PostgreSQLOpportunisticFixture(DbFixture): - DRIVER = 'postgresql' - - -class MySQLOpportunisticTestCase(OpportunisticTestCase): - FIXTURE = MySQLOpportunisticFixture - - -class PostgreSQLOpportunisticTestCase(OpportunisticTestCase): - FIXTURE = PostgreSQLOpportunisticFixture - - -def optimize_db_test_loader(file_): - """Package level load_tests() function. - - Will apply an optimizing test suite to all sub-tests, which groups DB - tests and other resources appropriately. 
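Before the loader details that follow, a short sketch of how the fixtures and test case classes above are typically combined in a consuming project (class names are illustrative; the MySQL variant is skipped automatically when the ``openstack_citest`` database is not reachable)::

    from oslo_db.sqlalchemy import test_base


    class FooMySQLTestCase(test_base.MySQLOpportunisticTestCase):

        def test_simple_select(self):
            # self.engine is provided by DbFixture during setUp()
            with self.engine.connect() as conn:
                self.assertEqual(1, conn.execute('SELECT 1').scalar())


    class FooDbTestCase(test_base.DbTestCase):

        @test_base.backend_specific('sqlite')
        def test_sqlite_only_behaviour(self):
            # skipped unless the active engine is SQLite
            self.assertEqual('sqlite', self.engine.name)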
- - Place this in an __init__.py package file within the root of the test - suite, at the level where testresources loads it as a package:: - - from oslo_db.sqlalchemy import test_base - - load_tests = test_base.optimize_db_test_loader(__file__) - - Alternatively, the directive can be placed into a test module directly. - - """ - - this_dir = os.path.dirname(file_) - - def load_tests(loader, found_tests, pattern): - # pattern is None if the directive is placed within - # a test module directly, as well as within certain test - # discovery patterns - - if pattern is not None: - pkg_tests = loader.discover(start_dir=this_dir, pattern=pattern) - - result = testresources.OptimisingTestSuite() - found_tests = testscenarios.load_tests_apply_scenarios( - loader, found_tests, pattern) - result.addTest(found_tests) - - if pattern is not None: - result.addTest(pkg_tests) - return result - return load_tests diff --git a/oslo_db/sqlalchemy/test_migrations.py b/oslo_db/sqlalchemy/test_migrations.py deleted file mode 100644 index 524a339..0000000 --- a/oslo_db/sqlalchemy/test_migrations.py +++ /dev/null @@ -1,618 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2012-2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import collections -import functools -import logging -import pprint - -import alembic -import alembic.autogenerate -import alembic.migration -import pkg_resources as pkg -import six -import sqlalchemy -import sqlalchemy.exc -import sqlalchemy.sql.expression as expr -import sqlalchemy.types as types - -from oslo_db._i18n import _LE -from oslo_db import exception as exc -from oslo_db.sqlalchemy import utils - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class WalkVersionsMixin(object): - """Test mixin to check upgrade and downgrade ability of migration. - - This is only suitable for testing of migrate_ migration scripts. An - abstract class mixin. `INIT_VERSION`, `REPOSITORY` and `migration_api` - attributes must be implemented in subclasses. - - .. _auxiliary-dynamic-methods: Auxiliary Methods - - Auxiliary Methods: - - `migrate_up` and `migrate_down` instance methods of the class can be - used with auxiliary methods named `_pre_upgrade_`, - `_check_`, `_post_downgrade_`. The methods - intended to check applied changes for correctness of data operations. - This methods should be implemented for every particular revision - which you want to check with data. Implementation recommendations for - `_pre_upgrade_`, `_check_`, - `_post_downgrade_` implementation: - - * `_pre_upgrade_`: provide a data appropriate to - a next revision. Should be used an id of revision which - going to be applied. - - * `_check_`: Insert, select, delete operations - with newly applied changes. The data provided by - `_pre_upgrade_` will be used. - - * `_post_downgrade_`: check for absence - (inability to use) changes provided by reverted revision. 
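For instance, a project checking a migration ``042`` that adds a ``status`` column to an existing ``foo`` table might provide something along these lines (revision number, table and column names are purely illustrative; ``utils`` refers to ``oslo_db.sqlalchemy.utils``)::

    def _pre_upgrade_042(self, engine):
        # seed a row before the migration runs
        data = [{'id': 1, 'name': 'fake'}]
        foo = utils.get_table(engine, 'foo')
        engine.execute(foo.insert(), data)
        return data

    def _check_042(self, engine, data):
        # the seeded row must survive and carry the new column
        foo = utils.get_table(engine, 'foo')
        rows = engine.execute(foo.select()).fetchall()
        self.assertEqual(len(data), len(rows))
        self.assertIn('status', foo.c)

    def _post_downgrade_042(self, engine):
        # after the downgrade the column must be gone again
        foo = utils.get_table(engine, 'foo')
        self.assertNotIn('status', foo.c)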
- - Execution order of auxiliary methods when revision is upgrading: - - `_pre_upgrade_###` => `upgrade` => `_check_###` - - Execution order of auxiliary methods when revision is downgrading: - - `downgrade` => `_post_downgrade_###` - - .. _migrate: https://sqlalchemy-migrate.readthedocs.org/en/latest/ - - """ - - @abc.abstractproperty - def INIT_VERSION(self): - """Initial version of a migration repository. - - Can be different from 0, if a migrations were squashed. - - :rtype: int - """ - pass - - @abc.abstractproperty - def REPOSITORY(self): - """Allows basic manipulation with migration repository. - - :returns: `migrate.versioning.repository.Repository` subclass. - """ - pass - - @abc.abstractproperty - def migration_api(self): - """Provides API for upgrading, downgrading and version manipulations. - - :returns: `migrate.api` or overloaded analog. - """ - pass - - @abc.abstractproperty - def migrate_engine(self): - """Provides engine instance. - - Should be the same instance as used when migrations are applied. In - most cases, the `engine` attribute provided by the test class in a - `setUp` method will work. - - Example of implementation: - - def migrate_engine(self): - return self.engine - - :returns: sqlalchemy engine instance - """ - pass - - def _walk_versions(self, snake_walk=False, downgrade=True): - """Check if migration upgrades and downgrades successfully. - - DEPRECATED: this function is deprecated and will be removed from - oslo.db in a few releases. Please use walk_versions() method instead. - """ - self.walk_versions(snake_walk, downgrade) - - def _migrate_down(self, version, with_data=False): - """Migrate down to a previous version of the db. - - DEPRECATED: this function is deprecated and will be removed from - oslo.db in a few releases. Please use migrate_down() method instead. - """ - return self.migrate_down(version, with_data) - - def _migrate_up(self, version, with_data=False): - """Migrate up to a new version of the db. - - DEPRECATED: this function is deprecated and will be removed from - oslo.db in a few releases. Please use migrate_up() method instead. - """ - self.migrate_up(version, with_data) - - def walk_versions(self, snake_walk=False, downgrade=True): - """Check if migration upgrades and downgrades successfully. - - Determine the latest version script from the repo, then - upgrade from 1 through to the latest, with no data - in the databases. This just checks that the schema itself - upgrades successfully. - - `walk_versions` calls `migrate_up` and `migrate_down` with - `with_data` argument to check changes with data, but these methods - can be called without any extra check outside of `walk_versions` - method. - - :param snake_walk: enables checking that each individual migration can - be upgraded/downgraded by itself. - - If we have ordered migrations 123abc, 456def, 789ghi and we run - upgrading with the `snake_walk` argument set to `True`, the - migrations will be applied in the following order:: - - `123abc => 456def => 123abc => - 456def => 789ghi => 456def => 789ghi` - - :type snake_walk: bool - :param downgrade: Check downgrade behavior if True. 
- :type downgrade: bool - """ - - # Place the database under version control - self.migration_api.version_control(self.migrate_engine, - self.REPOSITORY, - self.INIT_VERSION) - self.assertEqual(self.INIT_VERSION, - self.migration_api.db_version(self.migrate_engine, - self.REPOSITORY)) - - LOG.debug('latest version is %s', self.REPOSITORY.latest) - versions = range(int(self.INIT_VERSION) + 1, - int(self.REPOSITORY.latest) + 1) - - for version in versions: - # upgrade -> downgrade -> upgrade - self.migrate_up(version, with_data=True) - if snake_walk: - downgraded = self.migrate_down(version - 1, with_data=True) - if downgraded: - self.migrate_up(version) - - if downgrade: - # Now walk it back down to 0 from the latest, testing - # the downgrade paths. - for version in reversed(versions): - # downgrade -> upgrade -> downgrade - downgraded = self.migrate_down(version - 1) - - if snake_walk and downgraded: - self.migrate_up(version) - self.migrate_down(version - 1) - - def migrate_down(self, version, with_data=False): - """Migrate down to a previous version of the db. - - :param version: id of revision to downgrade. - :type version: str - :keyword with_data: Whether to verify the absence of changes from - migration(s) being downgraded, see - :ref:`auxiliary-dynamic-methods `. - :type with_data: Bool - """ - - try: - self.migration_api.downgrade(self.migrate_engine, - self.REPOSITORY, version) - except NotImplementedError: - # NOTE(sirp): some migrations, namely release-level - # migrations, don't support a downgrade. - return False - - self.assertEqual(version, self.migration_api.db_version( - self.migrate_engine, self.REPOSITORY)) - - # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' - # version). So if we have any downgrade checks, they need to be run for - # the previous (higher numbered) migration. - if with_data: - post_downgrade = getattr( - self, "_post_downgrade_%03d" % (version + 1), None) - if post_downgrade: - post_downgrade(self.migrate_engine) - - return True - - def migrate_up(self, version, with_data=False): - """Migrate up to a new version of the db. - - :param version: id of revision to upgrade. - :type version: str - :keyword with_data: Whether to verify the applied changes with data, - see :ref:`auxiliary-dynamic-methods `. - :type with_data: Bool - """ - # NOTE(sdague): try block is here because it's impossible to debug - # where a failed data migration happens otherwise - try: - if with_data: - data = None - pre_upgrade = getattr( - self, "_pre_upgrade_%03d" % version, None) - if pre_upgrade: - data = pre_upgrade(self.migrate_engine) - - self.migration_api.upgrade(self.migrate_engine, - self.REPOSITORY, version) - self.assertEqual(version, - self.migration_api.db_version(self.migrate_engine, - self.REPOSITORY)) - if with_data: - check = getattr(self, "_check_%03d" % version, None) - if check: - check(self.migrate_engine, data) - except exc.DBMigrationError: - msg = _LE("Failed to migrate to version %(ver)s on engine %(eng)s") - LOG.error(msg, {"ver": version, "eng": self.migrate_engine}) - raise - - -@six.add_metaclass(abc.ABCMeta) -class ModelsMigrationsSync(object): - """A helper class for comparison of DB migration scripts and models. - - It's intended to be inherited by test cases in target projects. They have - to provide implementations for methods used internally in the test (as - we have no way to implement them here). 
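A typical project-side subclass looks roughly like this (``myproject`` and its ``migration`` and ``models`` modules are placeholders for the consuming project's own code)::

    from oslo_db.sqlalchemy import test_base
    from oslo_db.sqlalchemy import test_migrations

    from myproject.db import migration
    from myproject.db.sqlalchemy import models


    class ModelsSyncMySQL(test_migrations.ModelsMigrationsSync,
                          test_base.MySQLOpportunisticTestCase):

        def db_sync(self, engine):
            # run the project's migration scripts against the engine
            migration.db_sync(engine=engine)

        def get_engine(self):
            return self.engine

        def get_metadata(self):
            return models.BASE.metadata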
- - test_model_sync() will run migration scripts for the engine provided and - then compare the given metadata to the one reflected from the database. - The difference between MODELS and MIGRATION scripts will be printed and - the test will fail, if the difference is not empty. The return value is - really a list of actions, that should be performed in order to make the - current database schema state (i.e. migration scripts) consistent with - models definitions. It's left up to developers to analyze the output and - decide whether the models definitions or the migration scripts should be - modified to make them consistent. - - Output:: - - [( - 'add_table', - description of the table from models - ), - ( - 'remove_table', - description of the table from database - ), - ( - 'add_column', - schema, - table name, - column description from models - ), - ( - 'remove_column', - schema, - table name, - column description from database - ), - ( - 'add_index', - description of the index from models - ), - ( - 'remove_index', - description of the index from database - ), - ( - 'add_constraint', - description of constraint from models - ), - ( - 'remove_constraint, - description of constraint from database - ), - ( - 'modify_nullable', - schema, - table name, - column name, - { - 'existing_type': type of the column from database, - 'existing_server_default': default value from database - }, - nullable from database, - nullable from models - ), - ( - 'modify_type', - schema, - table name, - column name, - { - 'existing_nullable': database nullable, - 'existing_server_default': default value from database - }, - database column type, - type of the column from models - ), - ( - 'modify_default', - schema, - table name, - column name, - { - 'existing_nullable': database nullable, - 'existing_type': type of the column from database - }, - connection column default value, - default from models - )] - - Method include_object() can be overridden to exclude some tables from - comparison (e.g. migrate_repo). - - """ - - @abc.abstractmethod - def db_sync(self, engine): - """Run migration scripts with the given engine instance. - - This method must be implemented in subclasses and run migration scripts - for a DB the given engine is connected to. - - """ - - @abc.abstractmethod - def get_engine(self): - """Return the engine instance to be used when running tests. - - This method must be implemented in subclasses and return an engine - instance to be used when running tests. - - """ - - @abc.abstractmethod - def get_metadata(self): - """Return the metadata instance to be used for schema comparison. - - This method must be implemented in subclasses and return the metadata - instance attached to the BASE model. - - """ - - def include_object(self, object_, name, type_, reflected, compare_to): - """Return True for objects that should be compared. - - :param object_: a SchemaItem object such as a Table or Column object - :param name: the name of the object - :param type_: a string describing the type of object (e.g. "table") - :param reflected: True if the given object was produced based on - table reflection, False if it's from a local - MetaData object - :param compare_to: the object being compared against, if available, - else None - - """ - - return True - - def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type): - """Return True if types are different, False if not. - - Return None to allow the default implementation to compare these types. 
- - :param ctxt: alembic MigrationContext instance - :param insp_col: reflected column - :param meta_col: column from model - :param insp_type: reflected column type - :param meta_type: column type from model - - """ - - # some backends (e.g. mysql) don't provide native boolean type - BOOLEAN_METADATA = (types.BOOLEAN, types.Boolean) - BOOLEAN_SQL = BOOLEAN_METADATA + (types.INTEGER, types.Integer) - - if issubclass(type(meta_type), BOOLEAN_METADATA): - return not issubclass(type(insp_type), BOOLEAN_SQL) - - # Alembic <=0.8.4 do not contain logic of comparing Variant type with - # others. - if isinstance(meta_type, types.Variant): - orig_type = meta_col.type - impl_type = meta_type.load_dialect_impl(ctxt.dialect) - meta_col.type = impl_type - try: - return self.compare_type(ctxt, insp_col, meta_col, insp_type, - impl_type) - finally: - meta_col.type = orig_type - - return ctxt.impl.compare_type(insp_col, meta_col) - - def compare_server_default(self, ctxt, ins_col, meta_col, - insp_def, meta_def, rendered_meta_def): - """Compare default values between model and db table. - - Return True if the defaults are different, False if not, or None to - allow the default implementation to compare these defaults. - - :param ctxt: alembic MigrationContext instance - :param insp_col: reflected column - :param meta_col: column from model - :param insp_def: reflected column default value - :param meta_def: column default value from model - :param rendered_meta_def: rendered column default value (from model) - - """ - return self._compare_server_default(ctxt.bind, meta_col, insp_def, - meta_def) - - @utils.DialectFunctionDispatcher.dispatch_for_dialect("*") - def _compare_server_default(bind, meta_col, insp_def, meta_def): - pass - - @_compare_server_default.dispatch_for('mysql') - def _compare_server_default(bind, meta_col, insp_def, meta_def): - if isinstance(meta_col.type, sqlalchemy.Boolean): - if meta_def is None or insp_def is None: - return meta_def != insp_def - return not ( - isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or - isinstance(meta_def.arg, expr.False_) and insp_def == "'0'" - ) - - impl_type = meta_col.type - if isinstance(impl_type, types.Variant): - impl_type = impl_type.load_dialect_impl(bind.dialect) - if isinstance(impl_type, (sqlalchemy.Integer, sqlalchemy.BigInteger)): - if meta_def is None or insp_def is None: - return meta_def != insp_def - return meta_def.arg != insp_def.split("'")[1] - - @_compare_server_default.dispatch_for('postgresql') - def _compare_server_default(bind, meta_col, insp_def, meta_def): - if isinstance(meta_col.type, sqlalchemy.Enum): - if meta_def is None or insp_def is None: - return meta_def != insp_def - return insp_def != "'%s'::%s" % (meta_def.arg, meta_col.type.name) - elif isinstance(meta_col.type, sqlalchemy.String): - if meta_def is None or insp_def is None: - return meta_def != insp_def - return insp_def != "'%s'::character varying" % meta_def.arg - - FKInfo = collections.namedtuple('fk_info', ['constrained_columns', - 'referred_table', - 'referred_columns']) - - def check_foreign_keys(self, metadata, bind): - """Compare foreign keys between model and db table. 
- - :returns: a list that contains information about: - - * should be a new key added or removed existing, - * name of that key, - * source table, - * referred table, - * constrained columns, - * referred columns - - Output:: - - [('drop_key', - 'testtbl_fk_check_fkey', - 'testtbl', - fk_info(constrained_columns=(u'fk_check',), - referred_table=u'table', - referred_columns=(u'fk_check',)))] - - DEPRECATED: this function is deprecated and will be removed from - oslo.db in a few releases. Alembic autogenerate.compare_metadata() - now includes foreign key comparison directly. - - """ - - diff = [] - insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind) - # Get all tables from db - db_tables = insp.get_table_names() - # Get all tables from models - model_tables = metadata.tables - for table in db_tables: - if table not in model_tables: - continue - # Get all necessary information about key of current table from db - fk_db = dict((self._get_fk_info_from_db(i), i['name']) - for i in insp.get_foreign_keys(table)) - fk_db_set = set(fk_db.keys()) - # Get all necessary information about key of current table from - # models - fk_models = dict((self._get_fk_info_from_model(fk), fk) - for fk in model_tables[table].foreign_keys) - fk_models_set = set(fk_models.keys()) - for key in (fk_db_set - fk_models_set): - diff.append(('drop_key', fk_db[key], table, key)) - LOG.info(("Detected removed foreign key %(fk)r on " - "table %(table)r"), {'fk': fk_db[key], - 'table': table}) - for key in (fk_models_set - fk_db_set): - diff.append(('add_key', fk_models[key], table, key)) - LOG.info(( - "Detected added foreign key for column %(fk)r on table " - "%(table)r"), {'fk': fk_models[key].column.name, - 'table': table}) - return diff - - def _get_fk_info_from_db(self, fk): - return self.FKInfo(tuple(fk['constrained_columns']), - fk['referred_table'], - tuple(fk['referred_columns'])) - - def _get_fk_info_from_model(self, fk): - return self.FKInfo((fk.parent.name,), fk.column.table.name, - (fk.column.name,)) - - def filter_metadata_diff(self, diff): - """Filter changes before assert in test_models_sync(). - - Allow subclasses to whitelist/blacklist changes. By default, no - filtering is performed, changes are returned as is. 
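A subclass that wants to ignore a particular table could, for example, filter along these lines (the table name is illustrative)::

    def filter_metadata_diff(self, diff):
        # drop add/remove entries for the hypothetical 'shadow_foo' table
        return [change for change in diff
                if not (change[0] in ('add_table', 'remove_table')
                        and change[1].name == 'shadow_foo')]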
- - :param diff: a list of differences (see `compare_metadata()` docs for - details on format) - :returns: a list of differences - - """ - - return diff - - def test_models_sync(self): - # recent versions of sqlalchemy and alembic are needed for running of - # this test, but we already have them in requirements - try: - pkg.require('sqlalchemy>=0.8.4', 'alembic>=0.6.2') - except (pkg.VersionConflict, pkg.DistributionNotFound) as e: - self.skipTest('sqlalchemy>=0.8.4 and alembic>=0.6.3 are required' - ' for running of this test: %s' % e) - - # drop all tables after a test run - self.addCleanup(functools.partial(self.db.backend.drop_all_objects, - self.get_engine())) - - # run migration scripts - self.db_sync(self.get_engine()) - - with self.get_engine().connect() as conn: - opts = { - 'include_object': self.include_object, - 'compare_type': self.compare_type, - 'compare_server_default': self.compare_server_default, - } - mc = alembic.migration.MigrationContext.configure(conn, opts=opts) - - # compare schemas and fail with diff, if it's not empty - diff = self.filter_metadata_diff( - alembic.autogenerate.compare_metadata(mc, self.get_metadata())) - if diff: - msg = pprint.pformat(diff, indent=2, width=20) - self.fail( - "Models and migration scripts aren't in sync:\n%s" % msg) diff --git a/oslo_db/sqlalchemy/types.py b/oslo_db/sqlalchemy/types.py deleted file mode 100644 index a6f8acb..0000000 --- a/oslo_db/sqlalchemy/types.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from sqlalchemy.types import TypeDecorator, Text -from sqlalchemy.dialects import mysql - - -class JsonEncodedType(TypeDecorator): - """Base column type for data serialized as JSON-encoded string in db.""" - type = None - impl = Text - - def __init__(self, mysql_as_long=False, mysql_as_medium=False): - super(JsonEncodedType, self).__init__() - - if mysql_as_long and mysql_as_medium: - raise TypeError("mysql_as_long and mysql_as_medium are mutually " - "exclusive") - - if mysql_as_long: - self.impl = Text().with_variant(mysql.LONGTEXT(), 'mysql') - elif mysql_as_medium: - self.impl = Text().with_variant(mysql.MEDIUMTEXT(), 'mysql') - - def process_bind_param(self, value, dialect): - if value is None: - if self.type is not None: - # Save default value according to current type to keep the - # interface consistent. - value = self.type() - elif self.type is not None and not isinstance(value, self.type): - raise TypeError("%s supposes to store %s objects, but %s given" - % (self.__class__.__name__, - self.type.__name__, - type(value).__name__)) - serialized_value = json.dumps(value) - return serialized_value - - def process_result_value(self, value, dialect): - if value is not None: - value = json.loads(value) - return value - - -class JsonEncodedDict(JsonEncodedType): - """Represents dict serialized as json-encoded string in db. - - Note that this type does NOT track mutations. 
If you want to update it, you - have to assign existing value to a temporary variable, update, then assign - back. See this page for more robust work around: - http://docs.sqlalchemy.org/en/rel_1_0/orm/extensions/mutable.html - """ - type = dict - - -class JsonEncodedList(JsonEncodedType): - """Represents list serialized as json-encoded string in db. - - Note that this type does NOT track mutations. If you want to update it, you - have to assign existing value to a temporary variable, update, then assign - back. See this page for more robust work around: - http://docs.sqlalchemy.org/en/rel_1_0/orm/extensions/mutable.html - """ - type = list diff --git a/oslo_db/sqlalchemy/update_match.py b/oslo_db/sqlalchemy/update_match.py deleted file mode 100644 index 5765817..0000000 --- a/oslo_db/sqlalchemy/update_match.py +++ /dev/null @@ -1,508 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from sqlalchemy import inspect -from sqlalchemy import orm -from sqlalchemy import sql -from sqlalchemy import types as sqltypes - -from oslo_db.sqlalchemy import utils - - -def update_on_match( - query, - specimen, - surrogate_key, - values=None, - attempts=3, - include_only=None, - process_query=None, - handle_failure=None -): - """Emit an UPDATE statement matching the given specimen. - - E.g.:: - - with enginefacade.writer() as session: - specimen = MyInstance( - uuid='ccea54f', - interface_id='ad33fea', - vm_state='SOME_VM_STATE', - ) - - values = { - 'vm_state': 'SOME_NEW_VM_STATE' - } - - base_query = model_query( - context, models.Instance, - project_only=True, session=session) - - hostname_query = model_query( - context, models.Instance, session=session, - read_deleted='no'). - filter(func.lower(models.Instance.hostname) == 'SOMEHOSTNAME') - - surrogate_key = ('uuid', ) - - def process_query(query): - return query.where(~exists(hostname_query)) - - def handle_failure(query): - try: - instance = base_query.one() - except NoResultFound: - raise exception.InstanceNotFound(instance_id=instance_uuid) - - if session.query(hostname_query.exists()).scalar(): - raise exception.InstanceExists( - name=values['hostname'].lower()) - - # try again - return False - - persistent_instance = base_query.update_on_match( - specimen, - surrogate_key, - values=values, - process_query=process_query, - handle_failure=handle_failure - ) - - The UPDATE statement is constructed against the given specimen - using those values which are present to construct a WHERE clause. - If the specimen contains additional values to be ignored, the - ``include_only`` parameter may be passed which indicates a sequence - of attributes to use when constructing the WHERE. - - The UPDATE is performed against an ORM Query, which is created from - the given ``Session``, or alternatively by passing the ```query`` - parameter referring to an existing query. - - Before the query is invoked, it is also passed through the callable - sent as ``process_query``, if present. 
This hook allows additional - criteria to be added to the query after it is created but before - invocation. - - The function will then invoke the UPDATE statement and check for - "success" one or more times, up to a maximum of that passed as - ``attempts``. - - The initial check for "success" from the UPDATE statement is that the - number of rows returned matches 1. If zero rows are matched, then - the UPDATE statement is assumed to have "failed", and the failure handling - phase begins. - - The failure handling phase involves invoking the given ``handle_failure`` - function, if any. This handler can perform additional queries to attempt - to figure out why the UPDATE didn't match any rows. The handler, - upon detection of the exact failure condition, should throw an exception - to exit; if it doesn't, it has the option of returning True or False, - where False means the error was not handled, and True means that there - was not in fact an error, and the function should return successfully. - - If the failure handler is not present, or returns False after ``attempts`` - number of attempts, then the function overall raises CantUpdateException. - If the handler returns True, then the function returns with no error. - - The return value of the function is a persistent version of the given - specimen; this may be the specimen itself, if no matching object were - already present in the session; otherwise, the existing object is - returned, with the state of the specimen merged into it. The returned - persistent object will have the given values populated into the object. - - The object is is returned as "persistent", meaning that it is - associated with the given - Session and has an identity key (that is, a real primary key - value). - - In order to produce this identity key, a strategy must be used to - determine it as efficiently and safely as possible: - - 1. If the given specimen already contained its primary key attributes - fully populated, then these attributes were used as criteria in the - UPDATE, so we have the primary key value; it is populated directly. - - 2. If the target backend supports RETURNING, then when the update() query - is performed with a RETURNING clause so that the matching primary key - is returned atomically. This currently includes Postgresql, Oracle - and others (notably not MySQL or SQLite). - - 3. If the target backend is MySQL, and the given model uses a - single-column, AUTO_INCREMENT integer primary key value (as is - the case for Nova), MySQL's recommended approach of making use - of ``LAST_INSERT_ID(expr)`` is used to atomically acquire the - matching primary key value within the scope of the UPDATE - statement, then it fetched immediately following by using - ``SELECT LAST_INSERT_ID()``. - http://dev.mysql.com/doc/refman/5.0/en/information-\ - functions.html#function_last-insert-id - - 4. Otherwise, for composite keys on MySQL or other backends such - as SQLite, the row as UPDATED must be re-fetched in order to - acquire the primary key value. The ``surrogate_key`` - parameter is used for this in order to re-fetch the row; this - is a column name with a known, unique value where - the object can be fetched. 
- - - """ - - if values is None: - values = {} - - entity = inspect(specimen) - mapper = entity.mapper - assert \ - [desc['type'] for desc in query.column_descriptions] == \ - [mapper.class_], "Query does not match given specimen" - - criteria = manufacture_entity_criteria( - specimen, include_only=include_only, exclude=[surrogate_key]) - - query = query.filter(criteria) - - if process_query: - query = process_query(query) - - surrogate_key_arg = ( - surrogate_key, entity.attrs[surrogate_key].loaded_value) - pk_value = None - - for attempt in range(attempts): - try: - pk_value = query.update_returning_pk(values, surrogate_key_arg) - except MultiRowsMatched: - raise - except NoRowsMatched: - if handle_failure and handle_failure(query): - break - else: - break - else: - raise NoRowsMatched("Zero rows matched for %d attempts" % attempts) - - if pk_value is None: - pk_value = entity.mapper.primary_key_from_instance(specimen) - - # NOTE(mdbooth): Can't pass the original specimen object here as it might - # have lists of multiple potential values rather than actual values. - values = copy.copy(values) - values[surrogate_key] = surrogate_key_arg[1] - persistent_obj = manufacture_persistent_object( - query.session, specimen.__class__(), values, pk_value) - - return persistent_obj - - -def manufacture_persistent_object( - session, specimen, values=None, primary_key=None): - """Make an ORM-mapped object persistent in a Session without SQL. - - The persistent object is returned. - - If a matching object is already present in the given session, the specimen - is merged into it and the persistent object returned. Otherwise, the - specimen itself is made persistent and is returned. - - The object must contain a full primary key, or provide it via the values or - primary_key parameters. The object is persisted to the Session in a "clean" - state with no pending changes. - - :param session: A Session object. - - :param specimen: a mapped object which is typically transient. - - :param values: a dictionary of values to be applied to the specimen, - in addition to the state that's already on it. The attributes will be - set such that no history is created; the object remains clean. - - :param primary_key: optional tuple-based primary key. This will also - be applied to the instance if present. - - - """ - state = inspect(specimen) - mapper = state.mapper - - for k, v in values.items(): - orm.attributes.set_committed_value(specimen, k, v) - - pk_attrs = [ - mapper.get_property_by_column(col).key - for col in mapper.primary_key - ] - - if primary_key is not None: - for key, value in zip(pk_attrs, primary_key): - orm.attributes.set_committed_value( - specimen, - key, - value - ) - - for key in pk_attrs: - if state.attrs[key].loaded_value is orm.attributes.NO_VALUE: - raise ValueError("full primary key must be present") - - orm.make_transient_to_detached(specimen) - - if state.key not in session.identity_map: - session.add(specimen) - return specimen - else: - return session.merge(specimen, load=False) - - -def manufacture_entity_criteria(entity, include_only=None, exclude=None): - """Given a mapped instance, produce a WHERE clause. - - The attributes set upon the instance will be combined to produce - a SQL expression using the mapped SQL expressions as the base - of comparison. - - Values on the instance may be set as tuples in which case the - criteria will produce an IN clause.
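For example, a specimen built as ``MyModel(vm_state=('ACTIVE', 'ERROR'))`` (an illustrative model and attribute) would yield criteria along the lines of ``my_table.vm_state IN ('ACTIVE', 'ERROR')``.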
None is also acceptable as a - scalar or tuple entry, which will produce IS NULL that is properly - joined with an OR against an IN expression if appropriate. - - :param entity: a mapped entity. - - :param include_only: optional sequence of keys to limit which - keys are included. - - :param exclude: sequence of keys to exclude - - """ - - state = inspect(entity) - exclude = set(exclude) if exclude is not None else set() - - existing = dict( - (attr.key, attr.loaded_value) - for attr in state.attrs - if attr.loaded_value is not orm.attributes.NO_VALUE - and attr.key not in exclude - ) - if include_only: - existing = dict( - (k, existing[k]) - for k in set(existing).intersection(include_only) - ) - - return manufacture_criteria(state.mapper, existing) - - -def manufacture_criteria(mapped, values): - """Given a mapper/class and a namespace of values, produce a WHERE clause. - - The class should be a mapped class and the entries in the dictionary - correspond to mapped attribute names on the class. - - A value may also be a tuple in which case that particular attribute - will be compared to a tuple using IN. The scalar value or - tuple can also contain None which translates to an IS NULL, that is - properly joined with OR against an IN expression if appropriate. - - :param cls: a mapped class, or actual :class:`.Mapper` object. - - :param values: dictionary of values. - - """ - - mapper = inspect(mapped) - - # organize keys using mapped attribute ordering, which is deterministic - value_keys = set(values) - keys = [k for k in mapper.column_attrs.keys() if k in value_keys] - return sql.and_(*[ - _sql_crit(mapper.column_attrs[key].expression, values[key]) - for key in keys - ]) - - -def _sql_crit(expression, value): - """Produce an equality expression against the given value. - - This takes into account a value that is actually a collection - of values, as well as a value of None or collection that contains - None. - - """ - - values = utils.to_list(value, default=(None, )) - if len(values) == 1: - if values[0] is None: - return expression == sql.null() - else: - return expression == values[0] - elif _none_set.intersection(values): - return sql.or_( - expression == sql.null(), - _sql_crit(expression, set(values).difference(_none_set)) - ) - else: - return expression.in_(values) - - -def update_returning_pk(query, values, surrogate_key): - """Perform an UPDATE, returning the primary key of the matched row. - - The primary key is returned using a selection of strategies: - - * if the database supports RETURNING, RETURNING is used to retrieve - the primary key values inline. - - * If the database is MySQL and the entity is mapped to a single integer - primary key column, MySQL's last_insert_id() function is used - inline within the UPDATE and then upon a second SELECT to get the - value. - - * Otherwise, a "refetch" strategy is used, where a given "surrogate" - key value (typically a UUID column on the entity) is used to run - a new SELECT against that UUID. This UUID is also placed into - the UPDATE query to ensure the row matches. - - :param query: a Query object with existing criterion, against a single - entity. - - :param values: a dictionary of values to be updated on the row. - - :param surrogate_key: a tuple of (attrname, value), referring to a - UNIQUE attribute that will also match the row. This attribute is used - to retrieve the row via a SELECT when no optimized strategy exists. - - :return: the primary key, returned as a tuple. - Is only returned if rows matched is one. 
Otherwise, CantUpdateException - is raised. - - """ - - entity = query.column_descriptions[0]['type'] - mapper = inspect(entity).mapper - session = query.session - - bind = session.connection(mapper=mapper) - if bind.dialect.implicit_returning: - pk_strategy = _pk_strategy_returning - elif bind.dialect.name == 'mysql' and \ - len(mapper.primary_key) == 1 and \ - isinstance( - mapper.primary_key[0].type, sqltypes.Integer): - pk_strategy = _pk_strategy_mysql_last_insert_id - else: - pk_strategy = _pk_strategy_refetch - - return pk_strategy(query, mapper, values, surrogate_key) - - -def _assert_single_row(rows_updated): - if rows_updated == 1: - return rows_updated - elif rows_updated > 1: - raise MultiRowsMatched("%d rows matched; expected one" % rows_updated) - else: - raise NoRowsMatched("No rows matched the UPDATE") - - -def _pk_strategy_refetch(query, mapper, values, surrogate_key): - - surrogate_key_name, surrogate_key_value = surrogate_key - surrogate_key_col = mapper.attrs[surrogate_key_name].expression - - rowcount = query.\ - filter(surrogate_key_col == surrogate_key_value).\ - update(values, synchronize_session=False) - - _assert_single_row(rowcount) - # SELECT my_table.id AS my_table_id FROM my_table - # WHERE my_table.y = ? AND my_table.z = ? - # LIMIT ? OFFSET ? - fetch_query = query.session.query( - *mapper.primary_key).filter( - surrogate_key_col == surrogate_key_value) - - primary_key = fetch_query.one() - - return primary_key - - -def _pk_strategy_returning(query, mapper, values, surrogate_key): - surrogate_key_name, surrogate_key_value = surrogate_key - surrogate_key_col = mapper.attrs[surrogate_key_name].expression - - update_stmt = _update_stmt_from_query(mapper, query, values) - update_stmt = update_stmt.where(surrogate_key_col == surrogate_key_value) - update_stmt = update_stmt.returning(*mapper.primary_key) - - # UPDATE my_table SET x=%(x)s, z=%(z)s WHERE my_table.y = %(y_1)s - # AND my_table.z = %(z_1)s RETURNING my_table.id - result = query.session.execute(update_stmt) - rowcount = result.rowcount - _assert_single_row(rowcount) - primary_key = tuple(result.first()) - - return primary_key - - -def _pk_strategy_mysql_last_insert_id(query, mapper, values, surrogate_key): - - surrogate_key_name, surrogate_key_value = surrogate_key - surrogate_key_col = mapper.attrs[surrogate_key_name].expression - - surrogate_pk_col = mapper.primary_key[0] - update_stmt = _update_stmt_from_query(mapper, query, values) - update_stmt = update_stmt.where(surrogate_key_col == surrogate_key_value) - update_stmt = update_stmt.values( - {surrogate_pk_col: sql.func.last_insert_id(surrogate_pk_col)}) - - # UPDATE my_table SET id=last_insert_id(my_table.id), - # x=%s, z=%s WHERE my_table.y = %s AND my_table.z = %s - result = query.session.execute(update_stmt) - rowcount = result.rowcount - _assert_single_row(rowcount) - # SELECT last_insert_id() AS last_insert_id_1 - primary_key = query.session.scalar(sql.func.last_insert_id()), - - return primary_key - - -def _update_stmt_from_query(mapper, query, values): - upd_values = dict( - ( - mapper.column_attrs[key], value - ) for key, value in values.items() - ) - query = query.enable_eagerloads(False) - context = query._compile_context() - primary_table = context.statement.froms[0] - update_stmt = sql.update(primary_table, - context.whereclause, - upd_values) - return update_stmt - - -_none_set = frozenset([None]) - - -class CantUpdateException(Exception): - pass - - -class NoRowsMatched(CantUpdateException): - pass - - -class 
MultiRowsMatched(CantUpdateException): - pass diff --git a/oslo_db/sqlalchemy/utils.py b/oslo_db/sqlalchemy/utils.py deleted file mode 100644 index 594a7f2..0000000 --- a/oslo_db/sqlalchemy/utils.py +++ /dev/null @@ -1,1161 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010-2011 OpenStack Foundation. -# Copyright 2012 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import contextlib -import itertools -import logging -import re - -from oslo_utils import timeutils -import six -import sqlalchemy -from sqlalchemy import Boolean -from sqlalchemy import CheckConstraint -from sqlalchemy import Column -from sqlalchemy.engine import Connectable -from sqlalchemy.engine import reflection -from sqlalchemy.engine import url as sa_url -from sqlalchemy import func -from sqlalchemy import Index -from sqlalchemy import inspect -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy.sql.expression import literal_column -from sqlalchemy.sql import text -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy.types import NullType - -from oslo_db import exception -from oslo_db._i18n import _, _LI, _LW -from oslo_db.sqlalchemy import models - -# NOTE(ochuprykov): Add references for backwards compatibility -InvalidSortKey = exception.InvalidSortKey -ColumnError = exception.ColumnError - -LOG = logging.getLogger(__name__) - -_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+") - -_VALID_SORT_DIR = [ - "-".join(x) for x in itertools.product(["asc", "desc"], - ["nullsfirst", "nullslast"])] - - -def sanitize_db_url(url): - match = _DBURL_REGEX.match(url) - if match: - return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):]) - return url - - -# copy from glance/db/sqlalchemy/api.py -def paginate_query(query, model, limit, sort_keys, marker=None, - sort_dir=None, sort_dirs=None): - """Returns a query with sorting / pagination criteria added. - - Pagination works by requiring a unique sort_key, specified by sort_keys. - (If sort_keys is not unique, then we risk looping through values.) - We use the last row in the previous page as the 'marker' for pagination. - So we must return values that follow the passed marker in the order. - With a single-valued sort_key, this would be easy: sort_key > X. - With a compound-values sort_key, (k1, k2, k3) we must do this to repeat - the lexicographical ordering: - (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) - - We also have to cope with different sort_directions. - - Typically, the id of the last row is used as the client-facing pagination - marker, then the actual marker object must be fetched from the db and - passed in to us as marker. 
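A typical call, where ``models.Instance`` and ``last_row`` are illustrative names, might look like::

    query = paginate_query(
        session.query(models.Instance),
        models.Instance,
        limit=100,
        sort_keys=['created_at', 'id'],
        marker=last_row,
        sort_dirs=['desc', 'desc'],
    )
    next_page = query.all()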
- - :param query: the query object to which we should add paging/sorting - :param model: the ORM model class - :param limit: maximum number of items to return - :param sort_keys: array of attributes by which results should be sorted - :param marker: the last item of the previous page; we return the next - results after this value. - :param sort_dir: direction in which results should be sorted (asc, desc); - the suffix -nullsfirst or -nullslast can be added to define - the ordering of null values - :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys - - :rtype: sqlalchemy.orm.query.Query - :return: The query with sorting/pagination added. - """ - - if 'id' not in sort_keys: - # TODO(justinsb): If this ever gives a false-positive, check - # the actual primary key, rather than assuming it is 'id' - LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) - - assert(not (sort_dir and sort_dirs)) - - # Default the sort direction to ascending - if sort_dirs is None and sort_dir is None: - sort_dir = 'asc' - - # Ensure a per-column sort direction - if sort_dirs is None: - sort_dirs = [sort_dir for _sort_key in sort_keys] - - assert(len(sort_dirs) == len(sort_keys)) - - # Add sorting - for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): - try: - inspect(model).all_orm_descriptors[current_sort_key] - except KeyError: - raise exception.InvalidSortKey(current_sort_key) - else: - sort_key_attr = getattr(model, current_sort_key) - - try: - main_sort_dir, __, null_sort_dir = current_sort_dir.partition("-") - sort_dir_func = { - 'asc': sqlalchemy.asc, - 'desc': sqlalchemy.desc, - }[main_sort_dir] - - null_order_by_stmt = { - "": None, - "nullsfirst": sort_key_attr.is_(None), - "nullslast": sort_key_attr.isnot(None), - }[null_sort_dir] - except KeyError: - raise ValueError(_("Unknown sort direction, " - "must be one of: %s") % - ", ".join(_VALID_SORT_DIR)) - - if null_order_by_stmt is not None: - query = query.order_by(sqlalchemy.desc(null_order_by_stmt)) - query = query.order_by(sort_dir_func(sort_key_attr)) - - # Add pagination - if marker is not None: - marker_values = [] - for sort_key in sort_keys: - v = getattr(marker, sort_key) - marker_values.append(v) - - # Build up an array of sort criteria as in the docstring - criteria_list = [] - for i in range(len(sort_keys)): - crit_attrs = [] - for j in range(i): - model_attr = getattr(model, sort_keys[j]) - crit_attrs.append((model_attr == marker_values[j])) - - model_attr = getattr(model, sort_keys[i]) - if sort_dirs[i].startswith('desc'): - crit_attrs.append((model_attr < marker_values[i])) - else: - crit_attrs.append((model_attr > marker_values[i])) - - criteria = sqlalchemy.sql.and_(*crit_attrs) - criteria_list.append(criteria) - - f = sqlalchemy.sql.or_(*criteria_list) - query = query.filter(f) - - if limit is not None: - query = query.limit(limit) - - return query - - -def to_list(x, default=None): - if x is None: - return default - if not isinstance(x, collections.Iterable) or \ - isinstance(x, six.string_types): - return [x] - elif isinstance(x, list): - return x - else: - return list(x) - - -def _read_deleted_filter(query, db_model, deleted): - if 'deleted' not in db_model.__table__.columns: - raise ValueError(_("There is no `deleted` column in `%s` table. 
" - "Project doesn't use soft-deleted feature.") - % db_model.__name__) - - default_deleted_value = db_model.__table__.c.deleted.default.arg - if deleted: - query = query.filter(db_model.deleted != default_deleted_value) - else: - query = query.filter(db_model.deleted == default_deleted_value) - return query - - -def _project_filter(query, db_model, project_id): - if 'project_id' not in db_model.__table__.columns: - raise ValueError(_("There is no `project_id` column in `%s` table.") - % db_model.__name__) - - if isinstance(project_id, (list, tuple, set)): - query = query.filter(db_model.project_id.in_(project_id)) - else: - query = query.filter(db_model.project_id == project_id) - - return query - - -def model_query(model, session, args=None, **kwargs): - """Query helper for db.sqlalchemy api methods. - - This accounts for `deleted` and `project_id` fields. - - :param model: Model to query. Must be a subclass of ModelBase. - :type model: models.ModelBase - - :param session: The session to use. - :type session: sqlalchemy.orm.session.Session - - :param args: Arguments to query. If None - model is used. - :type args: tuple - - Keyword arguments: - - :keyword project_id: If present, allows filtering by project_id(s). - Can be either a project_id value, or an iterable of - project_id values, or None. If an iterable is passed, - only rows whose project_id column value is on the - `project_id` list will be returned. If None is passed, - only rows which are not bound to any project, will be - returned. - :type project_id: iterable, - model.__table__.columns.project_id.type, - None type - - :keyword deleted: If present, allows filtering by deleted field. - If True is passed, only deleted entries will be - returned, if False - only existing entries. - :type deleted: bool - - - Usage: - - .. code-block:: python - - from oslo_db.sqlalchemy import utils - - - def get_instance_by_uuid(uuid): - session = get_session() - with session.begin() - return (utils.model_query(models.Instance, session=session) - .filter(models.Instance.uuid == uuid) - .first()) - - def get_nodes_stat(): - data = (Node.id, Node.cpu, Node.ram, Node.hdd) - - session = get_session() - with session.begin() - return utils.model_query(Node, session=session, args=data).all() - - Also you can create your own helper, based on ``utils.model_query()``. - For example, it can be useful if you plan to use ``project_id`` and - ``deleted`` parameters from project's ``context`` - - .. code-block:: python - - from oslo_db.sqlalchemy import utils - - - def _model_query(context, model, session=None, args=None, - project_id=None, project_only=False, - read_deleted=None): - - # We suppose, that functions ``_get_project_id()`` and - # ``_get_deleted()`` should handle passed parameters and - # context object (for example, decide, if we need to restrict a user - # to query his own entries by project_id or only allow admin to read - # deleted entries). For return values, we expect to get - # ``project_id`` and ``deleted``, which are suitable for the - # ``model_query()`` signature. 
- kwargs = {} - if project_id is not None: - kwargs['project_id'] = _get_project_id(context, project_id, - project_only) - if read_deleted is not None: - kwargs['deleted'] = _get_deleted_dict(context, read_deleted) - session = session or get_session() - - with session.begin(): - return utils.model_query(model, session=session, - args=args, **kwargs) - - def get_instance_by_uuid(context, uuid): - return (_model_query(context, models.Instance, read_deleted='yes') - .filter(models.Instance.uuid == uuid) - .first()) - - def get_nodes_data(context, project_id, project_only='allow_none'): - data = (Node.id, Node.cpu, Node.ram, Node.hdd) - - return (_model_query(context, Node, args=data, project_id=project_id, - project_only=project_only) - .all()) - - """ - - if not issubclass(model, models.ModelBase): - raise TypeError(_("model should be a subclass of ModelBase")) - - query = session.query(model) if not args else session.query(*args) - if 'deleted' in kwargs: - query = _read_deleted_filter(query, model, kwargs['deleted']) - if 'project_id' in kwargs: - query = _project_filter(query, model, kwargs['project_id']) - - return query - - -def get_table(engine, name): - """Returns an sqlalchemy table dynamically from db. - - Needed because the models don't work for us in migrations - as models will be far out of sync with the current data. - - .. warning:: - - Do not use this method when creating ForeignKeys in database migrations - because sqlalchemy needs the same MetaData object to hold information - about the parent table and the reference table in the ForeignKey. This - method uses a unique MetaData object per table object so it won't work - with ForeignKey creation. - """ - metadata = MetaData() - metadata.bind = engine - return Table(name, metadata, autoload=True) - - -class InsertFromSelect(object): - """Form the base for `INSERT INTO table (SELECT ... )` statement. - - DEPRECATED: this class is deprecated and will be removed from oslo_db - in a few releases. Use default SQLAlchemy insert from select implementation - instead - - :param table: table to insert records - :param select: select query - :param cols: list of columns to specify in insert clause - :return: SQLAlchemy :class:`Insert` object instance - - Usage: - - .. code-block:: python - - select = sql.select(table_from) - insert = InsertFromSelect(table_to, select, - ['id', 'name', 'insert_date']) - engine.execute(insert) - - """ - # NOTE(tdurakov): Insert from select implementation added to SQLAlchemy - # starting from version 0.8.7. Default SQLAlchemy implementation should be - # used instead of this. Deprecated. - - def __new__(cls, table, select, cols=None): - if not cols: - cols = [c.name for c in table.c] - - return table.insert(inline=True).from_select(cols, select) - - def __init__(self, table, select, cols=None): - pass - - -def _get_not_supported_column(col_name_col_instance, column_name): - try: - column = col_name_col_instance[column_name] - except KeyError: - msg = _("Please specify column %s in col_name_col_instance " - "param. 
It is required because the column has a " - "type unsupported by SQLite.") - raise exception.ColumnError(msg % column_name) - - if not isinstance(column, Column): - msg = _("col_name_col_instance param has wrong type of " - "column instance for column %s. It should be an instance " - "of sqlalchemy.Column.") - raise exception.ColumnError(msg % column_name) - return column - - -def drop_old_duplicate_entries_from_table(migrate_engine, table_name, - use_soft_delete, *uc_column_names): - """Drop all old rows having the same values for columns in uc_column_names. - - This method drops (or marks as `deleted` if use_soft_delete is True) old - duplicate rows from the table with name `table_name`. - - :param migrate_engine: Sqlalchemy engine - :param table_name: Table with duplicates - :param use_soft_delete: If True, rows will be marked as `deleted`; - if False, rows will be removed from the table - :param uc_column_names: Unique constraint columns - """ - meta = MetaData() - meta.bind = migrate_engine - - table = Table(table_name, meta, autoload=True) - columns_for_group_by = [table.c[name] for name in uc_column_names] - - columns_for_select = [func.max(table.c.id)] - columns_for_select.extend(columns_for_group_by) - - duplicated_rows_select = sqlalchemy.sql.select( - columns_for_select, group_by=columns_for_group_by, - having=func.count(table.c.id) > 1) - - for row in migrate_engine.execute(duplicated_rows_select).fetchall(): - # NOTE(boris-42): Do not remove the row that has the biggest ID. - delete_condition = table.c.id != row[0] - is_none = None # workaround for pyflakes - delete_condition &= table.c.deleted_at == is_none - for name in uc_column_names: - delete_condition &= table.c[name] == row[name] - - rows_to_delete_select = sqlalchemy.sql.select( - [table.c.id]).where(delete_condition) - for row in migrate_engine.execute(rows_to_delete_select).fetchall(): - LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: " - "%(table)s"), dict(id=row[0], table=table_name)) - - if use_soft_delete: - delete_statement = table.update().\ - where(delete_condition).\ - values({ - 'deleted': literal_column('id'), - 'updated_at': literal_column('updated_at'), - 'deleted_at': timeutils.utcnow() - }) - else: - delete_statement = table.delete().where(delete_condition) - migrate_engine.execute(delete_statement) - - -def _get_default_deleted_value(table): - if isinstance(table.c.id.type, Integer): - return 0 - if isinstance(table.c.id.type, String): - return "" - raise exception.ColumnError(_("Unsupported id column type")) - - -def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): - table = get_table(migrate_engine, table_name) - - insp = reflection.Inspector.from_engine(migrate_engine) - real_indexes = insp.get_indexes(table_name) - existing_index_names = dict( - [(index['name'], index['column_names']) for index in real_indexes]) - - # NOTE(boris-42): Restore indexes on the `deleted` column - for index in indexes: - if 'deleted' not in index['column_names']: - continue - name = index['name'] - if name in existing_index_names: - column_names = [table.c[c] for c in existing_index_names[name]] - old_index = Index(name, *column_names, unique=index["unique"]) - old_index.drop(migrate_engine) - - column_names = [table.c[c] for c in index['column_names']] - new_index = Index(index["name"], *column_names, unique=index["unique"]) - new_index.create(migrate_engine) - - -def change_deleted_column_type_to_boolean(migrate_engine, table_name, - **col_name_col_instance): - if migrate_engine.name == "sqlite": -
return _change_deleted_column_type_to_boolean_sqlite( - migrate_engine, table_name, **col_name_col_instance) - insp = reflection.Inspector.from_engine(migrate_engine) - indexes = insp.get_indexes(table_name) - - table = get_table(migrate_engine, table_name) - - old_deleted = Column('old_deleted', Boolean, default=False) - old_deleted.create(table, populate_default=False) - - table.update().\ - where(table.c.deleted == table.c.id).\ - values(old_deleted=True).\ - execute() - - table.c.deleted.drop() - table.c.old_deleted.alter(name="deleted") - - _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) - - -def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, - **col_name_col_instance): - insp = reflection.Inspector.from_engine(migrate_engine) - table = get_table(migrate_engine, table_name) - - columns = [] - for column in table.columns: - column_copy = None - if column.name != "deleted": - if isinstance(column.type, NullType): - column_copy = _get_not_supported_column(col_name_col_instance, - column.name) - else: - column_copy = column.copy() - else: - column_copy = Column('deleted', Boolean, default=0) - columns.append(column_copy) - - constraints = [constraint.copy() for constraint in table.constraints] - - meta = table.metadata - new_table = Table(table_name + "__tmp__", meta, - *(columns + constraints)) - new_table.create() - - indexes = [] - for index in insp.get_indexes(table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], *column_names, - unique=index["unique"])) - - c_select = [] - for c in table.c: - if c.name != "deleted": - c_select.append(c) - else: - c_select.append(table.c.deleted == table.c.id) - - ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select)) - migrate_engine.execute(ins) - - table.drop() - for index in indexes: - index.create(migrate_engine) - - new_table.rename(table_name) - new_table.update().\ - where(new_table.c.deleted == new_table.c.id).\ - values(deleted=True).\ - execute() - - -def change_deleted_column_type_to_id_type(migrate_engine, table_name, - **col_name_col_instance): - if migrate_engine.name == "sqlite": - return _change_deleted_column_type_to_id_type_sqlite( - migrate_engine, table_name, **col_name_col_instance) - insp = reflection.Inspector.from_engine(migrate_engine) - indexes = insp.get_indexes(table_name) - - table = get_table(migrate_engine, table_name) - - new_deleted = Column('new_deleted', table.c.id.type, - default=_get_default_deleted_value(table)) - new_deleted.create(table, populate_default=True) - - deleted = True # workaround for pyflakes - table.update().\ - where(table.c.deleted == deleted).\ - values(new_deleted=table.c.id).\ - execute() - table.c.deleted.drop() - table.c.new_deleted.alter(name="deleted") - - _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) - - -def _is_deleted_column_constraint(constraint): - # NOTE(boris-42): There is no other way to check is CheckConstraint - # associated with deleted column. - if not isinstance(constraint, CheckConstraint): - return False - sqltext = str(constraint.sqltext) - # NOTE(zzzeek): SQLite never reflected CHECK contraints here - # in any case until version 1.1. Safe to assume that any CHECK - # that's talking about the value of "deleted in (something)" is - # the boolean constraint we're looking to get rid of. 
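    # For instance (illustrative constraint texts), reflected CHECK
    # expressions such as "deleted in (0, 1)" or "deleted IN (0, id)" are
    # treated as the soft-delete constraint below, while an unrelated CHECK
    # such as "status in ('up', 'down')" is not.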
- return bool(re.match(r".*deleted in \(.*\)", sqltext, re.I)) - - -def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, - **col_name_col_instance): - # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check - # constraints in a sqlite DB and our `deleted` column has - # 2 check constraints. So there is only one way to remove - # these constraints: - # 1) Create a new table with the same columns, constraints - # and indexes (except the deleted column). - # 2) Copy all data from the old to the new table. - # 3) Drop the old table. - # 4) Rename the new table to the old table name. - insp = reflection.Inspector.from_engine(migrate_engine) - meta = MetaData(bind=migrate_engine) - table = Table(table_name, meta, autoload=True) - default_deleted_value = _get_default_deleted_value(table) - - columns = [] - for column in table.columns: - column_copy = None - if column.name != "deleted": - if isinstance(column.type, NullType): - column_copy = _get_not_supported_column(col_name_col_instance, - column.name) - else: - column_copy = column.copy() - else: - column_copy = Column('deleted', table.c.id.type, - default=default_deleted_value) - columns.append(column_copy) - - constraints = [] - for constraint in table.constraints: - if not _is_deleted_column_constraint(constraint): - constraints.append(constraint.copy()) - - new_table = Table(table_name + "__tmp__", meta, - *(columns + constraints)) - new_table.create() - - indexes = [] - for index in insp.get_indexes(table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], *column_names, - unique=index["unique"])) - - ins = InsertFromSelect(new_table, table.select()) - migrate_engine.execute(ins) - - table.drop() - for index in indexes: - index.create(migrate_engine) - - new_table.rename(table_name) - deleted = True # workaround for pyflakes - new_table.update().\ - where(new_table.c.deleted == deleted).\ - values(deleted=new_table.c.id).\ - execute() - - # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. - deleted = False # workaround for pyflakes - new_table.update().\ - where(new_table.c.deleted == deleted).\ - values(deleted=default_deleted_value).\ - execute() - - -def get_connect_string(backend, database, user=None, passwd=None, - host='localhost'): - """Get a database connection string. - - Try to get a connection with a very specific set of values; if we get - these then we'll run the tests, otherwise they are skipped. - - DEPRECATED: this function is deprecated and will be removed from oslo_db - in a few releases. Please use the provisioning system for dealing - with URLs and database provisioning. - - """ - args = {'backend': backend, - 'user': user, - 'passwd': passwd, - 'host': host, - 'database': database} - if backend == 'sqlite': - template = '%(backend)s:///%(database)s' - else: - template = "%(backend)s://%(user)s:%(passwd)s@%(host)s/%(database)s" - return template % args - - -def is_backend_avail(backend, database, user=None, passwd=None): - """Return True if the given backend is available. - - - DEPRECATED: this function is deprecated and will be removed from oslo_db - in a few releases. Please use the provisioning system to access - databases based on backend availability.
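A typical (illustrative) check looks like::

    if is_backend_avail('postgresql', 'testdb',
                        user='dbuser', passwd='dbpass'):
        # run the PostgreSQL-specific tests
        ...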
- - """ - from oslo_db.sqlalchemy import provision - - connect_uri = get_connect_string(backend=backend, - database=database, - user=user, - passwd=passwd) - try: - eng = provision.Backend._ensure_backend_available(connect_uri) - eng.dispose() - except exception.BackendNotAvailable: - return False - else: - return True - - -def get_db_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - password = auth_pieces[1].strip() - - return (user, password, database, host) - - -def index_exists(migrate_engine, table_name, index_name): - """Check if given index exists. - - :param migrate_engine: sqlalchemy engine - :param table_name: name of the table - :param index_name: name of the index - """ - inspector = reflection.Inspector.from_engine(migrate_engine) - indexes = inspector.get_indexes(table_name) - index_names = [index['name'] for index in indexes] - return index_name in index_names - - -def add_index(migrate_engine, table_name, index_name, idx_columns): - """Create an index for given columns. - - :param migrate_engine: sqlalchemy engine - :param table_name: name of the table - :param index_name: name of the index - :param idx_columns: tuple with names of columns that will be indexed - """ - table = get_table(migrate_engine, table_name) - if not index_exists(migrate_engine, table_name, index_name): - index = Index( - index_name, *[getattr(table.c, col) for col in idx_columns] - ) - index.create() - else: - raise ValueError("Index '%s' already exists!" % index_name) - - -def drop_index(migrate_engine, table_name, index_name): - """Drop index with given name. - - :param migrate_engine: sqlalchemy engine - :param table_name: name of the table - :param index_name: name of the index - """ - table = get_table(migrate_engine, table_name) - for index in table.indexes: - if index.name == index_name: - index.drop() - break - else: - raise ValueError("Index '%s' not found!" % index_name) - - -def change_index_columns(migrate_engine, table_name, index_name, new_columns): - """Change set of columns that are indexed by given index. - - :param migrate_engine: sqlalchemy engine - :param table_name: name of the table - :param index_name: name of the index - :param new_columns: tuple with names of columns that will be indexed - """ - drop_index(migrate_engine, table_name, index_name) - add_index(migrate_engine, table_name, index_name, new_columns) - - -def column_exists(engine, table_name, column): - """Check if table has given column. - - :param engine: sqlalchemy engine - :param table_name: name of the table - :param column: name of the colmn - """ - t = get_table(engine, table_name) - return column in t.c - - -class DialectFunctionDispatcher(object): - @classmethod - def dispatch_for_dialect(cls, expr, multiple=False): - """Provide dialect-specific functionality within distinct functions. 
- - e.g.:: - - @dispatch_for_dialect("*") - def set_special_option(engine): - pass - - @set_special_option.dispatch_for("sqlite") - def set_sqlite_special_option(engine): - return engine.execute("sqlite thing") - - @set_special_option.dispatch_for("mysql+mysqldb") - def set_mysqldb_special_option(engine): - return engine.execute("mysqldb thing") - - After the above registration, the ``set_special_option()`` function - is now a dispatcher, given a SQLAlchemy ``Engine``, ``Connection``, - URL string, or ``sqlalchemy.engine.URL`` object:: - - eng = create_engine('...') - result = set_special_option(eng) - - The filter system supports two modes, "multiple" and "single". - The default is "single", and requires that one and only one function - match for a given backend. In this mode, the function may also - have a return value, which will be returned by the top level - call. - - "multiple" mode, on the other hand, does not support return - arguments, but allows for any number of matching functions, where - each function will be called:: - - # the initial call sets this up as a "multiple" dispatcher - @dispatch_for_dialect("*", multiple=True) - def set_options(engine): - # set options that apply to *all* engines - - @set_options.dispatch_for("postgresql") - def set_postgresql_options(engine): - # set options that apply to all Postgresql engines - - @set_options.dispatch_for("postgresql+psycopg2") - def set_postgresql_psycopg2_options(engine): - # set options that apply only to "postgresql+psycopg2" - - @set_options.dispatch_for("*+pyodbc") - def set_pyodbc_options(engine): - # set options that apply to all pyodbc backends - - Note that in both modes, any number of additional arguments can be - accepted by member functions. For example, to populate a dictionary of - options, it may be passed in:: - - @dispatch_for_dialect("*", multiple=True) - def set_engine_options(url, opts): - pass - - @set_engine_options.dispatch_for("mysql+mysqldb") - def _mysql_set_default_charset_to_utf8(url, opts): - opts.setdefault('charset', 'utf-8') - - @set_engine_options.dispatch_for("sqlite") - def _set_sqlite_in_memory_check_same_thread(url, opts): - if url.database in (None, 'memory'): - opts['check_same_thread'] = False - - opts = {} - set_engine_options(url, opts) - - The driver specifiers are of the form: - ``[+]``. That is, database name or "*", - followed by an optional ``+`` sign with driver or "*". Omitting - the driver name implies all drivers for that database. 
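For example, continuing the illustrative ``set_options`` dispatcher above, a database-only specifier matches every driver for that database::

    @set_options.dispatch_for("mysql")
    def set_mysql_options(engine):
        # applies to mysql+mysqldb, mysql+pymysql, and any other MySQL driver
        pass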
- - """ - if multiple: - cls = DialectMultiFunctionDispatcher - else: - cls = DialectSingleFunctionDispatcher - return cls().dispatch_for(expr) - - _db_plus_driver_reg = re.compile(r'([^+]+?)(?:\+(.+))?$') - - def dispatch_for(self, expr): - def decorate(fn): - dbname, driver = self._parse_dispatch(expr) - if fn is self: - fn = fn._last - self._last = fn - self._register(expr, dbname, driver, fn) - return self - return decorate - - def _parse_dispatch(self, text): - m = self._db_plus_driver_reg.match(text) - if not m: - raise ValueError("Couldn't parse database[+driver]: %r" % text) - return m.group(1) or '*', m.group(2) or '*' - - def __call__(self, *arg, **kw): - target = arg[0] - return self._dispatch_on( - self._url_from_target(target), target, arg, kw) - - def _url_from_target(self, target): - if isinstance(target, Connectable): - return target.engine.url - elif isinstance(target, six.string_types): - if "://" not in target: - target_url = sa_url.make_url("%s://" % target) - else: - target_url = sa_url.make_url(target) - return target_url - elif isinstance(target, sa_url.URL): - return target - else: - raise ValueError("Invalid target type: %r" % target) - - def dispatch_on_drivername(self, drivername): - """Return a sub-dispatcher for the given drivername. - - This provides a means of calling a different function, such as the - "*" function, for a given target object that normally refers - to a sub-function. - - """ - dbname, driver = self._db_plus_driver_reg.match(drivername).group(1, 2) - - def go(*arg, **kw): - return self._dispatch_on_db_driver(dbname, "*", arg, kw) - - return go - - def _dispatch_on(self, url, target, arg, kw): - dbname, driver = self._db_plus_driver_reg.match( - url.drivername).group(1, 2) - if not driver: - driver = url.get_dialect().driver - - return self._dispatch_on_db_driver(dbname, driver, arg, kw) - - def _invoke_fn(self, fn, arg, kw): - return fn(*arg, **kw) - - -class DialectSingleFunctionDispatcher(DialectFunctionDispatcher): - def __init__(self): - self.reg = collections.defaultdict(dict) - - def _register(self, expr, dbname, driver, fn): - fn_dict = self.reg[dbname] - if driver in fn_dict: - raise TypeError("Multiple functions for expression %r" % expr) - fn_dict[driver] = fn - - def _matches(self, dbname, driver): - for db in (dbname, '*'): - subdict = self.reg[db] - for drv in (driver, '*'): - if drv in subdict: - return subdict[drv] - else: - raise ValueError( - "No default function found for driver: %r" % - ("%s+%s" % (dbname, driver))) - - def _dispatch_on_db_driver(self, dbname, driver, arg, kw): - fn = self._matches(dbname, driver) - return self._invoke_fn(fn, arg, kw) - - -class DialectMultiFunctionDispatcher(DialectFunctionDispatcher): - def __init__(self): - self.reg = collections.defaultdict( - lambda: collections.defaultdict(list)) - - def _register(self, expr, dbname, driver, fn): - self.reg[dbname][driver].append(fn) - - def _matches(self, dbname, driver): - if driver != '*': - drivers = (driver, '*') - else: - drivers = ('*', ) - - for db in (dbname, '*'): - subdict = self.reg[db] - for drv in drivers: - for fn in subdict[drv]: - yield fn - - def _dispatch_on_db_driver(self, dbname, driver, arg, kw): - for fn in self._matches(dbname, driver): - if self._invoke_fn(fn, arg, kw) is not None: - raise TypeError( - "Return value not allowed for " - "multiple filtered function") - -dispatch_for_dialect = DialectFunctionDispatcher.dispatch_for_dialect - - -def get_non_innodb_tables(connectable, skip_tables=('migrate_version', - 
'alembic_version')): - """Get a list of tables which don't use the InnoDB storage engine. - - :param connectable: a SQLAlchemy Engine or a Connection instance - :param skip_tables: a list of tables which might have a different - storage engine - """ - - query_str = """ - SELECT table_name - FROM information_schema.tables - WHERE table_schema = :database AND - engine != 'InnoDB' - """ - - params = {} - if skip_tables: - params = dict( - ('skip_%s' % i, table_name) - for i, table_name in enumerate(skip_tables) - ) - - placeholders = ', '.join(':' + p for p in params) - query_str += ' AND table_name NOT IN (%s)' % placeholders - - params['database'] = connectable.engine.url.database - query = text(query_str) - noninnodb = connectable.execute(query, **params) - return [i[0] for i in noninnodb] - - -class NonCommittingConnectable(object): - """A ``Connectable`` substitute which rolls all operations back. - - ``NonCommittingConnectable`` forms the basis of mock - ``Engine`` and ``Connection`` objects within a test. It provides - only that part of the API that should reasonably be used within - a single-connection test environment (e.g. no engine.dispose(), - connection.invalidate(), etc.). The connection runs both within - a transaction as well as a savepoint. The transaction is there - so that any operations upon the connection can be rolled back. - If the test calls begin(), a "pseudo" transaction is returned that - won't actually commit anything. The subtransaction is there to allow - a test to successfully call rollback(): all operations - up to that point will be rolled back and the operations can continue, - simulating a real rollback while still remaining within a transaction - external to the test. - - """ - - def __init__(self, connection): - self.connection = connection - self._trans = connection.begin() - self._restart_nested() - - def _restart_nested(self): - self._nested_trans = self.connection.begin_nested() - - def _dispose(self): - if not self.connection.closed: - self._nested_trans.rollback() - self._trans.rollback() - self.connection.close() - - def execute(self, obj, *multiparams, **params): - """Executes the given construct and returns a :class:`.ResultProxy`.""" - - return self.connection.execute(obj, *multiparams, **params) - - def scalar(self, obj, *multiparams, **params): - """Executes and returns the first column of the first row.""" - - return self.connection.scalar(obj, *multiparams, **params) - - -class NonCommittingEngine(NonCommittingConnectable): - """``Engine``-specific non-committing connectable.""" - - @property - def url(self): - return self.connection.engine.url - - @property - def engine(self): - return self - - def connect(self): - return NonCommittingConnection(self.connection) - - @contextlib.contextmanager - def begin(self): - conn = self.connect() - trans = conn.begin() - try: - yield conn - except Exception: - trans.rollback() - else: - trans.commit() - - -class NonCommittingConnection(NonCommittingConnectable): - """``Connection``-specific non-committing connectable.""" - - def close(self): - """Close the 'Connection'. - - In this context, close() is a no-op. - - """ - pass - - def begin(self): - return NonCommittingTransaction(self, self.connection.begin()) - - def __enter__(self): - return self - - def __exit__(self, *arg): - pass - - -class NonCommittingTransaction(object): - """A wrapper for ``Transaction``. - - This is to accommodate being able to guarantee the start of a new - SAVEPOINT when a transaction is rolled back.
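For example, a test might use it along these lines (``some_table`` and ``real_connection`` are illustrative)::

    conn = NonCommittingConnection(real_connection)
    trans = conn.begin()
    try:
        conn.execute(some_table.insert().values(id=1))
    except Exception:
        trans.rollback()   # rolls back and a fresh SAVEPOINT is begun
        raise
    else:
        trans.commit()     # commits only the wrapped transaction; the outer
                           # transaction held by the fixture is never committed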
- - """ - def __init__(self, provisioned, transaction): - self.provisioned = provisioned - self.transaction = transaction - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if type is None: - try: - self.commit() - except Exception: - self.rollback() - raise - else: - self.rollback() - - def commit(self): - self.transaction.commit() - - def rollback(self): - self.transaction.rollback() - self.provisioned._restart_nested() diff --git a/oslo_db/tests/__init__.py b/oslo_db/tests/__init__.py deleted file mode 100644 index f080dde..0000000 --- a/oslo_db/tests/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2014 Rackspace -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - - -def should_run_eventlet_tests(): - return bool(int(os.environ.get('TEST_EVENTLET') or '0')) - - -if should_run_eventlet_tests(): - import eventlet - eventlet.monkey_patch() diff --git a/oslo_db/tests/base.py b/oslo_db/tests/base.py deleted file mode 100644 index 69e6a80..0000000 --- a/oslo_db/tests/base.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import fixtures -import testtools - -_TRUE_VALUES = ('true', '1', 'yes') - -# FIXME(dhellmann) Update this to use oslo.test library - - -class TestCase(testtools.TestCase): - - """Test case base class for all unit tests.""" - - def setUp(self): - """Run before each test method to initialize test environment.""" - - super(TestCase, self).setUp() - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. 
- test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - self.log_fixture = self.useFixture(fixtures.FakeLogger()) diff --git a/oslo_db/tests/sqlalchemy/__init__.py b/oslo_db/tests/sqlalchemy/__init__.py deleted file mode 100644 index cb712d2..0000000 --- a/oslo_db/tests/sqlalchemy/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_db.sqlalchemy import test_base - -load_tests = test_base.optimize_db_test_loader(__file__) diff --git a/oslo_db/tests/sqlalchemy/test_async_eventlet.py b/oslo_db/tests/sqlalchemy/test_async_eventlet.py deleted file mode 100644 index 58e4787..0000000 --- a/oslo_db/tests/sqlalchemy/test_async_eventlet.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2014 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for SQLAlchemy and eventlet interaction.""" - -import logging -import unittest2 - -from oslo_utils import importutils -import sqlalchemy as sa -from sqlalchemy.ext import declarative as sa_decl - -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import test_base -from oslo_db import tests - - -class EventletTestMixin(object): - def setUp(self): - super(EventletTestMixin, self).setUp() - - BASE = sa_decl.declarative_base() - - class TmpTable(BASE, models.ModelBase): - __tablename__ = 'test_async_eventlet' - id = sa.Column('id', sa.Integer, primary_key=True, nullable=False) - foo = sa.Column('foo', sa.Integer) - __table_args__ = ( - sa.UniqueConstraint('foo', name='uniq_foo'), - ) - - self.test_table = TmpTable - TmpTable.__table__.create(self.engine) - self.addCleanup(lambda: TmpTable.__table__.drop(self.engine)) - - @unittest2.skipIf(not tests.should_run_eventlet_tests(), - 'eventlet tests disabled unless TEST_EVENTLET=1') - def test_concurrent_transaction(self): - # Cause sqlalchemy to log executed SQL statements. 
Useful to - # determine exactly what and when was sent to DB. - sqla_logger = logging.getLogger('sqlalchemy.engine') - sqla_logger.setLevel(logging.INFO) - self.addCleanup(sqla_logger.setLevel, logging.NOTSET) - - def operate_on_row(name, ready=None, proceed=None): - logging.debug('%s starting', name) - _session = self.sessionmaker() - with _session.begin(): - logging.debug('%s ready', name) - - # Modify the same row, inside transaction - tbl = self.test_table() - tbl.update({'foo': 10}) - tbl.save(_session) - - if ready is not None: - ready.send() - if proceed is not None: - logging.debug('%s waiting to proceed', name) - proceed.wait() - logging.debug('%s exiting transaction', name) - logging.debug('%s terminating', name) - return True - - eventlet = importutils.try_import('eventlet') - if eventlet is None: - return self.skip('eventlet is required for this test') - - a_ready = eventlet.event.Event() - a_proceed = eventlet.event.Event() - b_proceed = eventlet.event.Event() - - # thread A opens transaction - logging.debug('spawning A') - a = eventlet.spawn(operate_on_row, 'A', - ready=a_ready, proceed=a_proceed) - logging.debug('waiting for A to enter transaction') - a_ready.wait() - - # thread B opens transaction on same row - logging.debug('spawning B') - b = eventlet.spawn(operate_on_row, 'B', - proceed=b_proceed) - logging.debug('waiting for B to (attempt to) enter transaction') - eventlet.sleep(1) # should(?) advance B to blocking on transaction - - # While B is still blocked, A should be able to proceed - a_proceed.send() - - # Will block forever(*) if DB library isn't reentrant. - # (*) Until some form of timeout/deadlock detection kicks in. - # This is the key test that async is working. If this hangs - # (or raises a timeout/deadlock exception), then you have failed - # this test. - self.assertTrue(a.wait()) - - b_proceed.send() - # If everything proceeded without blocking, B will throw a - # "duplicate entry" exception when it tries to insert the same row - self.assertRaises(db_exc.DBDuplicateEntry, b.wait) - - -# Note that sqlite fails the above concurrency tests, and is not -# mentioned below. -# ie: This file performs no tests by default. - -class MySQLEventletTestCase(EventletTestMixin, - test_base.MySQLOpportunisticTestCase): - pass - - -class PostgreSQLEventletTestCase(EventletTestMixin, - test_base.PostgreSQLOpportunisticTestCase): - pass diff --git a/oslo_db/tests/sqlalchemy/test_enginefacade.py b/oslo_db/tests/sqlalchemy/test_enginefacade.py deleted file mode 100644 index 517b7f9..0000000 --- a/oslo_db/tests/sqlalchemy/test_enginefacade.py +++ /dev/null @@ -1,2051 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import contextlib -import copy -import pickle -import warnings - -import mock -from oslo_config import cfg -from oslo_context import context as oslo_context -from oslotest import base as oslo_test_base -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy.orm import mapper -from sqlalchemy import select -from sqlalchemy import String -from sqlalchemy import Table - -from oslo_db import exception -from oslo_db import options -from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import engines as oslo_engines -from oslo_db.sqlalchemy import orm -from oslo_db.sqlalchemy import test_base - - -enginefacade.transaction_context_provider(oslo_context.RequestContext) - - -class SingletonOnName(mock.MagicMock): - def __init__(self, the_name, **kw): - super(SingletonOnName, self).__init__( - __eq__=lambda self, other: other._assert_name == self._assert_name, - _assert_name=the_name, - **kw - ) - - def __deepcopy__(self, memo): - return self - - -class SingletonConnection(SingletonOnName): - def __init__(self, **kw): - super(SingletonConnection, self).__init__( - "connection", **kw) - - -class SingletonEngine(SingletonOnName): - def __init__(self, connection, **kw): - super(SingletonEngine, self).__init__( - "engine", - connect=mock.Mock(return_value=connection), - pool=mock.Mock(), - url=connection, - _assert_connection=connection, - **kw - ) - - -class NonDecoratedContext(object): - """a Context object that's not run through transaction_context_provider.""" - - -class AssertDataSource(collections.namedtuple( - "AssertDataSource", ["writer", "reader", "async_reader"])): - - def element_for_writer(self, const): - if const is enginefacade._WRITER: - return self.writer - elif const is enginefacade._READER: - return self.reader - elif const is enginefacade._ASYNC_READER: - return self.async_reader - else: - assert False, "Unknown constant: %s" % const - - -class MockFacadeTest(oslo_test_base.BaseTestCase): - """test by applying mocks to internal call-points. - - This applies mocks to - oslo.db.sqlalchemy.engines.create_engine() and - oslo.db.sqlalchemy.orm.get_maker(), then mocking a - _TransactionFactory into - oslo.db.sqlalchemy.enginefacade._context_manager._root_factory. - - Various scenarios are run against the enginefacade functions, and the - exact calls made against the mock create_engine(), get_maker(), and - associated objects are tested exactly against expected calls. 
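A scenario typically exercises the decorator or context-manager forms of the facade, along the lines of (``SomeModel`` is an illustrative mapped class)::

    context = oslo_context.RequestContext()

    with enginefacade.writer.using(context) as session:
        session.add(SomeModel(id=1))

    with enginefacade.reader.using(context) as session:
        session.query(SomeModel).all()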
- - """ - - synchronous_reader = True - - engine_uri = 'some_connection' - slave_uri = None - - def setUp(self): - super(MockFacadeTest, self).setUp() - - writer_conn = SingletonConnection() - writer_engine = SingletonEngine(writer_conn) - writer_session = mock.Mock( - connection=mock.Mock(return_value=writer_conn)) - writer_maker = mock.Mock(return_value=writer_session) - - if self.slave_uri: - async_reader_conn = SingletonConnection() - async_reader_engine = SingletonEngine(async_reader_conn) - async_reader_session = mock.Mock( - connection=mock.Mock(return_value=async_reader_conn)) - async_reader_maker = mock.Mock(return_value=async_reader_session) - - else: - async_reader_conn = writer_conn - async_reader_engine = writer_engine - async_reader_session = writer_session - async_reader_maker = writer_maker - - if self.synchronous_reader: - reader_conn = async_reader_conn - reader_engine = async_reader_engine - reader_session = async_reader_session - reader_maker = async_reader_maker - else: - reader_conn = writer_conn - reader_engine = writer_engine - reader_session = writer_session - reader_maker = writer_maker - - self.connections = AssertDataSource( - writer_conn, reader_conn, async_reader_conn - ) - self.engines = AssertDataSource( - writer_engine, reader_engine, async_reader_engine - ) - self.sessions = AssertDataSource( - writer_session, reader_session, async_reader_session - ) - self.makers = AssertDataSource( - writer_maker, reader_maker, async_reader_maker - ) - - def get_maker(engine, **kw): - if engine is writer_engine: - return self.makers.writer - elif engine is reader_engine: - return self.makers.reader - elif engine is async_reader_engine: - return self.makers.async_reader - else: - assert False - - session_patch = mock.patch.object( - orm, "get_maker", - side_effect=get_maker) - self.get_maker = session_patch.start() - self.addCleanup(session_patch.stop) - - def create_engine(sql_connection, **kw): - if sql_connection == self.engine_uri: - return self.engines.writer - elif sql_connection == self.slave_uri: - return self.engines.async_reader - else: - assert False - - engine_patch = mock.patch.object( - oslo_engines, "create_engine", side_effect=create_engine) - - self.create_engine = engine_patch.start() - self.addCleanup(engine_patch.stop) - - self.factory = enginefacade._TransactionFactory() - self.factory.configure( - synchronous_reader=self.synchronous_reader - ) - - self.factory.configure( - connection=self.engine_uri, - slave_connection=self.slave_uri - ) - - facade_patcher = mock.patch.object( - enginefacade._context_manager, "_root_factory", self.factory) - facade_patcher.start() - self.addCleanup(facade_patcher.stop) - - def _assert_ctx_connection(self, context, connection): - self.assertIs(context.connection, connection) - - def _assert_ctx_session(self, context, session): - self.assertIs(context.session, session) - - def _assert_non_decorated_ctx_connection(self, context, connection): - transaction_ctx = enginefacade._transaction_ctx_for_context(context) - self.assertIs(transaction_ctx.connection, connection) - - def _assert_non_decorated_ctx_session(self, context, session): - transaction_ctx = enginefacade._transaction_ctx_for_context(context) - self.assertIs(transaction_ctx.session, session) - - @contextlib.contextmanager - def _assert_engines(self): - """produce a mock series of engine calls. - - These are expected to match engine-related calls established - by the test subject. 
- - """ - - writer_conn = SingletonConnection() - writer_engine = SingletonEngine(writer_conn) - if self.slave_uri: - async_reader_conn = SingletonConnection() - async_reader_engine = SingletonEngine(async_reader_conn) - else: - async_reader_conn = writer_conn - async_reader_engine = writer_engine - - if self.synchronous_reader: - reader_engine = async_reader_engine - else: - reader_engine = writer_engine - - engines = AssertDataSource( - writer_engine, reader_engine, async_reader_engine) - - def create_engine(sql_connection, **kw): - if sql_connection == self.engine_uri: - return engines.writer - elif sql_connection == self.slave_uri: - return engines.async_reader - else: - assert False - - engine_factory = mock.Mock(side_effect=create_engine) - engine_factory( - sql_connection=self.engine_uri, - **dict((k, mock.ANY) for k in self.factory._engine_cfg.keys()) - ) - if self.slave_uri: - engine_factory( - sql_connection=self.slave_uri, - **dict((k, mock.ANY) for k in self.factory._engine_cfg.keys()) - ) - - yield AssertDataSource( - writer_engine, reader_engine, async_reader_engine - ) - - self.assertEqual( - engine_factory.mock_calls, - self.create_engine.mock_calls - ) - - for sym in [ - enginefacade._WRITER, enginefacade._READER, - enginefacade._ASYNC_READER - ]: - self.assertEqual( - engines.element_for_writer(sym).mock_calls, - self.engines.element_for_writer(sym).mock_calls - ) - - def _assert_async_reader_connection(self, engines, session=None): - return self._assert_connection( - engines, enginefacade._ASYNC_READER, session) - - def _assert_reader_connection(self, engines, session=None): - return self._assert_connection(engines, enginefacade._READER, session) - - def _assert_writer_connection(self, engines, session=None): - return self._assert_connection(engines, enginefacade._WRITER, session) - - @contextlib.contextmanager - def _assert_connection(self, engines, writer, session=None): - """produce a mock series of connection calls. - - These are expected to match connection-related calls established - by the test subject. 
- - """ - if session: - connection = session.connection() - yield connection - else: - connection = engines.element_for_writer(writer).connect() - trans = connection.begin() - yield connection - if writer is enginefacade._WRITER: - trans.commit() - else: - trans.rollback() - connection.close() - - self.assertEqual( - connection.mock_calls, - self.connections.element_for_writer(writer).mock_calls) - - @contextlib.contextmanager - def _assert_makers(self, engines): - - writer_session = mock.Mock(connection=mock.Mock( - return_value=engines.writer._assert_connection) - ) - writer_maker = mock.Mock(return_value=writer_session) - - if self.slave_uri: - async_reader_session = mock.Mock(connection=mock.Mock( - return_value=engines.async_reader._assert_connection) - ) - async_reader_maker = mock.Mock(return_value=async_reader_session) - else: - async_reader_session = writer_session - async_reader_maker = writer_maker - - if self.synchronous_reader: - reader_maker = async_reader_maker - else: - reader_maker = writer_maker - - makers = AssertDataSource( - writer_maker, - reader_maker, - async_reader_maker, - ) - - def get_maker(engine, **kw): - if engine is engines.writer: - return makers.writer - elif engine is engines.reader: - return makers.reader - elif engine is engines.async_reader: - return makers.async_reader - else: - assert False - - maker_factories = mock.Mock(side_effect=get_maker) - - maker_factories( - autocommit=True, engine=engines.writer, - expire_on_commit=False) - if self.slave_uri: - maker_factories( - autocommit=True, engine=engines.async_reader, - expire_on_commit=False) - - yield makers - - self.assertEqual( - maker_factories.mock_calls, - self.get_maker.mock_calls) - - for sym in [ - enginefacade._WRITER, enginefacade._READER, - enginefacade._ASYNC_READER - ]: - self.assertEqual( - makers.element_for_writer(sym).mock_calls, - self.makers.element_for_writer(sym).mock_calls) - - def _assert_async_reader_session( - self, makers, connection=None, assert_calls=True): - return self._assert_session( - makers, enginefacade._ASYNC_READER, connection, assert_calls) - - def _assert_reader_session( - self, makers, connection=None, assert_calls=True): - return self._assert_session( - makers, enginefacade._READER, - connection, assert_calls) - - def _assert_writer_session( - self, makers, connection=None, assert_calls=True): - return self._assert_session( - makers, enginefacade._WRITER, - connection, assert_calls) - - @contextlib.contextmanager - def _assert_session( - self, makers, writer, connection=None, assert_calls=True): - """produce a mock series of session calls. - - These are expected to match session-related calls established - by the test subject. 
- - """ - - if connection: - session = makers.element_for_writer(writer)(bind=connection) - else: - session = makers.element_for_writer(writer)() - session.begin() - yield session - if writer is enginefacade._WRITER: - session.commit() - elif enginefacade.\ - _context_manager._factory._transaction_ctx_cfg[ - 'rollback_reader_sessions']: - session.rollback() - session.close() - - if assert_calls: - self.assertEqual( - session.mock_calls, - self.sessions.element_for_writer(writer).mock_calls) - - def test_dispose_pool(self): - facade = enginefacade.transaction_context() - - facade.configure( - connection=self.engine_uri, - ) - - facade.dispose_pool() - self.assertFalse(hasattr(facade._factory, '_writer_engine')) - - facade._factory._start() - facade.dispose_pool() - - self.assertEqual( - facade._factory._writer_engine.pool.mock_calls, - [mock.call.dispose()] - ) - - def test_dispose_pool_w_reader(self): - facade = enginefacade.transaction_context() - - facade.configure( - connection=self.engine_uri, - slave_connection=self.slave_uri - ) - - facade.dispose_pool() - self.assertFalse(hasattr(facade._factory, '_writer_engine')) - self.assertFalse(hasattr(facade._factory, '_reader_engine')) - - facade._factory._start() - facade.dispose_pool() - - self.assertEqual( - facade._factory._writer_engine.pool.mock_calls, - [mock.call.dispose()] - ) - self.assertEqual( - facade._factory._reader_engine.pool.mock_calls, - [mock.call.dispose()] - ) - - def test_session_reader_decorator(self): - context = oslo_context.RequestContext() - - @enginefacade.reader - def go(context): - context.session.execute("test") - go(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test") - - def test_connection_reader_decorator(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.connection - def go(context): - context.connection.execute("test") - go(context) - - with self._assert_engines() as engines: - with self._assert_reader_connection(engines) as connection: - connection.execute("test") - - def test_session_reader_nested_in_connection_reader(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.connection - def go1(context): - context.connection.execute("test1") - go2(context) - - @enginefacade.reader - def go2(context): - context.session.execute("test2") - go1(context) - - with self._assert_engines() as engines: - with self._assert_reader_connection(engines) as connection: - connection.execute("test1") - with self._assert_makers(engines) as makers: - with self._assert_reader_session( - makers, connection) as session: - session.execute("test2") - - def test_connection_reader_nested_in_session_reader(self): - context = oslo_context.RequestContext() - - @enginefacade.reader - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.reader.connection - def go2(context): - context.connection.execute("test2") - - go1(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test1") - with self._assert_reader_connection( - engines, session) as connection: - connection.execute("test2") - - def test_session_reader_decorator_nested(self): - context = oslo_context.RequestContext() - - @enginefacade.reader - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.reader - def go2(context): - 
context.session.execute("test2") - go1(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test1") - session.execute("test2") - - def test_reader_nested_in_writer_ok(self): - context = oslo_context.RequestContext() - - @enginefacade.writer - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.reader - def go2(context): - context.session.execute("test2") - - go1(context) - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_writer_session(makers) as session: - session.execute("test1") - session.execute("test2") - - def test_writer_nested_in_reader_raises(self): - context = oslo_context.RequestContext() - - @enginefacade.reader - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.writer - def go2(context): - context.session.execute("test2") - - exc = self.assertRaises( - TypeError, go1, context - ) - self.assertEqual( - "Can't upgrade a READER " - "transaction to a WRITER mid-transaction", - exc.args[0] - ) - - def test_async_on_writer_raises(self): - exc = self.assertRaises( - TypeError, getattr, enginefacade.writer, "async" - ) - self.assertEqual( - "Setting async on a WRITER makes no sense", - exc.args[0] - ) - - def test_savepoint_and_independent_raises(self): - exc = self.assertRaises( - TypeError, getattr, enginefacade.writer.independent, "savepoint" - ) - self.assertEqual( - "setting savepoint and independent makes no sense.", - exc.args[0] - ) - - def test_reader_nested_in_async_reader_raises(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.async - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.reader - def go2(context): - context.session.execute("test2") - - exc = self.assertRaises( - TypeError, go1, context - ) - self.assertEqual( - "Can't upgrade an ASYNC_READER transaction " - "to a READER mid-transaction", - exc.args[0] - ) - - def test_reader_allow_async_nested_in_async_reader(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.async - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.reader.allow_async - def go2(context): - context.session.execute("test2") - - go1(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_async_reader_session(makers) as session: - session.execute("test1") - session.execute("test2") - - def test_reader_allow_async_nested_in_reader(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.reader - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.reader.allow_async - def go2(context): - context.session.execute("test2") - - go1(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test1") - session.execute("test2") - - def test_reader_allow_async_is_reader_by_default(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.allow_async - def go1(context): - context.session.execute("test1") - - go1(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test1") - - def test_writer_nested_in_async_reader_raises(self): - context 
= oslo_context.RequestContext() - - @enginefacade.reader.async - def go1(context): - context.session.execute("test1") - go2(context) - - @enginefacade.writer - def go2(context): - context.session.execute("test2") - - exc = self.assertRaises( - TypeError, go1, context - ) - self.assertEqual( - "Can't upgrade an ASYNC_READER transaction to a " - "WRITER mid-transaction", - exc.args[0] - ) - - def test_reader_then_writer_ok(self): - context = oslo_context.RequestContext() - - @enginefacade.reader - def go1(context): - context.session.execute("test1") - - @enginefacade.writer - def go2(context): - context.session.execute("test2") - - go1(context) - go2(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session( - makers, assert_calls=False) as session: - session.execute("test1") - with self._assert_writer_session(makers) as session: - session.execute("test2") - - def test_async_reader_then_reader_ok(self): - context = oslo_context.RequestContext() - - @enginefacade.reader.async - def go1(context): - context.session.execute("test1") - - @enginefacade.reader - def go2(context): - context.session.execute("test2") - - go1(context) - go2(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_async_reader_session( - makers, assert_calls=False) as session: - session.execute("test1") - with self._assert_reader_session(makers) as session: - session.execute("test2") - - def test_using_reader(self): - context = oslo_context.RequestContext() - - with enginefacade.reader.using(context) as session: - self._assert_ctx_session(context, session) - session.execute("test1") - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test1") - - def test_using_reader_rollback_reader_session(self): - enginefacade.configure(rollback_reader_sessions=True) - - context = oslo_context.RequestContext() - - with enginefacade.reader.using(context) as session: - self._assert_ctx_session(context, session) - session.execute("test1") - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test1") - - def test_using_writer(self): - context = oslo_context.RequestContext() - - with enginefacade.writer.using(context) as session: - self._assert_ctx_session(context, session) - session.execute("test1") - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_writer_session(makers) as session: - session.execute("test1") - - def test_using_writer_no_descriptors(self): - context = NonDecoratedContext() - - with enginefacade.writer.using(context) as session: - self._assert_non_decorated_ctx_session(context, session) - session.execute("test1") - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_writer_session(makers) as session: - session.execute("test1") - - def test_using_writer_connection_no_descriptors(self): - context = NonDecoratedContext() - - with enginefacade.writer.connection.using(context) as connection: - self._assert_non_decorated_ctx_connection(context, connection) - connection.execute("test1") - - with self._assert_engines() as engines: - with self._assert_writer_connection(engines) as conn: - conn.execute("test1") - - def test_using_reader_connection(self): - 
context = oslo_context.RequestContext() - - with enginefacade.reader.connection.using(context) as connection: - self._assert_ctx_connection(context, connection) - connection.execute("test1") - - with self._assert_engines() as engines: - with self._assert_reader_connection(engines) as conn: - conn.execute("test1") - - def test_using_writer_connection(self): - context = oslo_context.RequestContext() - - with enginefacade.writer.connection.using(context) as connection: - self._assert_ctx_connection(context, connection) - connection.execute("test1") - - with self._assert_engines() as engines: - with self._assert_writer_connection(engines) as conn: - conn.execute("test1") - - def test_context_copied_using_existing_writer_connection(self): - context = oslo_context.RequestContext() - - with enginefacade.writer.connection.using(context) as connection: - self._assert_ctx_connection(context, connection) - connection.execute("test1") - - ctx2 = copy.deepcopy(context) - - with enginefacade.reader.connection.using(ctx2) as conn2: - self.assertIs(conn2, connection) - self._assert_ctx_connection(ctx2, conn2) - - conn2.execute("test2") - - with self._assert_engines() as engines: - with self._assert_writer_connection(engines) as conn: - conn.execute("test1") - conn.execute("test2") - - def test_context_nodesc_copied_using_existing_writer_connection(self): - context = NonDecoratedContext() - - with enginefacade.writer.connection.using(context) as connection: - self._assert_non_decorated_ctx_connection(context, connection) - connection.execute("test1") - - ctx2 = copy.deepcopy(context) - - with enginefacade.reader.connection.using(ctx2) as conn2: - self.assertIs(conn2, connection) - self._assert_non_decorated_ctx_connection(ctx2, conn2) - - conn2.execute("test2") - - with self._assert_engines() as engines: - with self._assert_writer_connection(engines) as conn: - conn.execute("test1") - conn.execute("test2") - - def test_session_context_notrequested_exception(self): - context = oslo_context.RequestContext() - - with enginefacade.reader.connection.using(context): - exc = self.assertRaises( - exception.ContextNotRequestedError, - getattr, context, 'session' - ) - - self.assertRegexpMatches( - exc.args[0], - "The 'session' context attribute was requested but it has " - "not been established for this context." - ) - - def test_connection_context_notrequested_exception(self): - context = oslo_context.RequestContext() - - with enginefacade.reader.using(context): - exc = self.assertRaises( - exception.ContextNotRequestedError, - getattr, context, 'connection' - ) - - self.assertRegexpMatches( - exc.args[0], - "The 'connection' context attribute was requested but it has " - "not been established for this context." - ) - - def test_session_context_exception(self): - context = oslo_context.RequestContext() - exc = self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'session' - ) - - self.assertRegexpMatches( - exc.args[0], - "No TransactionContext is established for " - "this .*RequestContext.* object within the current " - "thread; the 'session' attribute is unavailable." 
- ) - - def test_session_context_getattr(self): - context = oslo_context.RequestContext() - self.assertIsNone(getattr(context, 'session', None)) - - def test_connection_context_exception(self): - context = oslo_context.RequestContext() - exc = self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'connection' - ) - - self.assertRegexpMatches( - exc.args[0], - "No TransactionContext is established for " - "this .*RequestContext.* object within the current " - "thread; the 'connection' attribute is unavailable." - ) - - def test_connection_context_getattr(self): - context = oslo_context.RequestContext() - self.assertIsNone(getattr(context, 'connection', None)) - - def test_transaction_context_exception(self): - context = oslo_context.RequestContext() - exc = self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'transaction' - ) - - self.assertRegexpMatches( - exc.args[0], - "No TransactionContext is established for " - "this .*RequestContext.* object within the current " - "thread; the 'transaction' attribute is unavailable." - ) - - def test_transaction_context_getattr(self): - context = oslo_context.RequestContext() - self.assertIsNone(getattr(context, 'transaction', None)) - - def test_trans_ctx_context_exception(self): - context = oslo_context.RequestContext() - exc = self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'transaction_ctx' - ) - - self.assertRegexpMatches( - exc.args[0], - "No TransactionContext is established for " - "this .*RequestContext.* object within the current " - "thread." - ) - - def test_trans_ctx_context_getattr(self): - context = oslo_context.RequestContext() - self.assertIsNone(getattr(context, 'transaction_ctx', None)) - - def test_multiple_factories(self): - """Test that the instrumentation applied to a context class is - - independent of a specific _TransactionContextManager - / _TransactionFactory. - - """ - mgr1 = enginefacade.transaction_context() - mgr1.configure( - connection=self.engine_uri, - slave_connection=self.slave_uri - ) - mgr2 = enginefacade.transaction_context() - mgr2.configure( - connection=self.engine_uri, - slave_connection=self.slave_uri - ) - - context = oslo_context.RequestContext() - - self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'session' - ) - with mgr1.writer.using(context): - self.assertIs(context.transaction_ctx.factory, mgr1._factory) - self.assertIsNot(context.transaction_ctx.factory, mgr2._factory) - self.assertIsNotNone(context.session) - - self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'session' - ) - with mgr2.writer.using(context): - self.assertIsNot(context.transaction_ctx.factory, mgr1._factory) - self.assertIs(context.transaction_ctx.factory, mgr2._factory) - self.assertIsNotNone(context.session) - - def test_multiple_factories_nested(self): - """Test that the instrumentation applied to a context class supports - - nested calls among multiple _TransactionContextManager objects. 
- - """ - mgr1 = enginefacade.transaction_context() - mgr1.configure( - connection=self.engine_uri, - slave_connection=self.slave_uri - ) - mgr2 = enginefacade.transaction_context() - mgr2.configure( - connection=self.engine_uri, - slave_connection=self.slave_uri - ) - - context = oslo_context.RequestContext() - - with mgr1.writer.using(context): - self.assertIs(context.transaction_ctx.factory, mgr1._factory) - self.assertIsNot(context.transaction_ctx.factory, mgr2._factory) - - with mgr2.writer.using(context): - self.assertIsNot( - context.transaction_ctx.factory, mgr1._factory) - self.assertIs(context.transaction_ctx.factory, mgr2._factory) - self.assertIsNotNone(context.session) - - # mgr1 is restored - self.assertIs(context.transaction_ctx.factory, mgr1._factory) - self.assertIsNot(context.transaction_ctx.factory, mgr2._factory) - self.assertIsNotNone(context.session) - - self.assertRaises( - exception.NoEngineContextEstablished, - getattr, context, 'transaction_ctx' - ) - - def test_context_found_for_bound_method(self): - context = oslo_context.RequestContext() - - @enginefacade.reader - def go(self, context): - context.session.execute("test") - go(self, context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test") - - def test_context_found_for_class_method(self): - context = oslo_context.RequestContext() - - class Spam(object): - @classmethod - @enginefacade.reader - def go(cls, context): - context.session.execute("test") - Spam.go(context) - - with self._assert_engines() as engines: - with self._assert_makers(engines) as makers: - with self._assert_reader_session(makers) as session: - session.execute("test") - - -class PatchFactoryTest(oslo_test_base.BaseTestCase): - - def test_patch_manager(self): - normal_mgr = enginefacade.transaction_context() - normal_mgr.configure(connection="sqlite:///foo.db") - alt_mgr = enginefacade.transaction_context() - alt_mgr.configure(connection="sqlite:///bar.db") - - @normal_mgr.writer - def go1(context): - s1 = context.session - self.assertEqual( - s1.bind.url, "sqlite:///foo.db") - self.assertIs( - s1.bind, - normal_mgr._factory._writer_engine) - - @normal_mgr.writer - def go2(context): - s1 = context.session - - self.assertEqual( - s1.bind.url, - "sqlite:///bar.db") - - self.assertIs( - normal_mgr._factory._writer_engine, - alt_mgr._factory._writer_engine - ) - - def create_engine(sql_connection, **kw): - return mock.Mock(url=sql_connection) - - with mock.patch( - "oslo_db.sqlalchemy.engines.create_engine", create_engine): - context = oslo_context.RequestContext() - go1(context) - reset = normal_mgr.patch_factory(alt_mgr) - go2(context) - reset() - go1(context) - - def test_patch_factory(self): - normal_mgr = enginefacade.transaction_context() - normal_mgr.configure(connection="sqlite:///foo.db") - alt_mgr = enginefacade.transaction_context() - alt_mgr.configure(connection="sqlite:///bar.db") - - @normal_mgr.writer - def go1(context): - s1 = context.session - self.assertEqual( - s1.bind.url, "sqlite:///foo.db") - self.assertIs( - s1.bind, - normal_mgr._factory._writer_engine) - - @normal_mgr.writer - def go2(context): - s1 = context.session - - self.assertEqual( - s1.bind.url, - "sqlite:///bar.db") - - self.assertIs( - normal_mgr._factory._writer_engine, - alt_mgr._factory._writer_engine - ) - - def create_engine(sql_connection, **kw): - return mock.Mock(url=sql_connection) - - with mock.patch( - 
"oslo_db.sqlalchemy.engines.create_engine", create_engine): - context = oslo_context.RequestContext() - go1(context) - reset = normal_mgr.patch_factory(alt_mgr._factory) - go2(context) - reset() - go1(context) - - def test_patch_engine(self): - normal_mgr = enginefacade.transaction_context() - normal_mgr.configure(connection="sqlite:///foo.db") - - @normal_mgr.writer - def go1(context): - s1 = context.session - self.assertEqual( - s1.bind.url, "sqlite:///foo.db") - self.assertIs( - s1.bind, - normal_mgr._factory._writer_engine) - - @normal_mgr.writer - def go2(context): - s1 = context.session - - self.assertEqual( - s1.bind.url, - "sqlite:///bar.db") - - def create_engine(sql_connection, **kw): - return mock.Mock(url=sql_connection) - - with mock.patch( - "oslo_db.sqlalchemy.engines.create_engine", create_engine): - mock_engine = create_engine("sqlite:///bar.db") - - context = oslo_context.RequestContext() - go1(context) - reset = normal_mgr.patch_engine(mock_engine) - go2(context) - self.assertIs( - normal_mgr._factory._writer_engine, mock_engine) - reset() - go1(context) - - def test_new_manager_from_config(self): - normal_mgr = enginefacade.transaction_context() - normal_mgr.configure( - connection="sqlite://", - sqlite_fk=True, - mysql_sql_mode="FOOBAR", - max_overflow=38 - ) - - normal_mgr._factory._start() - - copied_mgr = normal_mgr.make_new_manager() - - self.assertTrue(normal_mgr._factory._started) - self.assertIsNotNone(normal_mgr._factory._writer_engine) - - self.assertIsNot(copied_mgr._factory, normal_mgr._factory) - self.assertFalse(copied_mgr._factory._started) - copied_mgr._factory._start() - self.assertIsNot( - normal_mgr._factory._writer_engine, - copied_mgr._factory._writer_engine) - - engine_args = copied_mgr._factory._engine_args_for_conf(None) - self.assertTrue(engine_args['sqlite_fk']) - self.assertEqual("FOOBAR", engine_args["mysql_sql_mode"]) - self.assertEqual(38, engine_args["max_overflow"]) - - -class SynchronousReaderWSlaveMockFacadeTest(MockFacadeTest): - synchronous_reader = True - - engine_uri = 'some_connection' - slave_uri = 'some_slave_connection' - - -class AsyncReaderWSlaveMockFacadeTest(MockFacadeTest): - synchronous_reader = False - - engine_uri = 'some_connection' - slave_uri = 'some_slave_connection' - - -class LegacyIntegrationtest(test_base.DbTestCase): - - def test_legacy_integration(self): - legacy_facade = enginefacade.get_legacy_facade() - self.assertTrue( - legacy_facade.get_engine() is - enginefacade._context_manager._factory._writer_engine - ) - - self.assertTrue( - enginefacade.get_legacy_facade() is legacy_facade - ) - - def test_get_sessionmaker(self): - legacy_facade = enginefacade.get_legacy_facade() - self.assertTrue( - legacy_facade.get_sessionmaker() is - enginefacade._context_manager._factory._writer_maker - ) - - def test_legacy_facades_from_different_context_managers(self): - transaction_context1 = enginefacade.transaction_context() - transaction_context2 = enginefacade.transaction_context() - - transaction_context1.configure(connection='sqlite:///?conn1') - transaction_context2.configure(connection='sqlite:///?conn2') - - legacy1 = transaction_context1.get_legacy_facade() - legacy2 = transaction_context2.get_legacy_facade() - - self.assertNotEqual(legacy1, legacy2) - - def test_legacy_not_started(self): - - factory = enginefacade._TransactionFactory() - - self.assertRaises( - exception.CantStartEngineError, - factory.get_legacy_facade - ) - - legacy_facade = factory.get_legacy_facade() - self.assertRaises( - 
exception.CantStartEngineError, - legacy_facade.get_session - ) - - self.assertRaises( - exception.CantStartEngineError, - legacy_facade.get_session - ) - - self.assertRaises( - exception.CantStartEngineError, - legacy_facade.get_engine - ) - - -class ThreadingTest(test_base.DbTestCase): - """Test copy/pickle on new threads using real connections and sessions.""" - - def _assert_ctx_connection(self, context, connection): - self.assertIs(context.connection, connection) - - def _assert_ctx_session(self, context, session): - self.assertIs(context.session, session) - - def _patch_thread_ident(self): - self.ident = 1 - - test_instance = self - - class MockThreadingLocal(object): - def __init__(self): - self.__dict__['state'] = collections.defaultdict(dict) - - def __deepcopy__(self, memo): - return self - - def __getattr__(self, key): - ns = self.state[test_instance.ident] - try: - return ns[key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - ns = self.state[test_instance.ident] - ns[key] = value - - def __delattr__(self, key): - ns = self.state[test_instance.ident] - try: - del ns[key] - except KeyError: - raise AttributeError(key) - - return mock.patch.object( - enginefacade, "_TransactionContextTLocal", MockThreadingLocal) - - def test_thread_ctxmanager_writer(self): - context = oslo_context.RequestContext() - - with self._patch_thread_ident(): - with enginefacade.writer.using(context) as session: - self._assert_ctx_session(context, session) - - self.ident = 2 - - with enginefacade.reader.using(context) as sess2: - # new session - self.assertIsNot(sess2, session) - - # thread local shows the new session - self._assert_ctx_session(context, sess2) - - self.ident = 1 - - with enginefacade.reader.using(context) as sess3: - self.assertIs(sess3, session) - self._assert_ctx_session(context, session) - - def test_thread_ctxmanager_writer_connection(self): - context = oslo_context.RequestContext() - - with self._patch_thread_ident(): - with enginefacade.writer.connection.using(context) as conn: - self._assert_ctx_connection(context, conn) - - self.ident = 2 - - with enginefacade.reader.connection.using(context) as conn2: - # new connection - self.assertIsNot(conn2, conn) - - # thread local shows the new connection - self._assert_ctx_connection(context, conn2) - - with enginefacade.reader.connection.using( - context) as conn3: - # we still get the right connection even though - # this context is not the "copied" context - self.assertIsNot(conn3, conn) - self.assertIs(conn3, conn2) - - self.ident = 1 - - with enginefacade.reader.connection.using(context) as conn3: - self.assertIs(conn3, conn) - self._assert_ctx_connection(context, conn) - - def test_thread_ctxmanager_switch_styles(self): - - @enginefacade.writer.connection - def go_one(context): - self.assertRaises( - exception.ContextNotRequestedError, - getattr, context, "session" - ) - self.assertIsNotNone(context.connection) - - self.ident = 2 - go_two(context) - - self.ident = 1 - self.assertRaises( - exception.ContextNotRequestedError, - getattr, context, "session" - ) - self.assertIsNotNone(context.connection) - - @enginefacade.reader - def go_two(context): - self.assertRaises( - exception.ContextNotRequestedError, - getattr, context, "connection" - ) - self.assertIsNotNone(context.session) - - context = oslo_context.RequestContext() - with self._patch_thread_ident(): - go_one(context) - - def test_thread_decorator_writer(self): - sessions = set() - - @enginefacade.writer - def go_one(context): - 
sessions.add(context.session) - - self.ident = 2 - go_two(context) - - self.ident = 1 - - go_three(context) - - @enginefacade.reader - def go_two(context): - assert context.session not in sessions - - @enginefacade.reader - def go_three(context): - assert context.session in sessions - - context = oslo_context.RequestContext() - with self._patch_thread_ident(): - go_one(context) - - def test_thread_decorator_writer_connection(self): - connections = set() - - @enginefacade.writer.connection - def go_one(context): - connections.add(context.connection) - - self.ident = 2 - go_two(context) - - self.ident = 1 - - go_three(context) - - @enginefacade.reader.connection - def go_two(context): - assert context.connection not in connections - - @enginefacade.reader - def go_three(context): - assert context.connection in connections - - context = oslo_context.RequestContext() - with self._patch_thread_ident(): - go_one(context) - - def test_contexts_picklable(self): - context = oslo_context.RequestContext() - - with enginefacade.writer.using(context) as session: - self._assert_ctx_session(context, session) - - pickled = pickle.dumps(context) - - unpickled = pickle.loads(pickled) - - with enginefacade.writer.using(unpickled) as session2: - self._assert_ctx_session(unpickled, session2) - - assert session is not session2 - - -class LiveFacadeTest(test_base.DbTestCase): - """test using live SQL with test-provisioned databases. - - Several of these tests require that multiple transactions run - simultaenously; as the default SQLite :memory: connection can't achieve - this, opportunistic test implementations against MySQL and PostgreSQL are - supplied. - - """ - - def setUp(self): - super(LiveFacadeTest, self).setUp() - - metadata = MetaData() - user_table = Table( - 'user', metadata, - Column('id', Integer, primary_key=True), - Column('name', String(30)), - mysql_engine='InnoDB' - ) - self.user_table = user_table - metadata.create_all(self.engine) - self.addCleanup(metadata.drop_all, self.engine) - - class User(object): - def __init__(self, name): - self.name = name - - mapper(User, user_table) - self.User = User - - def _assert_ctx_connection(self, context, connection): - self.assertIs(context.connection, connection) - - def _assert_ctx_session(self, context, session): - self.assertIs(context.session, session) - - def test_transaction_committed(self): - context = oslo_context.RequestContext() - - with enginefacade.writer.using(context) as session: - session.add(self.User(name="u1")) - - session = self.sessionmaker(autocommit=True) - self.assertEqual( - "u1", - session.query(self.User.name).scalar() - ) - - def test_transaction_rollback(self): - context = oslo_context.RequestContext() - - class MyException(Exception): - pass - - @enginefacade.writer - def go(context): - context.session.add(self.User(name="u1")) - context.session.flush() - raise MyException("a test") - - self.assertRaises(MyException, go, context) - - session = self.sessionmaker(autocommit=True) - self.assertEqual( - None, - session.query(self.User.name).scalar() - ) - - def test_context_deepcopy_on_session(self): - context = oslo_context.RequestContext() - with enginefacade.writer.using(context) as session: - - ctx2 = copy.deepcopy(context) - self._assert_ctx_session(ctx2, session) - - with enginefacade.writer.using(ctx2) as s2: - self.assertIs(session, s2) - self._assert_ctx_session(ctx2, s2) - - s2.add(self.User(name="u1")) - s2.flush() - - session = self.sessionmaker(autocommit=True) - self.assertEqual( - "u1", - 
session.query(self.User.name).scalar() - ) - - def test_context_deepcopy_on_connection(self): - context = oslo_context.RequestContext() - with enginefacade.writer.connection.using(context) as conn: - - ctx2 = copy.deepcopy(context) - self._assert_ctx_connection(ctx2, conn) - - with enginefacade.writer.connection.using(ctx2) as conn2: - self.assertIs(conn, conn2) - self._assert_ctx_connection(ctx2, conn2) - - conn2.execute(self.user_table.insert().values(name="u1")) - - self._assert_ctx_connection(ctx2, conn2) - - session = self.sessionmaker(autocommit=True) - self.assertEqual( - "u1", - session.query(self.User.name).scalar() - ) - - @test_base.backend_specific("postgresql", "mysql") - def test_external_session_transaction(self): - context = oslo_context.RequestContext() - with enginefacade.writer.using(context) as session: - session.add(self.User(name="u1")) - session.flush() - - with enginefacade.writer.independent.using(context) as s2: - # transaction() uses a new session - self.assertIsNot(s2, session) - self._assert_ctx_session(context, s2) - - # rows within a distinct transaction - s2.add(self.User(name="u2")) - - # it also takes over the global enginefacade - # within the context - with enginefacade.writer.using(context) as s3: - self.assertIs(s3, s2) - s3.add(self.User(name="u3")) - - self._assert_ctx_session(context, session) - - # rollback the "outer" transaction - session.rollback() - - # add more state on the "outer" transaction - session.begin() - session.add(self.User(name="u4")) - - session = self.sessionmaker(autocommit=True) - - # inner transction + second part of "outer" transaction were committed - self.assertEqual( - [("u2",), ("u3",), ("u4", )], - session.query( - self.User.name).order_by(self.User.name).all() - ) - - def test_savepoint_transaction_decorator(self): - context = oslo_context.RequestContext() - - @enginefacade.writer - def go1(context): - session = context.session - session.add(self.User(name="u1")) - session.flush() - - try: - go2(context) - except Exception: - pass - - go3(context) - - session.add(self.User(name="u4")) - - @enginefacade.writer.savepoint - def go2(context): - session = context.session - session.add(self.User(name="u2")) - raise Exception("nope") - - @enginefacade.writer.savepoint - def go3(context): - session = context.session - session.add(self.User(name="u3")) - - go1(context) - - session = self.sessionmaker(autocommit=True) - - # inner transction + second part of "outer" transaction were committed - self.assertEqual( - [("u1",), ("u3",), ("u4", )], - session.query( - self.User.name).order_by(self.User.name).all() - ) - - def test_savepoint_transaction(self): - context = oslo_context.RequestContext() - - with enginefacade.writer.using(context) as session: - session.add(self.User(name="u1")) - session.flush() - - try: - with enginefacade.writer.savepoint.using(context) as session: - session.add(self.User(name="u2")) - raise Exception("nope") - except Exception: - pass - - with enginefacade.writer.savepoint.using(context) as session: - session.add(self.User(name="u3")) - - session.add(self.User(name="u4")) - - session = self.sessionmaker(autocommit=True) - - # inner transction + second part of "outer" transaction were committed - self.assertEqual( - [("u1",), ("u3",), ("u4", )], - session.query( - self.User.name).order_by(self.User.name).all() - ) - - @test_base.backend_specific("postgresql", "mysql") - def test_external_session_transaction_decorator(self): - context = oslo_context.RequestContext() - - @enginefacade.writer - def 
go1(context): - session = context.session - session.add(self.User(name="u1")) - session.flush() - - go2(context, session) - - self._assert_ctx_session(context, session) - - # rollback the "outer" transaction - session.rollback() - - # add more state on the "outer" transaction - session.begin() - session.add(self.User(name="u4")) - - @enginefacade.writer.independent - def go2(context, session): - s2 = context.session - # uses a new session - self.assertIsNot(s2, session) - self._assert_ctx_session(context, s2) - - # rows within a distinct transaction - s2.add(self.User(name="u2")) - - # it also takes over the global enginefacade - # within the context - with enginefacade.writer.using(context) as s3: - self.assertIs(s3, s2) - s3.add(self.User(name="u3")) - - go1(context) - - session = self.sessionmaker(autocommit=True) - - # inner transction + second part of "outer" transaction were committed - self.assertEqual( - [("u2",), ("u3",), ("u4", )], - session.query( - self.User.name).order_by(self.User.name).all() - ) - - @test_base.backend_specific("postgresql", "mysql") - def test_external_connection_transaction(self): - context = oslo_context.RequestContext() - with enginefacade.writer.connection.using(context) as connection: - connection.execute(self.user_table.insert().values(name="u1")) - - # transaction() uses a new Connection - with enginefacade.writer.independent.connection.\ - using(context) as c2: - self.assertIsNot(c2, connection) - self._assert_ctx_connection(context, c2) - - # rows within a distinct transaction - c2.execute(self.user_table.insert().values(name="u2")) - - # it also takes over the global enginefacade - # within the context - with enginefacade.writer.connection.using(context) as c3: - self.assertIs(c2, c3) - c3.execute(self.user_table.insert().values(name="u3")) - self._assert_ctx_connection(context, connection) - - # rollback the "outer" transaction - transaction_ctx = context.transaction_ctx - transaction_ctx.transaction.rollback() - transaction_ctx.transaction = connection.begin() - - # add more state on the "outer" transaction - connection.execute(self.user_table.insert().values(name="u4")) - - session = self.sessionmaker(autocommit=True) - self.assertEqual( - [("u2",), ("u3",), ("u4", )], - session.query( - self.User.name).order_by(self.User.name).all() - ) - - @test_base.backend_specific("postgresql", "mysql") - def test_external_writer_in_reader(self): - context = oslo_context.RequestContext() - with enginefacade.reader.using(context) as session: - ping = session.scalar(select([1])) - self.assertEqual(1, ping) - - # we're definitely a reader - @enginefacade.writer - def go(ctx): - pass - exc = self.assertRaises(TypeError, go, context) - self.assertEqual( - "Can't upgrade a READER transaction to a " - "WRITER mid-transaction", - exc.args[0]) - - # but we can do a writer on a new transaction - with enginefacade.writer.independent.using(context) as sess2: - self.assertIsNot(sess2, session) - self._assert_ctx_session(context, sess2) - - session.add(self.User(name="u1_nocommit")) - sess2.add(self.User(name="u1_commit")) - - user = session.query(self.User).first() - self.assertEqual("u1_commit", user.name) - - session = self.sessionmaker(autocommit=True) - self.assertEqual( - [("u1_commit",)], - session.query( - self.User.name).order_by(self.User.name).all() - ) - - def test_replace_scope(self): - # "timeout" is an argument accepted by - # the pysqlite dialect, which we set here to ensure - # that even in an all-sqlite test, we test that the URL - # is different in the 
context we are looking for - alt_connection = "sqlite:///?timeout=90" - - alt_mgr1 = enginefacade.transaction_context() - alt_mgr1.configure( - connection=alt_connection, - ) - - @enginefacade.writer - def go1(context): - s1 = context.session - self.assertEqual( - s1.bind.url, - enginefacade._context_manager._factory._writer_engine.url) - self.assertIs( - s1.bind, - enginefacade._context_manager._factory._writer_engine) - self.assertEqual(s1.bind.url, self.engine.url) - - with alt_mgr1.replace.using(context): - go2(context) - - go4(context) - - @enginefacade.writer - def go2(context): - s2 = context.session - - # factory is not replaced globally... - self.assertIsNot( - enginefacade._context_manager._factory._writer_engine, - alt_mgr1._factory._writer_engine - ) - - # but it is replaced for us - self.assertIs(s2.bind, alt_mgr1._factory._writer_engine) - self.assertEqual( - str(s2.bind.url), alt_connection) - - go3(context) - - @enginefacade.reader - def go3(context): - s3 = context.session - - # in a call of a call, we still have the alt URL - self.assertIs(s3.bind, alt_mgr1._factory._writer_engine) - self.assertEqual( - str(s3.bind.url), alt_connection) - - @enginefacade.writer - def go4(context): - s4 = context.session - - # outside the "replace" context, all is back to normal - self.assertIs(s4.bind, self.engine) - self.assertEqual( - s4.bind.url, self.engine.url) - - context = oslo_context.RequestContext() - go1(context) - self.assertIsNot( - enginefacade._context_manager._factory._writer_engine, - alt_mgr1._factory._writer_engine - ) - - def test_replace_scope_only_global_eng(self): - # "timeout" is an argument accepted by - # the pysqlite dialect, which we set here to ensure - # that even in an all-sqlite test, we test that the URL - # is different in the context we are looking for - alt_connection1 = "sqlite:///?timeout=90" - - alt_mgr1 = enginefacade.transaction_context() - alt_mgr1.configure( - connection=alt_connection1, - ) - - alt_connection2 = "sqlite:///?timeout=120" - - alt_mgr2 = enginefacade.transaction_context() - alt_mgr2.configure( - connection=alt_connection2, - ) - - @enginefacade.writer - def go1(context): - s1 = context.session - # global engine - self.assertEqual(s1.bind.url, self.engine.url) - - # now replace global engine... - with alt_mgr1.replace.using(context): - go2(context) - - # and back - go6(context) - - @enginefacade.writer - def go2(context): - s2 = context.session - - # we have the replace-the-global engine - self.assertEqual(str(s2.bind.url), alt_connection1) - self.assertIs(s2.bind, alt_mgr1._factory._writer_engine) - - go3(context) - - @alt_mgr2.writer - def go3(context): - s3 = context.session - - # we don't use the global engine in the first place. - # make sure our own factory still used. - self.assertEqual(str(s3.bind.url), alt_connection2) - self.assertIs(s3.bind, alt_mgr2._factory._writer_engine) - - go4(context) - - @enginefacade.writer - def go4(context): - s4 = context.session - - # we *do* use the global, so we still want the replacement. - self.assertEqual(str(s4.bind.url), alt_connection1) - self.assertIs(s4.bind, alt_mgr1._factory._writer_engine) - - @enginefacade.writer - def go5(context): - s5 = context.session - - # ...and here also - self.assertEqual(str(s5.bind.url), alt_connection1) - self.assertIs(s5.bind, alt_mgr1._factory._writer_engine) - - @enginefacade.writer - def go6(context): - s6 = context.session - - # ...but not here! 
- self.assertEqual(str(s6.bind.url), str(self.engine.url)) - self.assertIs(s6.bind, self.engine) - - context = oslo_context.RequestContext() - go1(context) - - -class MySQLLiveFacadeTest( - test_base.MySQLOpportunisticTestCase, LiveFacadeTest): - pass - - -class PGLiveFacadeTest( - test_base.PostgreSQLOpportunisticTestCase, LiveFacadeTest): - pass - - -class ConfigOptionsTest(oslo_test_base.BaseTestCase): - def test_all_options(self): - """test that everything in CONF.database.iteritems() is accepted. - - There's a handful of options in oslo.db.options that seem to have - no meaning, but need to be accepted. In particular, Cinder and - maybe others are doing exactly this call. - - """ - - factory = enginefacade._TransactionFactory() - cfg.CONF.register_opts(options.database_opts, 'database') - factory.configure(**dict(cfg.CONF.database.items())) - - def test_options_not_supported(self): - factory = enginefacade._TransactionFactory() - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - factory.configure(fake1='x', idle_timeout=200, wrong2='y') - - self.assertEqual(1, len(w)) - self.assertTrue( - issubclass(w[-1].category, exception.NotSupportedWarning)) - self.assertEqual( - "Configuration option(s) ['fake1', 'wrong2'] not supported", - str(w[-1].message) - ) - - def test_no_engine(self): - factory = enginefacade._TransactionFactory() - - self.assertRaises( - exception.CantStartEngineError, - factory._create_session, enginefacade._WRITER - ) - - self.assertRaises( - exception.CantStartEngineError, - factory._create_session, enginefacade._WRITER - ) - - -class TestTransactionFactoryCallback(oslo_test_base.BaseTestCase): - - def test_setup_for_connection_called_with_profiler(self): - context_manager = enginefacade.transaction_context() - context_manager.configure(connection='sqlite://') - hook = mock.Mock() - context_manager.append_on_engine_create(hook) - self.assertEqual( - [hook], context_manager._factory._facade_cfg['on_engine_create']) - - @context_manager.reader - def go(context): - hook.assert_called_once_with(context.session.bind) - - go(oslo_context.RequestContext()) - -# TODO(zzzeek): test configuration options, e.g. like -# test_sqlalchemy->test_creation_from_config diff --git a/oslo_db/tests/sqlalchemy/test_exc_filters.py b/oslo_db/tests/sqlalchemy/test_exc_filters.py deleted file mode 100644 index 17153a7..0000000 --- a/oslo_db/tests/sqlalchemy/test_exc_filters.py +++ /dev/null @@ -1,1361 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
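The file above exercises the enginefacade API end to end. For orientation, the pattern those tests verify can be sketched as follows; this is a minimal illustration mirroring calls that appear in the tests (the function names, the in-memory ``sqlite://`` URL, and the pre-1.4 ``select([1])`` form are illustrative, not a canonical recipe)::

    from oslo_context import context as oslo_context
    from oslo_db.sqlalchemy import enginefacade
    from sqlalchemy import select

    # Give RequestContext the .session/.connection attributes that the
    # decorators and context managers below populate.
    enginefacade.transaction_context_provider(oslo_context.RequestContext)

    context_manager = enginefacade.transaction_context()
    context_manager.configure(connection='sqlite://')

    @context_manager.writer
    def do_write(context):
        # context.session is bound to the writer engine; the transaction
        # commits when the decorated function returns.
        assert context.session.scalar(select([1])) == 1

    @context_manager.reader
    def do_read(context):
        # Reader transactions are closed on exit (and rolled back when
        # rollback_reader_sessions is configured).
        assert context.session.scalar(select([1])) == 1

    ctx = oslo_context.RequestContext()
    do_write(ctx)
    do_read(ctx)

    # The same modes are also available as context managers.
    with context_manager.reader.using(ctx) as session:
        assert session.scalar(select([1])) == 1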
- -"""Test exception filters applied to engines.""" - -import contextlib -import itertools - -import mock -from oslotest import base as oslo_test_base -import six -import sqlalchemy as sqla -from sqlalchemy import event -import sqlalchemy.exc -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import mapper - -from oslo_db import exception -from oslo_db.sqlalchemy import engines -from oslo_db.sqlalchemy import exc_filters -from oslo_db.sqlalchemy import test_base -from oslo_db.tests import utils as test_utils - -_TABLE_NAME = '__tmp__test__tmp__' - - -class _SQLAExceptionMatcher(object): - def assertInnerException( - self, - matched, exception_type, message, sql=None, params=None): - - exc = matched.inner_exception - self.assertSQLAException(exc, exception_type, message, sql, params) - - def assertSQLAException( - self, - exc, exception_type, message, sql=None, params=None): - if isinstance(exception_type, (type, tuple)): - self.assertTrue(issubclass(exc.__class__, exception_type)) - else: - self.assertEqual(exception_type, exc.__class__.__name__) - if isinstance(message, tuple): - self.assertEqual( - [m.lower() - if isinstance(m, six.string_types) else m for m in message], - [a.lower() - if isinstance(a, six.string_types) else a - for a in exc.orig.args] - ) - else: - self.assertEqual(message.lower(), str(exc.orig).lower()) - if sql is not None: - if params is not None: - if '?' in exc.statement: - self.assertEqual(sql, exc.statement) - self.assertEqual(params, exc.params) - else: - self.assertEqual(sql % params, exc.statement % exc.params) - else: - self.assertEqual(sql, exc.statement) - - -class TestsExceptionFilter(_SQLAExceptionMatcher, oslo_test_base.BaseTestCase): - - class Error(Exception): - """DBAPI base error. - - This exception and subclasses are used in a mock context - within these tests. - - """ - - class DataError(Error): - pass - - class OperationalError(Error): - pass - - class InterfaceError(Error): - pass - - class InternalError(Error): - pass - - class IntegrityError(Error): - pass - - class ProgrammingError(Error): - pass - - class TransactionRollbackError(OperationalError): - """Special psycopg2-only error class. - - SQLAlchemy has an issue with this per issue #3075: - - https://bitbucket.org/zzzeek/sqlalchemy/issue/3075/ - - """ - - def setUp(self): - super(TestsExceptionFilter, self).setUp() - self.engine = sqla.create_engine("sqlite://") - exc_filters.register_engine(self.engine) - self.engine.connect().close() # initialize - - @contextlib.contextmanager - def _dbapi_fixture(self, dialect_name, is_disconnect=False): - engine = self.engine - with test_utils.nested( - mock.patch.object(engine.dialect.dbapi, - "Error", - self.Error), - mock.patch.object(engine.dialect, "name", dialect_name), - mock.patch.object(engine.dialect, - "is_disconnect", - lambda *args: is_disconnect) - ): - yield - - @contextlib.contextmanager - def _fixture(self, dialect_name, exception, is_disconnect=False): - - def do_execute(self, cursor, statement, parameters, **kw): - raise exception - - engine = self.engine - - # ensure the engine has done its initial checks against the - # DB as we are going to be removing its ability to execute a - # statement - self.engine.connect().close() - - with test_utils.nested( - mock.patch.object(engine.dialect, "do_execute", do_execute), - # replace the whole DBAPI rather than patching "Error" - # as some DBAPIs might not be patchable (?) 
- mock.patch.object(engine.dialect, - "dbapi", - mock.Mock(Error=self.Error)), - mock.patch.object(engine.dialect, "name", dialect_name), - mock.patch.object(engine.dialect, - "is_disconnect", - lambda *args: is_disconnect) - ): - yield - - def _run_test(self, dialect_name, statement, raises, expected, - is_disconnect=False, params=()): - with self._fixture(dialect_name, raises, is_disconnect=is_disconnect): - with self.engine.connect() as conn: - matched = self.assertRaises( - expected, conn.execute, statement, params - ) - return matched - - -class TestFallthroughsAndNonDBAPI(TestsExceptionFilter): - - def test_generic_dbapi(self): - matched = self._run_test( - "mysql", "select you_made_a_programming_error", - self.ProgrammingError("Error 123, you made a mistake"), - exception.DBError - ) - self.assertInnerException( - matched, - "ProgrammingError", - "Error 123, you made a mistake", - 'select you_made_a_programming_error', ()) - - def test_generic_dbapi_disconnect(self): - matched = self._run_test( - "mysql", "select the_db_disconnected", - self.InterfaceError("connection lost"), - exception.DBConnectionError, - is_disconnect=True - ) - self.assertInnerException( - matched, - "InterfaceError", "connection lost", - "select the_db_disconnected", ()), - - def test_operational_dbapi_disconnect(self): - matched = self._run_test( - "mysql", "select the_db_disconnected", - self.OperationalError("connection lost"), - exception.DBConnectionError, - is_disconnect=True - ) - self.assertInnerException( - matched, - "OperationalError", "connection lost", - "select the_db_disconnected", ()), - - def test_operational_error_asis(self): - """Test operational errors. - - test that SQLAlchemy OperationalErrors that aren't disconnects - are passed through without wrapping. - """ - - matched = self._run_test( - "mysql", "select some_operational_error", - self.OperationalError("some op error"), - sqla.exc.OperationalError - ) - self.assertSQLAException( - matched, - "OperationalError", "some op error" - ) - - def test_unicode_encode(self): - # intentionally generate a UnicodeEncodeError, as its - # constructor is quite complicated and seems to be non-public - # or at least not documented anywhere. 
- uee_ref = None - try: - six.u('\u2435').encode('ascii') - except UnicodeEncodeError as uee: - # Python3.x added new scoping rules here (sadly) - # http://legacy.python.org/dev/peps/pep-3110/#semantic-changes - uee_ref = uee - - self._run_test( - "postgresql", six.u('select \u2435'), - uee_ref, - exception.DBInvalidUnicodeParameter - ) - - def test_garden_variety(self): - matched = self._run_test( - "mysql", "select some_thing_that_breaks", - AttributeError("mysqldb has an attribute error"), - exception.DBError - ) - self.assertEqual("mysqldb has an attribute error", matched.args[0]) - - -class TestNonExistentConstraint( - _SQLAExceptionMatcher, - test_base.DbTestCase): - - def setUp(self): - super(TestNonExistentConstraint, self).setUp() - - meta = sqla.MetaData(bind=self.engine) - - self.table_1 = sqla.Table( - "resource_foo", meta, - sqla.Column("id", sqla.Integer, primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - self.table_1.create() - - -class TestNonExistentConstraintPostgreSQL( - TestNonExistentConstraint, - test_base.PostgreSQLOpportunisticTestCase): - - def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentConstraint, - self.engine.execute, - sqla.schema.DropConstraint( - sqla.ForeignKeyConstraint(["id"], ["baz.id"], - name="bar_fkey", - table=self.table_1)), - ) - self.assertInnerException( - matched, - "ProgrammingError", - "constraint \"bar_fkey\" of relation " - "\"resource_foo\" does not exist\n", - "ALTER TABLE resource_foo DROP CONSTRAINT bar_fkey", - ) - self.assertEqual("resource_foo", matched.table) - self.assertEqual("bar_fkey", matched.constraint) - - -class TestNonExistentConstraintMySQL( - TestNonExistentConstraint, - test_base.MySQLOpportunisticTestCase): - - def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentConstraint, - self.engine.execute, - sqla.schema.DropConstraint( - sqla.ForeignKeyConstraint(["id"], ["baz.id"], - name="bar_fkey", - table=self.table_1)), - ) - # NOTE(jd) Cannot check precisely with assertInnerException since MySQL - # error are not the same depending on its version… - self.assertIsInstance(matched.inner_exception, - sqlalchemy.exc.InternalError) - if matched.table is not None: - self.assertEqual("resource_foo", matched.table) - if matched.constraint is not None: - self.assertEqual("bar_fkey", matched.constraint) - - -class TestNonExistentTable( - _SQLAExceptionMatcher, - test_base.DbTestCase): - - def setUp(self): - super(TestNonExistentTable, self).setUp() - - self.meta = sqla.MetaData(bind=self.engine) - - self.table_1 = sqla.Table( - "foo", self.meta, - sqla.Column("id", sqla.Integer, primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentTable, - self.engine.execute, - sqla.schema.DropTable(self.table_1), - ) - self.assertInnerException( - matched, - "OperationalError", - "no such table: foo", - "\nDROP TABLE foo", - ) - self.assertEqual("foo", matched.table) - - -class TestNonExistentTablePostgreSQL( - TestNonExistentTable, - test_base.PostgreSQLOpportunisticTestCase): - - def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentTable, - self.engine.execute, - sqla.schema.DropTable(self.table_1), - ) - self.assertInnerException( - matched, - "ProgrammingError", - "table \"foo\" does not exist\n", - "\nDROP TABLE foo", - ) - self.assertEqual("foo", matched.table) - - -class TestNonExistentTableMySQL( - TestNonExistentTable, - 
test_base.MySQLOpportunisticTestCase): - - def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentTable, - self.engine.execute, - sqla.schema.DropTable(self.table_1), - ) - # NOTE(jd) Cannot check precisely with assertInnerException since MySQL - # error are not the same depending on its version… - self.assertIsInstance(matched.inner_exception, - sqlalchemy.exc.InternalError) - self.assertEqual("foo", matched.table) - - -class TestReferenceErrorSQLite(_SQLAExceptionMatcher, test_base.DbTestCase): - - def setUp(self): - super(TestReferenceErrorSQLite, self).setUp() - - meta = sqla.MetaData(bind=self.engine) - - self.table_1 = sqla.Table( - "resource_foo", meta, - sqla.Column("id", sqla.Integer, primary_key=True), - sqla.Column("foo", sqla.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - self.table_1.create() - - self.table_2 = sqla.Table( - "resource_entity", meta, - sqla.Column("id", sqla.Integer, primary_key=True), - sqla.Column("foo_id", sqla.Integer, - sqla.ForeignKey("resource_foo.id", name="foo_fkey")), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - self.table_2.create() - - def test_raise(self): - self.engine.execute("PRAGMA foreign_keys = ON;") - - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_2.insert({'id': 1, 'foo_id': 2}) - ) - - self.assertInnerException( - matched, - "IntegrityError", - "FOREIGN KEY constraint failed", - 'INSERT INTO resource_entity (id, foo_id) VALUES (?, ?)', - (1, 2) - ) - - self.assertIsNone(matched.table) - self.assertIsNone(matched.constraint) - self.assertIsNone(matched.key) - self.assertIsNone(matched.key_table) - - def test_raise_delete(self): - self.engine.execute("PRAGMA foreign_keys = ON;") - - with self.engine.connect() as conn: - conn.execute(self.table_1.insert({"id": 1234, "foo": 42})) - conn.execute(self.table_2.insert({"id": 4321, "foo_id": 1234})) - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_1.delete() - ) - self.assertInnerException( - matched, - "IntegrityError", - "foreign key constraint failed", - "DELETE FROM resource_foo", - (), - ) - - self.assertIsNone(matched.table) - self.assertIsNone(matched.constraint) - self.assertIsNone(matched.key) - self.assertIsNone(matched.key_table) - - -class TestReferenceErrorPostgreSQL(TestReferenceErrorSQLite, - test_base.PostgreSQLOpportunisticTestCase): - def test_raise(self): - params = {'id': 1, 'foo_id': 2} - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_2.insert(params) - ) - self.assertInnerException( - matched, - "IntegrityError", - "insert or update on table \"resource_entity\" " - "violates foreign key constraint \"foo_fkey\"\nDETAIL: Key " - "(foo_id)=(2) is not present in table \"resource_foo\".\n", - "INSERT INTO resource_entity (id, foo_id) VALUES (%(id)s, " - "%(foo_id)s)", - params, - ) - - self.assertEqual("resource_entity", matched.table) - self.assertEqual("foo_fkey", matched.constraint) - self.assertEqual("foo_id", matched.key) - self.assertEqual("resource_foo", matched.key_table) - - def test_raise_delete(self): - with self.engine.connect() as conn: - conn.execute(self.table_1.insert({"id": 1234, "foo": 42})) - conn.execute(self.table_2.insert({"id": 4321, "foo_id": 1234})) - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_1.delete() - ) - self.assertInnerException( - matched, - "IntegrityError", - "update or delete on table \"resource_foo\" 
violates foreign key " - "constraint \"foo_fkey\" on table \"resource_entity\"\n" - "DETAIL: Key (id)=(1234) is still referenced from " - "table \"resource_entity\".\n", - "DELETE FROM resource_foo", - {}, - ) - - self.assertEqual("resource_foo", matched.table) - self.assertEqual("foo_fkey", matched.constraint) - self.assertEqual("id", matched.key) - self.assertEqual("resource_entity", matched.key_table) - - -class TestReferenceErrorMySQL(TestReferenceErrorSQLite, - test_base.MySQLOpportunisticTestCase): - def test_raise(self): - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_2.insert({'id': 1, 'foo_id': 2}) - ) - - self.assertInnerException( - matched, - "IntegrityError", - (1452, "Cannot add or update a child row: a " - "foreign key constraint fails (`{0}`.`resource_entity`, " - "CONSTRAINT `foo_fkey` FOREIGN KEY (`foo_id`) REFERENCES " - "`resource_foo` (`id`))".format(self.engine.url.database)), - "INSERT INTO resource_entity (id, foo_id) VALUES (%s, %s)", - (1, 2) - ) - self.assertEqual("resource_entity", matched.table) - self.assertEqual("foo_fkey", matched.constraint) - self.assertEqual("foo_id", matched.key) - self.assertEqual("resource_foo", matched.key_table) - - def test_raise_ansi_quotes(self): - with self.engine.connect() as conn: - conn.detach() # will not be returned to the pool when closed - - # this is incompatible with some internals of the engine - conn.execute("SET SESSION sql_mode = 'ANSI';") - - matched = self.assertRaises( - exception.DBReferenceError, - conn.execute, - self.table_2.insert({'id': 1, 'foo_id': 2}) - ) - - self.assertInnerException( - matched, - "IntegrityError", - ( - 1452, - 'Cannot add or update a child row: a ' - 'foreign key constraint fails ("{0}"."resource_entity", ' - 'CONSTRAINT "foo_fkey" FOREIGN KEY ("foo_id") REFERENCES ' - '"resource_foo" ("id"))'.format(self.engine.url.database) - ), - "INSERT INTO resource_entity (id, foo_id) VALUES (%s, %s)", - (1, 2) - ) - self.assertEqual("resource_entity", matched.table) - self.assertEqual("foo_fkey", matched.constraint) - self.assertEqual("foo_id", matched.key) - self.assertEqual("resource_foo", matched.key_table) - - def test_raise_delete(self): - with self.engine.connect() as conn: - conn.execute(self.table_1.insert({"id": 1234, "foo": 42})) - conn.execute(self.table_2.insert({"id": 4321, "foo_id": 1234})) - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_1.delete() - ) - self.assertInnerException( - matched, - "IntegrityError", - ( - 1451, - "Cannot delete or update a parent row: a foreign key " - "constraint fails (`{0}`.`resource_entity`, " - "constraint `foo_fkey` " - "foreign key (`foo_id`) references " - "`resource_foo` (`id`))".format(self.engine.url.database) - ), - "DELETE FROM resource_foo", - (), - ) - - self.assertEqual("resource_entity", matched.table) - self.assertEqual("foo_fkey", matched.constraint) - self.assertEqual("foo_id", matched.key) - self.assertEqual("resource_foo", matched.key_table) - - -class TestExceptionCauseMySQLSavepoint(test_base.MySQLOpportunisticTestCase): - def setUp(self): - super(TestExceptionCauseMySQLSavepoint, self).setUp() - - Base = declarative_base() - - class A(Base): - __tablename__ = 'a' - - id = sqla.Column(sqla.Integer, primary_key=True) - - __table_args__ = {'mysql_engine': 'InnoDB'} - - Base.metadata.create_all(self.engine) - - self.A = A - - def test_cause_for_failed_flush_plus_no_savepoint(self): - session = self.sessionmaker() - - with 
session.begin(): - session.add(self.A(id=1)) - try: - - with session.begin(): - - try: - with session.begin_nested(): - session.execute("rollback") - session.add(self.A(id=1)) - - # outermost is the failed SAVEPOINT rollback - # from the "with session.begin_nested()" - except exception.DBError as dbe_inner: - - # first "cause" is the failed SAVEPOINT rollback - # from inside of flush(), when it fails - self.assertTrue( - isinstance( - dbe_inner.cause, - exception.DBError - ) - ) - - # second "cause" is then the actual DB duplicate - self.assertTrue( - isinstance( - dbe_inner.cause.cause, - exception.DBDuplicateEntry - ) - ) - except exception.DBError as dbe_outer: - self.assertTrue( - isinstance( - dbe_outer.cause, - exception.DBDuplicateEntry - ) - ) - - # resets itself afterwards - try: - with session.begin(): - session.add(self.A(id=1)) - except exception.DBError as dbe_outer: - self.assertIsNone(dbe_outer.cause) - - -class TestDBDataErrorSQLite(_SQLAExceptionMatcher, test_base.DbTestCase): - - def setUp(self): - super(TestDBDataErrorSQLite, self).setUp() - - if six.PY3: - self.skip("SQLite database supports unicode value for python3") - - meta = sqla.MetaData(bind=self.engine) - - self.table_1 = sqla.Table( - "resource_foo", meta, - sqla.Column("name", sqla.String), - ) - self.table_1.create() - - def test_raise(self): - - matched = self.assertRaises( - exception.DBDataError, - self.engine.execute, - self.table_1.insert({'name': u'\u2713'.encode('utf-8')}) - ) - - self.assertInnerException( - matched, - "ProgrammingError", - "You must not use 8-bit bytestrings unless you use a " - "text_factory that can interpret 8-bit bytestrings " - "(like text_factory = str). It is highly recommended that " - "you instead just switch your application to Unicode strings.", - "INSERT INTO resource_foo (name) VALUES (?)", - (u'\u2713'.encode('utf-8'),) - ) - - -class TestConstraint(TestsExceptionFilter): - def test_postgresql(self): - matched = self._run_test( - "postgresql", "insert into resource some_values", - self.IntegrityError( - "new row for relation \"resource\" violates " - "check constraint \"ck_started_before_ended\""), - exception.DBConstraintError, - ) - self.assertEqual("resource", matched.table) - self.assertEqual("ck_started_before_ended", matched.check_name) - - -class TestDuplicate(TestsExceptionFilter): - - def _run_dupe_constraint_test(self, dialect_name, message, - expected_columns=['a', 'b'], - expected_value=None): - matched = self._run_test( - dialect_name, "insert into table some_values", - self.IntegrityError(message), - exception.DBDuplicateEntry - ) - self.assertEqual(expected_columns, matched.columns) - self.assertEqual(expected_value, matched.value) - - def _not_dupe_constraint_test(self, dialect_name, statement, message, - expected_cls): - matched = self._run_test( - dialect_name, statement, - self.IntegrityError(message), - expected_cls - ) - self.assertInnerException( - matched, - "IntegrityError", - str(self.IntegrityError(message)), - statement - ) - - def test_sqlite(self): - self._run_dupe_constraint_test("sqlite", 'column a, b are not unique') - - def test_sqlite_3_7_16_or_3_8_2_and_higher(self): - self._run_dupe_constraint_test( - "sqlite", - 'UNIQUE constraint failed: tbl.a, tbl.b') - - def test_sqlite_dupe_primary_key(self): - self._run_dupe_constraint_test( - "sqlite", - "PRIMARY KEY must be unique 'insert into t values(10)'", - expected_columns=[]) - - def test_mysql_pymysql(self): - self._run_dupe_constraint_test( - "mysql", - '(1062, "Duplicate entry ' - 
'\'2-3\' for key \'uniq_tbl0a0b\'")', expected_value='2-3') - self._run_dupe_constraint_test( - "mysql", - '(1062, "Duplicate entry ' - '\'\' for key \'uniq_tbl0a0b\'")', expected_value='') - - def test_mysql_mysqlconnector(self): - self._run_dupe_constraint_test( - "mysql", - '1062 (23000): Duplicate entry ' - '\'2-3\' for key \'uniq_tbl0a0b\'")', expected_value='2-3') - - def test_postgresql(self): - self._run_dupe_constraint_test( - 'postgresql', - 'duplicate key value violates unique constraint' - '"uniq_tbl0a0b"' - '\nDETAIL: Key (a, b)=(2, 3) already exists.\n', - expected_value='2, 3' - ) - - def test_mysql_single(self): - self._run_dupe_constraint_test( - "mysql", - "1062 (23000): Duplicate entry '2' for key 'b'", - expected_columns=['b'], - expected_value='2' - ) - - def test_mysql_binary(self): - self._run_dupe_constraint_test( - "mysql", - "(1062, \'Duplicate entry " - "\\\'\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E\\\' " - "for key \\\'PRIMARY\\\'\')", - expected_columns=['PRIMARY'], - expected_value="\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E" - ) - self._run_dupe_constraint_test( - "mysql", - "(1062, \'Duplicate entry " - "''\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E!,' " - "for key 'PRIMARY'\')", - expected_columns=['PRIMARY'], - expected_value="'\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E!," - ) - - def test_postgresql_single(self): - self._run_dupe_constraint_test( - 'postgresql', - 'duplicate key value violates unique constraint "uniq_tbl0b"\n' - 'DETAIL: Key (b)=(2) already exists.\n', - expected_columns=['b'], - expected_value='2' - ) - - def test_unsupported_backend(self): - self._not_dupe_constraint_test( - "nonexistent", "insert into table some_values", - self.IntegrityError("constraint violation"), - exception.DBError - ) - - def test_ibm_db_sa(self): - self._run_dupe_constraint_test( - 'ibm_db_sa', - 'SQL0803N One or more values in the INSERT statement, UPDATE ' - 'statement, or foreign key update caused by a DELETE statement are' - ' not valid because the primary key, unique constraint or unique ' - 'index identified by "2" constrains table "NOVA.KEY_PAIRS" from ' - 'having duplicate values for the index key.', - expected_columns=[] - ) - - def test_ibm_db_sa_notadupe(self): - self._not_dupe_constraint_test( - 'ibm_db_sa', - 'ALTER TABLE instance_types ADD CONSTRAINT ' - 'uniq_name_x_deleted UNIQUE (name, deleted)', - 'SQL0542N The column named "NAME" cannot be a column of a ' - 'primary key or unique key constraint because it can contain null ' - 'values.', - exception.DBError - ) - - -class TestDeadlock(TestsExceptionFilter): - statement = ('SELECT quota_usages.created_at AS ' - 'quota_usages_created_at FROM quota_usages ' - 'WHERE quota_usages.project_id = %(project_id_1)s ' - 'AND quota_usages.deleted = %(deleted_1)s FOR UPDATE') - params = { - 'project_id_1': '8891d4478bbf48ad992f050cdf55e9b5', - 'deleted_1': 0 - } - - def _run_deadlock_detect_test( - self, dialect_name, message, - orig_exception_cls=TestsExceptionFilter.OperationalError): - self._run_test( - dialect_name, self.statement, - orig_exception_cls(message), - exception.DBDeadlock, - params=self.params - ) - - def _not_deadlock_test( - self, dialect_name, message, - expected_cls, expected_dbapi_cls, - orig_exception_cls=TestsExceptionFilter.OperationalError): - - matched = self._run_test( - dialect_name, self.statement, - orig_exception_cls(message), - expected_cls, - params=self.params - ) - - if isinstance(matched, exception.DBError): - matched = matched.inner_exception - - self.assertEqual(expected_dbapi_cls, 
matched.orig.__class__.__name__) - - def test_mysql_pymysql_deadlock(self): - self._run_deadlock_detect_test( - "mysql", - "(1213, 'Deadlock found when trying " - "to get lock; try restarting " - "transaction')" - ) - - def test_mysql_pymysql_galera_deadlock(self): - self._run_deadlock_detect_test( - "mysql", - "(1205, 'Lock wait timeout exceeded; " - "try restarting transaction')", - orig_exception_cls=self.InternalError - ) - - def test_mysql_mysqlconnector_deadlock(self): - self._run_deadlock_detect_test( - "mysql", - "1213 (40001): Deadlock found when trying to get lock; try " - "restarting transaction", - orig_exception_cls=self.InternalError - ) - - def test_mysql_not_deadlock(self): - self._not_deadlock_test( - "mysql", - "(1005, 'some other error')", - sqla.exc.OperationalError, # note OperationalErrors are sent thru - "OperationalError", - ) - - def test_postgresql_deadlock(self): - self._run_deadlock_detect_test( - "postgresql", - "deadlock detected", - orig_exception_cls=self.TransactionRollbackError - ) - - def test_postgresql_not_deadlock(self): - self._not_deadlock_test( - "postgresql", - 'relation "fake" does not exist', - # can be either depending on #3075 - (exception.DBError, sqla.exc.OperationalError), - "TransactionRollbackError", - orig_exception_cls=self.TransactionRollbackError - ) - - def test_ibm_db_sa_deadlock(self): - self._run_deadlock_detect_test( - "ibm_db_sa", - "SQL0911N The current transaction has been " - "rolled back because of a deadlock or timeout", - # use the lowest class b.c. I don't know what actual error - # class DB2's driver would raise for this - orig_exception_cls=self.Error - ) - - def test_ibm_db_sa_not_deadlock(self): - self._not_deadlock_test( - "ibm_db_sa", - "SQL01234B Some other error.", - exception.DBError, - "Error", - orig_exception_cls=self.Error - ) - - -class TestDataError(TestsExceptionFilter): - def _run_bad_data_test(self, dialect_name, message, error_class): - self._run_test(dialect_name, - "INSERT INTO TABLE some_values", - error_class(message), - exception.DBDataError) - - def test_bad_data_incorrect_string(self): - # Error sourced from https://bugs.launchpad.net/cinder/+bug/1393871 - self._run_bad_data_test("mysql", - '(1366, "Incorrect string value: \'\\xF0\' ' - 'for column \'resource\' at row 1"', - self.OperationalError) - - def test_bad_data_out_of_range(self): - # Error sourced from https://bugs.launchpad.net/cinder/+bug/1463379 - self._run_bad_data_test("mysql", - '(1264, "Out of range value for column ' - '\'resource\' at row 1"', - self.DataError) - - -class IntegrationTest(test_base.DbTestCase): - """Test an actual error-raising round trips against the database.""" - - def setUp(self): - super(IntegrationTest, self).setUp() - meta = sqla.MetaData() - self.test_table = sqla.Table( - _TABLE_NAME, meta, - sqla.Column('id', sqla.Integer, - primary_key=True, nullable=False), - sqla.Column('counter', sqla.Integer, - nullable=False), - sqla.UniqueConstraint('counter', - name='uniq_counter')) - self.test_table.create(self.engine) - self.addCleanup(self.test_table.drop, self.engine) - - class Foo(object): - def __init__(self, counter): - self.counter = counter - mapper(Foo, self.test_table) - self.Foo = Foo - - def test_flush_wrapper_duplicate_entry(self): - """test a duplicate entry exception.""" - - _session = self.sessionmaker() - - with _session.begin(): - foo = self.Foo(counter=1) - _session.add(foo) - - _session.begin() - self.addCleanup(_session.rollback) - foo = self.Foo(counter=1) - _session.add(foo) - 
self.assertRaises(exception.DBDuplicateEntry, _session.flush) - - def test_autoflush_wrapper_duplicate_entry(self): - """Test a duplicate entry exception raised. - - test a duplicate entry exception raised via query.all()-> autoflush - """ - - _session = self.sessionmaker() - - with _session.begin(): - foo = self.Foo(counter=1) - _session.add(foo) - - _session.begin() - self.addCleanup(_session.rollback) - foo = self.Foo(counter=1) - _session.add(foo) - self.assertTrue(_session.autoflush) - self.assertRaises(exception.DBDuplicateEntry, - _session.query(self.Foo).all) - - def test_flush_wrapper_plain_integrity_error(self): - """test a plain integrity error wrapped as DBError.""" - - _session = self.sessionmaker() - - with _session.begin(): - foo = self.Foo(counter=1) - _session.add(foo) - - _session.begin() - self.addCleanup(_session.rollback) - foo = self.Foo(counter=None) - _session.add(foo) - self.assertRaises(exception.DBError, _session.flush) - - def test_flush_wrapper_operational_error(self): - """test an operational error from flush() raised as-is.""" - - _session = self.sessionmaker() - - with _session.begin(): - foo = self.Foo(counter=1) - _session.add(foo) - - _session.begin() - self.addCleanup(_session.rollback) - foo = self.Foo(counter=sqla.func.imfake(123)) - _session.add(foo) - matched = self.assertRaises(sqla.exc.OperationalError, _session.flush) - self.assertTrue("no such function" in str(matched)) - - def test_query_wrapper_operational_error(self): - """test an operational error from query.all() raised as-is.""" - - _session = self.sessionmaker() - - _session.begin() - self.addCleanup(_session.rollback) - q = _session.query(self.Foo).filter( - self.Foo.counter == sqla.func.imfake(123)) - matched = self.assertRaises(sqla.exc.OperationalError, q.all) - self.assertTrue("no such function" in str(matched)) - - -class TestDBDisconnected(TestsExceptionFilter): - - @contextlib.contextmanager - def _fixture( - self, - dialect_name, exception, num_disconnects, is_disconnect=True): - engine = self.engine - - event.listen(engine, "engine_connect", engines._connect_ping_listener) - - real_do_execute = engine.dialect.do_execute - counter = itertools.count(1) - - def fake_do_execute(self, *arg, **kw): - if next(counter) > num_disconnects: - return real_do_execute(self, *arg, **kw) - else: - raise exception - - with self._dbapi_fixture(dialect_name): - with test_utils.nested( - mock.patch.object(engine.dialect, - "do_execute", - fake_do_execute), - mock.patch.object(engine.dialect, - "is_disconnect", - mock.Mock(return_value=is_disconnect)) - ): - yield - - def _test_ping_listener_disconnected( - self, dialect_name, exc_obj, is_disconnect=True): - with self._fixture(dialect_name, exc_obj, 1, is_disconnect): - conn = self.engine.connect() - with conn.begin(): - self.assertEqual(1, conn.scalar(sqla.select([1]))) - self.assertFalse(conn.closed) - self.assertFalse(conn.invalidated) - self.assertTrue(conn.in_transaction()) - - with self._fixture(dialect_name, exc_obj, 2, is_disconnect): - self.assertRaises( - exception.DBConnectionError, - self.engine.connect - ) - - # test implicit execution - with self._fixture(dialect_name, exc_obj, 1): - self.assertEqual(1, self.engine.scalar(sqla.select([1]))) - - def test_mysql_ping_listener_disconnected(self): - for code in [2006, 2013, 2014, 2045, 2055]: - self._test_ping_listener_disconnected( - "mysql", - self.OperationalError('%d MySQL server has gone away' % code) - ) - - def test_mysql_ping_listener_disconnected_regex_only(self): - # intentionally 
set the is_disconnect flag to False - # in the "sqlalchemy" layer to make sure the regexp - # on _is_db_connection_error is catching - for code in [2002, 2003, 2006, 2013]: - self._test_ping_listener_disconnected( - "mysql", - self.OperationalError('%d MySQL server has gone away' % code), - is_disconnect=False - ) - - def test_mysql_galera_non_primary_disconnected(self): - self._test_ping_listener_disconnected( - "mysql", - self.OperationalError('(1047, \'Unknown command\') ' - '\'SELECT DATABASE()\' ()') - ) - - def test_mysql_galera_non_primary_disconnected_regex_only(self): - # intentionally set the is_disconnect flag to False - # in the "sqlalchemy" layer to make sure the regexp - # on _is_db_connection_error is catching - self._test_ping_listener_disconnected( - "mysql", - self.OperationalError('(1047, \'Unknown command\') ' - '\'SELECT DATABASE()\' ()'), - is_disconnect=False - ) - - def test_db2_ping_listener_disconnected(self): - self._test_ping_listener_disconnected( - "ibm_db_sa", - self.OperationalError( - 'SQL30081N: DB2 Server connection is no longer active') - ) - - def test_db2_ping_listener_disconnected_regex_only(self): - self._test_ping_listener_disconnected( - "ibm_db_sa", - self.OperationalError( - 'SQL30081N: DB2 Server connection is no longer active'), - is_disconnect=False - ) - - def test_postgresql_ping_listener_disconnected(self): - self._test_ping_listener_disconnected( - "postgresql", - self.OperationalError( - "could not connect to server: Connection refused"), - ) - - def test_postgresql_ping_listener_disconnected_regex_only(self): - self._test_ping_listener_disconnected( - "postgresql", - self.OperationalError( - "could not connect to server: Connection refused"), - is_disconnect=False - ) - - -class TestDBConnectRetry(TestsExceptionFilter): - - def _run_test(self, dialect_name, exception, count, retries): - counter = itertools.count() - - engine = self.engine - - # empty out the connection pool - engine.dispose() - - connect_fn = engine.dialect.connect - - def cant_connect(*arg, **kw): - if next(counter) < count: - raise exception - else: - return connect_fn(*arg, **kw) - - with self._dbapi_fixture(dialect_name): - with mock.patch.object(engine.dialect, "connect", cant_connect): - return engines._test_connection(engine, retries, .01) - - def test_connect_no_retries(self): - conn = self._run_test( - "mysql", - self.OperationalError("Error: (2003) something wrong"), - 2, 0 - ) - # didnt connect because nothing was tried - self.assertIsNone(conn) - - def test_connect_inifinite_retries(self): - conn = self._run_test( - "mysql", - self.OperationalError("Error: (2003) something wrong"), - 2, -1 - ) - # conn is good - self.assertEqual(1, conn.scalar(sqla.select([1]))) - - def test_connect_retry_past_failure(self): - conn = self._run_test( - "mysql", - self.OperationalError("Error: (2003) something wrong"), - 2, 3 - ) - # conn is good - self.assertEqual(1, conn.scalar(sqla.select([1]))) - - def test_connect_retry_not_candidate_exception(self): - self.assertRaises( - sqla.exc.OperationalError, # remember, we pass OperationalErrors - # through at the moment :) - self._run_test, - "mysql", - self.OperationalError("Error: (2015) I can't connect period"), - 2, 3 - ) - - def test_connect_retry_stops_infailure(self): - self.assertRaises( - exception.DBConnectionError, - self._run_test, - "mysql", - self.OperationalError("Error: (2003) something wrong"), - 3, 2 - ) - - def test_db2_error_positive(self): - conn = self._run_test( - "ibm_db_sa", - self.OperationalError("blah 
blah -30081 blah blah"), - 2, -1 - ) - # conn is good - self.assertEqual(1, conn.scalar(sqla.select([1]))) - - def test_db2_error_negative(self): - self.assertRaises( - sqla.exc.OperationalError, - self._run_test, - "ibm_db_sa", - self.OperationalError("blah blah -39981 blah blah"), - 2, 3 - ) - - -class TestDBConnectPingWrapping(TestsExceptionFilter): - - def setUp(self): - super(TestDBConnectPingWrapping, self).setUp() - event.listen( - self.engine, "engine_connect", engines._connect_ping_listener) - - @contextlib.contextmanager - def _fixture( - self, dialect_name, exception, good_conn_count, - is_disconnect=True): - engine = self.engine - - # empty out the connection pool - engine.dispose() - - connect_fn = engine.dialect.connect - real_do_execute = engine.dialect.do_execute - - counter = itertools.count(1) - - def cant_execute(*arg, **kw): - value = next(counter) - if value > good_conn_count: - raise exception - else: - return real_do_execute(*arg, **kw) - - def cant_connect(*arg, **kw): - value = next(counter) - if value > good_conn_count: - raise exception - else: - return connect_fn(*arg, **kw) - - with self._dbapi_fixture(dialect_name, is_disconnect=is_disconnect): - with mock.patch.object(engine.dialect, "connect", cant_connect): - with mock.patch.object( - engine.dialect, "do_execute", cant_execute): - yield - - def _test_ping_listener_disconnected( - self, dialect_name, exc_obj, is_disconnect=True): - with self._fixture(dialect_name, exc_obj, 3, is_disconnect): - conn = self.engine.connect() - self.assertEqual(1, conn.scalar(sqla.select([1]))) - conn.close() - - with self._fixture(dialect_name, exc_obj, 1, is_disconnect): - self.assertRaises( - exception.DBConnectionError, - self.engine.connect - ) - self.assertRaises( - exception.DBConnectionError, - self.engine.connect - ) - self.assertRaises( - exception.DBConnectionError, - self.engine.connect - ) - - with self._fixture(dialect_name, exc_obj, 1, is_disconnect): - self.assertRaises( - exception.DBConnectionError, - self.engine.contextual_connect - ) - self.assertRaises( - exception.DBConnectionError, - self.engine.contextual_connect - ) - self.assertRaises( - exception.DBConnectionError, - self.engine.contextual_connect - ) - - def test_mysql_w_disconnect_flag(self): - for code in [2002, 2003, 2002]: - self._test_ping_listener_disconnected( - "mysql", - self.OperationalError('%d MySQL server has gone away' % code) - ) - - def test_mysql_wo_disconnect_flag(self): - for code in [2002, 2003]: - self._test_ping_listener_disconnected( - "mysql", - self.OperationalError('%d MySQL server has gone away' % code), - is_disconnect=False - ) diff --git a/oslo_db/tests/sqlalchemy/test_fixtures.py b/oslo_db/tests/sqlalchemy/test_fixtures.py deleted file mode 100644 index 5d63d69..0000000 --- a/oslo_db/tests/sqlalchemy/test_fixtures.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
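The exception-filter tests removed above all follow one pattern: register oslo.db's filters on an engine, provoke a DBAPI error, and assert that it surfaces as the matching oslo_db.exception class. A minimal self-contained sketch of that pattern against in-memory SQLite, loosely mirroring the IntegrationTest table layout; the table and column names here are illustrative only:

    import sqlalchemy as sqla

    from oslo_db import exception
    from oslo_db.sqlalchemy import exc_filters

    # plain SQLite engine with oslo.db's error translation attached
    engine = sqla.create_engine("sqlite://")
    exc_filters.register_engine(engine)

    meta = sqla.MetaData()
    demo = sqla.Table(
        "demo", meta,
        sqla.Column("id", sqla.Integer, primary_key=True, nullable=False),
        sqla.Column("counter", sqla.Integer, nullable=False),
        sqla.UniqueConstraint("counter", name="uniq_counter"),
    )
    meta.create_all(engine)

    engine.execute(demo.insert().values(id=1, counter=1))
    try:
        # violates uniq_counter; the registered filters re-raise the raw
        # IntegrityError as oslo_db's DBDuplicateEntry
        engine.execute(demo.insert().values(id=2, counter=1))
    except exception.DBDuplicateEntry as err:
        assert err.columns == ["counter"]
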
- -import mock - -from oslo_db.sqlalchemy import provision -from oslo_db.sqlalchemy import test_base -from oslotest import base as oslo_test_base - - -class BackendSkipTest(oslo_test_base.BaseTestCase): - - def test_skip_no_dbapi(self): - - class FakeDatabaseOpportunisticFixture(test_base.DbFixture): - DRIVER = 'postgresql' - - class SomeTest(test_base.DbTestCase): - FIXTURE = FakeDatabaseOpportunisticFixture - - def runTest(self): - pass - - st = SomeTest() - - # patch in replacement lookup dictionaries to avoid - # leaking from/to other tests - with mock.patch( - "oslo_db.sqlalchemy.provision." - "Backend.backends_by_database_type", { - "postgresql": - provision.Backend("postgresql", "postgresql://")}): - st._database_resources = {} - st._db_not_available = {} - st._schema_resources = {} - - with mock.patch( - "sqlalchemy.create_engine", - mock.Mock(side_effect=ImportError())): - - self.assertEqual([], st.resources) - - ex = self.assertRaises( - self.skipException, - st.setUp - ) - - self.assertEqual( - "Backend 'postgresql' is unavailable: No DBAPI installed", - str(ex) - ) - - def test_skip_no_such_backend(self): - - class FakeDatabaseOpportunisticFixture(test_base.DbFixture): - DRIVER = 'postgresql+nosuchdbapi' - - class SomeTest(test_base.DbTestCase): - FIXTURE = FakeDatabaseOpportunisticFixture - - def runTest(self): - pass - - st = SomeTest() - - ex = self.assertRaises( - self.skipException, - st.setUp - ) - - self.assertEqual( - "Backend 'postgresql+nosuchdbapi' is unavailable: No such backend", - str(ex) - ) diff --git a/oslo_db/tests/sqlalchemy/test_migrate_cli.py b/oslo_db/tests/sqlalchemy/test_migrate_cli.py deleted file mode 100644 index dc30139..0000000 --- a/oslo_db/tests/sqlalchemy/test_migrate_cli.py +++ /dev/null @@ -1,348 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import alembic -import mock -from oslotest import base as test_base -import sqlalchemy - -from oslo_db import exception -from oslo_db.sqlalchemy.migration_cli import ext_alembic -from oslo_db.sqlalchemy.migration_cli import ext_migrate -from oslo_db.sqlalchemy.migration_cli import manager - - -class MockWithCmp(mock.MagicMock): - - order = 0 - - def __init__(self, *args, **kwargs): - super(MockWithCmp, self).__init__(*args, **kwargs) - - self.__lt__ = lambda self, other: self.order < other.order - - -@mock.patch(('oslo_db.sqlalchemy.migration_cli.' 
- 'ext_alembic.alembic.command')) -class TestAlembicExtension(test_base.BaseTestCase): - - def setUp(self): - self.migration_config = {'alembic_ini_path': '.', - 'db_url': 'sqlite://'} - self.engine = sqlalchemy.create_engine(self.migration_config['db_url']) - self.alembic = ext_alembic.AlembicExtension( - self.engine, self.migration_config) - super(TestAlembicExtension, self).setUp() - - def test_check_enabled_true(self, command): - """Check enabled returns True - - Verifies that enabled returns True on non empty - alembic_ini_path conf variable - """ - self.assertTrue(self.alembic.enabled) - - def test_check_enabled_false(self, command): - """Check enabled returns False - - Verifies enabled returns False on empty alembic_ini_path variable - """ - self.migration_config['alembic_ini_path'] = '' - alembic = ext_alembic.AlembicExtension( - self.engine, self.migration_config) - self.assertFalse(alembic.enabled) - - def test_upgrade_none(self, command): - self.alembic.upgrade(None) - command.upgrade.assert_called_once_with(self.alembic.config, 'head') - - def test_upgrade_normal(self, command): - self.alembic.upgrade('131daa') - command.upgrade.assert_called_once_with(self.alembic.config, '131daa') - - def test_downgrade_none(self, command): - self.alembic.downgrade(None) - command.downgrade.assert_called_once_with(self.alembic.config, 'base') - - def test_downgrade_int(self, command): - self.alembic.downgrade(111) - command.downgrade.assert_called_once_with(self.alembic.config, 'base') - - def test_downgrade_normal(self, command): - self.alembic.downgrade('131daa') - command.downgrade.assert_called_once_with( - self.alembic.config, '131daa') - - def test_revision(self, command): - self.alembic.revision(message='test', autogenerate=True) - command.revision.assert_called_once_with( - self.alembic.config, message='test', autogenerate=True) - - def test_stamp(self, command): - self.alembic.stamp('stamp') - command.stamp.assert_called_once_with( - self.alembic.config, revision='stamp') - - def test_version(self, command): - version = self.alembic.version() - self.assertIsNone(version) - - def test_has_revision(self, command): - with mock.patch(('oslo_db.sqlalchemy.migration_cli.' - 'ext_alembic.alembic_script')) as mocked: - self.alembic.config.get_main_option = mock.Mock() - # since alembic_script is mocked and no exception is raised, call - # will result in success - self.assertIs(True, self.alembic.has_revision('test')) - self.alembic.config.get_main_option.assert_called_once_with( - 'script_location') - mocked.ScriptDirectory().get_revision.assert_called_once_with( - 'test') - self.assertIs(True, self.alembic.has_revision(None)) - self.assertIs(True, self.alembic.has_revision('head')) - # relative revision, should be True for alembic - self.assertIs(True, self.alembic.has_revision('+1')) - - def test_has_revision_negative(self, command): - with mock.patch(('oslo_db.sqlalchemy.migration_cli.' - 'ext_alembic.alembic_script')) as mocked: - mocked.ScriptDirectory().get_revision.side_effect = ( - alembic.util.CommandError) - self.alembic.config.get_main_option = mock.Mock() - # exception is raised, the call should be false - self.assertIs(False, self.alembic.has_revision('test')) - self.alembic.config.get_main_option.assert_called_once_with( - 'script_location') - mocked.ScriptDirectory().get_revision.assert_called_once_with( - 'test') - - -@mock.patch(('oslo_db.sqlalchemy.migration_cli.' 
- 'ext_migrate.migration')) -class TestMigrateExtension(test_base.BaseTestCase): - - def setUp(self): - self.migration_config = {'migration_repo_path': '.', - 'db_url': 'sqlite://'} - self.engine = sqlalchemy.create_engine(self.migration_config['db_url']) - self.migrate = ext_migrate.MigrateExtension( - self.engine, self.migration_config) - super(TestMigrateExtension, self).setUp() - - def test_check_enabled_true(self, migration): - self.assertTrue(self.migrate.enabled) - - def test_check_enabled_false(self, migration): - self.migration_config['migration_repo_path'] = '' - migrate = ext_migrate.MigrateExtension( - self.engine, self.migration_config) - self.assertFalse(migrate.enabled) - - def test_upgrade_head(self, migration): - self.migrate.upgrade('head') - migration.db_sync.assert_called_once_with( - self.migrate.engine, self.migrate.repository, None, init_version=0) - - def test_upgrade_normal(self, migration): - self.migrate.upgrade(111) - migration.db_sync.assert_called_once_with( - mock.ANY, self.migrate.repository, 111, init_version=0) - - def test_downgrade_init_version_from_base(self, migration): - self.migrate.downgrade('base') - migration.db_sync.assert_called_once_with( - self.migrate.engine, self.migrate.repository, mock.ANY, - init_version=mock.ANY) - - def test_downgrade_init_version_from_none(self, migration): - self.migrate.downgrade(None) - migration.db_sync.assert_called_once_with( - self.migrate.engine, self.migrate.repository, mock.ANY, - init_version=mock.ANY) - - def test_downgrade_normal(self, migration): - self.migrate.downgrade(101) - migration.db_sync.assert_called_once_with( - self.migrate.engine, self.migrate.repository, 101, init_version=0) - - def test_version(self, migration): - self.migrate.version() - migration.db_version.assert_called_once_with( - self.migrate.engine, self.migrate.repository, init_version=0) - - def test_change_init_version(self, migration): - self.migration_config['init_version'] = 101 - migrate = ext_migrate.MigrateExtension( - self.engine, self.migration_config) - migrate.downgrade(None) - migration.db_sync.assert_called_once_with( - migrate.engine, - self.migrate.repository, - self.migration_config['init_version'], - init_version=self.migration_config['init_version']) - - def test_has_revision(self, command): - with mock.patch(('oslo_db.sqlalchemy.migration_cli.' - 'ext_migrate.migrate_version')) as mocked: - self.migrate.has_revision('test') - mocked.Collection().version.assert_called_once_with('test') - # tip of the branch should always be True - self.assertIs(True, self.migrate.has_revision(None)) - - def test_has_revision_negative(self, command): - with mock.patch(('oslo_db.sqlalchemy.migration_cli.' 
- 'ext_migrate.migrate_version')) as mocked: - mocked.Collection().version.side_effect = ValueError - self.assertIs(False, self.migrate.has_revision('test')) - mocked.Collection().version.assert_called_once_with('test') - # relative revision, should be False for migrate - self.assertIs(False, self.migrate.has_revision('+1')) - - -class TestMigrationManager(test_base.BaseTestCase): - - def setUp(self): - self.migration_config = {'alembic_ini_path': '.', - 'migrate_repo_path': '.', - 'db_url': 'sqlite://'} - engine = sqlalchemy.create_engine(self.migration_config['db_url']) - self.migration_manager = manager.MigrationManager( - self.migration_config, engine) - self.ext = mock.Mock() - self.ext.obj.version = mock.Mock(return_value=0) - self.migration_manager._manager.extensions = [self.ext] - super(TestMigrationManager, self).setUp() - - def test_manager_update(self): - self.migration_manager.upgrade('head') - self.ext.obj.upgrade.assert_called_once_with('head') - - def test_manager_update_revision_none(self): - self.migration_manager.upgrade(None) - self.ext.obj.upgrade.assert_called_once_with(None) - - def test_downgrade_normal_revision(self): - self.migration_manager.downgrade('111abcd') - self.ext.obj.downgrade.assert_called_once_with('111abcd') - - def test_version(self): - self.migration_manager.version() - self.ext.obj.version.assert_called_once_with() - - def test_version_return_value(self): - version = self.migration_manager.version() - self.assertEqual(0, version) - - def test_revision_message_autogenerate(self): - self.migration_manager.revision('test', True) - self.ext.obj.revision.assert_called_once_with('test', True) - - def test_revision_only_message(self): - self.migration_manager.revision('test', False) - self.ext.obj.revision.assert_called_once_with('test', False) - - def test_stamp(self): - self.migration_manager.stamp('stamp') - self.ext.obj.stamp.assert_called_once_with('stamp') - - def test_wrong_config(self): - err = self.assertRaises(ValueError, - manager.MigrationManager, - {'wrong_key': 'sqlite://'}) - self.assertEqual('Either database url or engine must be provided.', - err.args[0]) - - -class TestMigrationMultipleExtensions(test_base.BaseTestCase): - - def setUp(self): - self.migration_config = {'alembic_ini_path': '.', - 'migrate_repo_path': '.', - 'db_url': 'sqlite://'} - engine = sqlalchemy.create_engine(self.migration_config['db_url']) - self.migration_manager = manager.MigrationManager( - self.migration_config, engine) - self.first_ext = MockWithCmp() - self.first_ext.obj.order = 1 - self.first_ext.obj.upgrade.return_value = 100 - self.first_ext.obj.downgrade.return_value = 0 - self.second_ext = MockWithCmp() - self.second_ext.obj.order = 2 - self.second_ext.obj.upgrade.return_value = 200 - self.second_ext.obj.downgrade.return_value = 100 - self.migration_manager._manager.extensions = [self.first_ext, - self.second_ext] - super(TestMigrationMultipleExtensions, self).setUp() - - def test_upgrade_right_order(self): - results = self.migration_manager.upgrade(None) - self.assertEqual([100, 200], results) - - def test_downgrade_right_order(self): - results = self.migration_manager.downgrade(None) - self.assertEqual([100, 0], results) - - def test_upgrade_does_not_go_too_far(self): - self.first_ext.obj.has_revision.return_value = True - self.second_ext.obj.has_revision.return_value = False - self.second_ext.obj.upgrade.side_effect = AssertionError( - 'this method should not have been called') - - results = self.migration_manager.upgrade(100) - 
self.assertEqual([100], results) - - def test_downgrade_does_not_go_too_far(self): - self.second_ext.obj.has_revision.return_value = True - self.first_ext.obj.has_revision.return_value = False - self.first_ext.obj.downgrade.side_effect = AssertionError( - 'this method should not have been called') - - results = self.migration_manager.downgrade(100) - self.assertEqual([100], results) - - def test_upgrade_checks_rev_existence(self): - self.first_ext.obj.has_revision.return_value = False - self.second_ext.obj.has_revision.return_value = False - - # upgrade to a specific non-existent revision should fail - self.assertRaises(exception.DBMigrationError, - self.migration_manager.upgrade, 100) - - # upgrade to the "head" should succeed - self.assertEqual([100, 200], self.migration_manager.upgrade(None)) - - # let's assume the second ext has the revision, upgrade should succeed - self.second_ext.obj.has_revision.return_value = True - self.assertEqual([100, 200], self.migration_manager.upgrade(200)) - - # upgrade to the "head" should still succeed - self.assertEqual([100, 200], self.migration_manager.upgrade(None)) - - def test_downgrade_checks_rev_existence(self): - self.first_ext.obj.has_revision.return_value = False - self.second_ext.obj.has_revision.return_value = False - - # upgrade to a specific non-existent revision should fail - self.assertRaises(exception.DBMigrationError, - self.migration_manager.downgrade, 100) - - # downgrade to the "base" should succeed - self.assertEqual([100, 0], self.migration_manager.downgrade(None)) - - # let's assume the second ext has the revision, downgrade should - # succeed - self.first_ext.obj.has_revision.return_value = True - self.assertEqual([100, 0], self.migration_manager.downgrade(200)) - - # downgrade to the "base" should still succeed - self.assertEqual([100, 0], self.migration_manager.downgrade(None)) - self.assertEqual([100, 0], self.migration_manager.downgrade('base')) diff --git a/oslo_db/tests/sqlalchemy/test_migration_common.py b/oslo_db/tests/sqlalchemy/test_migration_common.py deleted file mode 100644 index 9041b90..0000000 --- a/oslo_db/tests/sqlalchemy/test_migration_common.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2013 Mirantis Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
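The migration_cli tests deleted above drive schema migrations through a single MigrationManager built from a config dict and an engine, with the alembic and sqlalchemy-migrate extensions ordered behind it. A rough usage sketch of that entry point; the paths and URL below are placeholders, and a real alembic.ini / migrate repository would be needed for the calls to do real work:

    import sqlalchemy

    from oslo_db.sqlalchemy.migration_cli import manager

    migration_config = {
        'alembic_ini_path': '/path/to/alembic.ini',    # placeholder
        'migrate_repo_path': '/path/to/migrate_repo',  # placeholder
        'db_url': 'sqlite://',
    }
    engine = sqlalchemy.create_engine(migration_config['db_url'])

    migration_manager = manager.MigrationManager(migration_config, engine)

    # upgrade to the newest revision known to the enabled extensions,
    # then ask them where the schema ended up
    migration_manager.upgrade(None)
    current = migration_manager.version()
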
-# - -import os -import tempfile - -from migrate import exceptions as migrate_exception -from migrate.versioning import api as versioning_api -import mock -import sqlalchemy - -from oslo_db import exception as db_exception -from oslo_db.sqlalchemy import migration -from oslo_db.sqlalchemy import test_base -from oslo_db.tests import utils as test_utils - - -class TestMigrationCommon(test_base.DbTestCase): - def setUp(self): - super(TestMigrationCommon, self).setUp() - - migration._REPOSITORY = None - self.path = tempfile.mkdtemp('test_migration') - self.path1 = tempfile.mkdtemp('test_migration') - self.return_value = '/home/openstack/migrations' - self.return_value1 = '/home/extension/migrations' - self.init_version = 1 - self.test_version = 123 - - self.patcher_repo = mock.patch.object(migration, 'Repository') - self.repository = self.patcher_repo.start() - self.repository.side_effect = [self.return_value, self.return_value1] - - self.mock_api_db = mock.patch.object(versioning_api, 'db_version') - self.mock_api_db_version = self.mock_api_db.start() - self.mock_api_db_version.return_value = self.test_version - - def tearDown(self): - os.rmdir(self.path) - self.mock_api_db.stop() - self.patcher_repo.stop() - super(TestMigrationCommon, self).tearDown() - - def test_find_migrate_repo_path_not_found(self): - self.assertRaises( - db_exception.DBMigrationError, - migration._find_migrate_repo, - "/foo/bar/", - ) - self.assertIsNone(migration._REPOSITORY) - - def test_find_migrate_repo_called_once(self): - my_repository = migration._find_migrate_repo(self.path) - self.repository.assert_called_once_with(self.path) - self.assertEqual(self.return_value, my_repository) - - def test_find_migrate_repo_called_few_times(self): - repo1 = migration._find_migrate_repo(self.path) - repo2 = migration._find_migrate_repo(self.path1) - self.assertNotEqual(repo1, repo2) - - def test_db_version_control(self): - with test_utils.nested( - mock.patch.object(migration, '_find_migrate_repo'), - mock.patch.object(versioning_api, 'version_control'), - ) as (mock_find_repo, mock_version_control): - mock_find_repo.return_value = self.return_value - - version = migration.db_version_control( - self.engine, self.path, self.test_version) - - self.assertEqual(self.test_version, version) - mock_version_control.assert_called_once_with( - self.engine, self.return_value, self.test_version) - - @mock.patch.object(migration, '_find_migrate_repo') - @mock.patch.object(versioning_api, 'version_control') - def test_db_version_control_version_less_than_actual_version( - self, mock_version_control, mock_find_repo): - mock_find_repo.return_value = self.return_value - mock_version_control.side_effect = (migrate_exception. - DatabaseAlreadyControlledError) - self.assertRaises(db_exception.DBMigrationError, - migration.db_version_control, self.engine, - self.path, self.test_version - 1) - - @mock.patch.object(migration, '_find_migrate_repo') - @mock.patch.object(versioning_api, 'version_control') - def test_db_version_control_version_greater_than_actual_version( - self, mock_version_control, mock_find_repo): - mock_find_repo.return_value = self.return_value - mock_version_control.side_effect = (migrate_exception. 
- InvalidVersionError) - self.assertRaises(db_exception.DBMigrationError, - migration.db_version_control, self.engine, - self.path, self.test_version + 1) - - def test_db_version_return(self): - ret_val = migration.db_version(self.engine, self.path, - self.init_version) - self.assertEqual(self.test_version, ret_val) - - def test_db_version_raise_not_controlled_error_first(self): - with mock.patch.object(migration, 'db_version_control') as mock_ver: - - self.mock_api_db_version.side_effect = [ - migrate_exception.DatabaseNotControlledError('oups'), - self.test_version] - - ret_val = migration.db_version(self.engine, self.path, - self.init_version) - self.assertEqual(self.test_version, ret_val) - mock_ver.assert_called_once_with(self.engine, self.path, - version=self.init_version) - - def test_db_version_raise_not_controlled_error_tables(self): - with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta: - self.mock_api_db_version.side_effect = \ - migrate_exception.DatabaseNotControlledError('oups') - my_meta = mock.MagicMock() - my_meta.tables = {'a': 1, 'b': 2} - mock_meta.return_value = my_meta - - self.assertRaises( - db_exception.DBMigrationError, migration.db_version, - self.engine, self.path, self.init_version) - - @mock.patch.object(versioning_api, 'version_control') - def test_db_version_raise_not_controlled_error_no_tables(self, mock_vc): - with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta: - self.mock_api_db_version.side_effect = ( - migrate_exception.DatabaseNotControlledError('oups'), - self.init_version) - my_meta = mock.MagicMock() - my_meta.tables = {} - mock_meta.return_value = my_meta - migration.db_version(self.engine, self.path, self.init_version) - - mock_vc.assert_called_once_with(self.engine, self.return_value1, - self.init_version) - - def test_db_sync_wrong_version(self): - self.assertRaises(db_exception.DBMigrationError, - migration.db_sync, self.engine, self.path, 'foo') - - def test_db_sync_upgrade(self): - init_ver = 55 - with test_utils.nested( - mock.patch.object(migration, '_find_migrate_repo'), - mock.patch.object(versioning_api, 'upgrade') - ) as (mock_find_repo, mock_upgrade): - - mock_find_repo.return_value = self.return_value - self.mock_api_db_version.return_value = self.test_version - 1 - - migration.db_sync(self.engine, self.path, self.test_version, - init_ver) - - mock_upgrade.assert_called_once_with( - self.engine, self.return_value, self.test_version) - - def test_db_sync_downgrade(self): - with test_utils.nested( - mock.patch.object(migration, '_find_migrate_repo'), - mock.patch.object(versioning_api, 'downgrade') - ) as (mock_find_repo, mock_downgrade): - - mock_find_repo.return_value = self.return_value - self.mock_api_db_version.return_value = self.test_version + 1 - - migration.db_sync(self.engine, self.path, self.test_version) - - mock_downgrade.assert_called_once_with( - self.engine, self.return_value, self.test_version) - - def test_db_sync_sanity_called(self): - with test_utils.nested( - mock.patch.object(migration, '_find_migrate_repo'), - mock.patch.object(migration, '_db_schema_sanity_check'), - mock.patch.object(versioning_api, 'downgrade') - ) as (mock_find_repo, mock_sanity, mock_downgrade): - - mock_find_repo.return_value = self.return_value - migration.db_sync(self.engine, self.path, self.test_version) - - self.assertEqual([mock.call(self.engine), mock.call(self.engine)], - mock_sanity.call_args_list) - - def test_db_sync_sanity_skipped(self): - with test_utils.nested( - mock.patch.object(migration, 
'_find_migrate_repo'), - mock.patch.object(migration, '_db_schema_sanity_check'), - mock.patch.object(versioning_api, 'downgrade') - ) as (mock_find_repo, mock_sanity, mock_downgrade): - - mock_find_repo.return_value = self.return_value - migration.db_sync(self.engine, self.path, self.test_version, - sanity_check=False) - - self.assertFalse(mock_sanity.called) - - def test_db_sanity_table_not_utf8(self): - with mock.patch.object(self, 'engine') as mock_eng: - type(mock_eng).name = mock.PropertyMock(return_value='mysql') - mock_eng.execute.return_value = [['table_A', 'latin1'], - ['table_B', 'latin1']] - - self.assertRaises(ValueError, migration._db_schema_sanity_check, - mock_eng) - - def test_db_sanity_table_not_utf8_exclude_migrate_tables(self): - with mock.patch.object(self, 'engine') as mock_eng: - type(mock_eng).name = mock.PropertyMock(return_value='mysql') - # NOTE(morganfainberg): Check both lower and upper case versions - # of the migration table names (validate case insensitivity in - # the sanity check. - mock_eng.execute.return_value = [['migrate_version', 'latin1'], - ['alembic_version', 'latin1'], - ['MIGRATE_VERSION', 'latin1'], - ['ALEMBIC_VERSION', 'latin1']] - - migration._db_schema_sanity_check(mock_eng) diff --git a/oslo_db/tests/sqlalchemy/test_migrations.py b/oslo_db/tests/sqlalchemy/test_migrations.py deleted file mode 100644 index 7a17fb4..0000000 --- a/oslo_db/tests/sqlalchemy/test_migrations.py +++ /dev/null @@ -1,566 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2012-2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
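The test_migration_common.py tests above mock out the underlying sqlalchemy-migrate calls; the public surface they exercise is oslo_db.sqlalchemy.migration. A condensed sketch of those helpers, under the assumption that repo_path points at a real migrate repository (the path below is hypothetical):

    import sqlalchemy

    from oslo_db.sqlalchemy import migration

    engine = sqlalchemy.create_engine('sqlite://')
    repo_path = '/path/to/migrate_repo'   # hypothetical migrate repository

    # put an uncontrolled database under version control, sync it to the
    # latest available revision, then read the resulting version back
    migration.db_version_control(engine, repo_path, version=0)
    migration.db_sync(engine, repo_path)
    print(migration.db_version(engine, repo_path, init_version=0))
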
- -import fixtures -from migrate.versioning import api as versioning_api -import mock -from oslotest import base as test -import six -import sqlalchemy as sa -import sqlalchemy.ext.declarative as sa_decl - -from oslo_db import exception as exc -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import test_migrations as migrate - - -class TestWalkVersions(test.BaseTestCase, migrate.WalkVersionsMixin): - migration_api = mock.MagicMock() - REPOSITORY = mock.MagicMock() - engine = mock.MagicMock() - INIT_VERSION = versioning_api.VerNum(4) - - @property - def migrate_engine(self): - return self.engine - - def test_migrate_up(self): - self.migration_api.db_version.return_value = 141 - - self.migrate_up(141) - - self.migration_api.upgrade.assert_called_with( - self.engine, self.REPOSITORY, 141) - self.migration_api.db_version.assert_called_with( - self.engine, self.REPOSITORY) - - @staticmethod - def _fake_upgrade_boom(*args, **kwargs): - raise exc.DBMigrationError("boom") - - def test_migrate_up_fail(self): - version = 141 - self.migration_api.db_version.return_value = version - expected_output = (u"Failed to migrate to version %(version)s on " - "engine %(engine)s\n" % - {'version': version, 'engine': self.engine}) - - with mock.patch.object(self.migration_api, - 'upgrade', - side_effect=self._fake_upgrade_boom): - log = self.useFixture(fixtures.FakeLogger()) - self.assertRaises(exc.DBMigrationError, self.migrate_up, version) - self.assertEqual(expected_output, log.output) - - def test_migrate_up_with_data(self): - test_value = {"a": 1, "b": 2} - self.migration_api.db_version.return_value = 141 - self._pre_upgrade_141 = mock.MagicMock() - self._pre_upgrade_141.return_value = test_value - self._check_141 = mock.MagicMock() - - self.migrate_up(141, True) - - self._pre_upgrade_141.assert_called_with(self.engine) - self._check_141.assert_called_with(self.engine, test_value) - - def test_migrate_down(self): - self.migration_api.db_version.return_value = 42 - - self.assertTrue(self.migrate_down(42)) - self.migration_api.db_version.assert_called_with( - self.engine, self.REPOSITORY) - - def test_migrate_down_not_implemented(self): - with mock.patch.object(self.migration_api, - 'downgrade', - side_effect=NotImplementedError): - self.assertFalse(self.migrate_down(self.engine, 42)) - - def test_migrate_down_with_data(self): - self._post_downgrade_043 = mock.MagicMock() - self.migration_api.db_version.return_value = 42 - - self.migrate_down(42, True) - - self._post_downgrade_043.assert_called_with(self.engine) - - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up') - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down') - def test_walk_versions_all_default(self, migrate_up, migrate_down): - self.REPOSITORY.latest = versioning_api.VerNum(20) - self.migration_api.db_version.return_value = self.INIT_VERSION - - self.walk_versions() - - self.migration_api.version_control.assert_called_with( - self.engine, self.REPOSITORY, self.INIT_VERSION) - self.migration_api.db_version.assert_called_with( - self.engine, self.REPOSITORY) - - versions = range(int(self.INIT_VERSION) + 1, - int(self.REPOSITORY.latest) + 1) - upgraded = [mock.call(v, with_data=True) - for v in versions] - self.assertEqual(upgraded, self.migrate_up.call_args_list) - - downgraded = [mock.call(v - 1) for v in reversed(versions)] - self.assertEqual(downgraded, self.migrate_down.call_args_list) - - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up') - @mock.patch.object(migrate.WalkVersionsMixin, 
'migrate_down') - def test_walk_versions_all_true(self, migrate_up, migrate_down): - self.REPOSITORY.latest = versioning_api.VerNum(20) - self.migration_api.db_version.return_value = self.INIT_VERSION - - self.walk_versions(snake_walk=True, downgrade=True) - - versions = range(int(self.INIT_VERSION) + 1, - int(self.REPOSITORY.latest) + 1) - upgraded = [] - for v in versions: - upgraded.append(mock.call(v, with_data=True)) - upgraded.append(mock.call(v)) - upgraded.extend([mock.call(v) for v in reversed(versions)]) - self.assertEqual(upgraded, self.migrate_up.call_args_list) - - downgraded_1 = [mock.call(v - 1, with_data=True) for v in versions] - downgraded_2 = [] - for v in reversed(versions): - downgraded_2.append(mock.call(v - 1)) - downgraded_2.append(mock.call(v - 1)) - downgraded = downgraded_1 + downgraded_2 - self.assertEqual(downgraded, self.migrate_down.call_args_list) - - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up') - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down') - def test_walk_versions_true_false(self, migrate_up, migrate_down): - self.REPOSITORY.latest = versioning_api.VerNum(20) - self.migration_api.db_version.return_value = self.INIT_VERSION - - self.walk_versions(snake_walk=True, downgrade=False) - - versions = range(int(self.INIT_VERSION) + 1, - int(self.REPOSITORY.latest) + 1) - - upgraded = [] - for v in versions: - upgraded.append(mock.call(v, with_data=True)) - upgraded.append(mock.call(v)) - self.assertEqual(upgraded, self.migrate_up.call_args_list) - - downgraded = [mock.call(v - 1, with_data=True) for v in versions] - self.assertEqual(downgraded, self.migrate_down.call_args_list) - - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up') - @mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down') - def test_walk_versions_all_false(self, migrate_up, migrate_down): - self.REPOSITORY.latest = versioning_api.VerNum(20) - self.migration_api.db_version.return_value = self.INIT_VERSION - - self.walk_versions(snake_walk=False, downgrade=False) - - versions = range(int(self.INIT_VERSION) + 1, - int(self.REPOSITORY.latest) + 1) - - upgraded = [mock.call(v, with_data=True) for v in versions] - self.assertEqual(upgraded, self.migrate_up.call_args_list) - - -class ModelsMigrationSyncMixin(test_base.DbTestCase): - - def setUp(self): - super(ModelsMigrationSyncMixin, self).setUp() - - self.metadata = sa.MetaData() - self.metadata_migrations = sa.MetaData() - - sa.Table( - 'testtbl', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('spam', sa.String(10), nullable=False), - sa.Column('eggs', sa.DateTime), - sa.Column('foo', sa.Boolean, - server_default=sa.sql.expression.true()), - sa.Column('bool_wo_default', sa.Boolean), - sa.Column('bar', sa.Numeric(10, 5)), - sa.Column('defaulttest', sa.Integer, server_default='5'), - sa.Column('defaulttest2', sa.String(8), server_default=''), - sa.Column('defaulttest3', sa.String(5), server_default="test"), - sa.Column('defaulttest4', sa.Enum('first', 'second', - name='testenum'), - server_default="first"), - sa.Column('variant', sa.BigInteger()), - sa.Column('variant2', sa.BigInteger(), server_default='0'), - sa.Column('fk_check', sa.String(36), nullable=False), - sa.UniqueConstraint('spam', 'eggs', name='uniq_cons'), - ) - - BASE = sa_decl.declarative_base(metadata=self.metadata) - - class TestModel(BASE): - __tablename__ = 'testtbl' - __table_args__ = ( - sa.UniqueConstraint('spam', 'eggs', name='uniq_cons'), - ) - - id = sa.Column('id', sa.Integer, 
primary_key=True) - spam = sa.Column('spam', sa.String(10), nullable=False) - eggs = sa.Column('eggs', sa.DateTime) - foo = sa.Column('foo', sa.Boolean, - server_default=sa.sql.expression.true()) - fk_check = sa.Column('fk_check', sa.String(36), nullable=False) - bool_wo_default = sa.Column('bool_wo_default', sa.Boolean) - defaulttest = sa.Column('defaulttest', - sa.Integer, server_default='5') - defaulttest2 = sa.Column('defaulttest2', sa.String(8), - server_default='') - defaulttest3 = sa.Column('defaulttest3', sa.String(5), - server_default="test") - defaulttest4 = sa.Column('defaulttest4', sa.Enum('first', 'second', - name='testenum'), - server_default="first") - variant = sa.Column(sa.BigInteger().with_variant( - sa.Integer(), 'sqlite')) - variant2 = sa.Column(sa.BigInteger().with_variant( - sa.Integer(), 'sqlite'), server_default='0') - bar = sa.Column('bar', sa.Numeric(10, 5)) - - class ModelThatShouldNotBeCompared(BASE): - __tablename__ = 'testtbl2' - - id = sa.Column('id', sa.Integer, primary_key=True) - spam = sa.Column('spam', sa.String(10), nullable=False) - - def get_metadata(self): - return self.metadata - - def get_engine(self): - return self.engine - - def db_sync(self, engine): - self.metadata_migrations.create_all(bind=engine) - - def include_object(self, object_, name, type_, reflected, compare_to): - if type_ == 'table': - return name == 'testtbl' - else: - return True - - def _test_models_not_sync_filtered(self): - self.metadata_migrations.clear() - sa.Table( - 'table', self.metadata_migrations, - sa.Column('fk_check', sa.String(36), nullable=False), - sa.PrimaryKeyConstraint('fk_check'), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('spam', sa.String(8), nullable=True), - sa.Column('eggs', sa.DateTime), - sa.Column('foo', sa.Boolean, - server_default=sa.sql.expression.false()), - sa.Column('bool_wo_default', sa.Boolean, unique=True), - sa.Column('bar', sa.BigInteger), - sa.Column('defaulttest', sa.Integer, server_default='7'), - sa.Column('defaulttest2', sa.String(8), server_default=''), - sa.Column('defaulttest3', sa.String(5), server_default="fake"), - sa.Column('defaulttest4', - sa.Enum('first', 'second', name='testenum'), - server_default="first"), - sa.Column('fk_check', sa.String(36), nullable=False), - sa.UniqueConstraint('spam', 'foo', name='uniq_cons'), - sa.ForeignKeyConstraint(['fk_check'], ['table.fk_check']), - mysql_engine='InnoDB' - ) - - with mock.patch.object(self, 'filter_metadata_diff') as filter_mock: - def filter_diffs(diffs): - # test filter returning only constraint related diffs - return [ - diff - for diff in diffs - if 'constraint' in diff[0] - ] - filter_mock.side_effect = filter_diffs - - msg = six.text_type(self.assertRaises(AssertionError, - self.test_models_sync)) - self.assertNotIn('defaulttest', msg) - self.assertNotIn('defaulttest3', msg) - self.assertNotIn('remove_fk', msg) - self.assertIn('constraint', msg) - - def _test_models_not_sync(self): - self.metadata_migrations.clear() - sa.Table( - 'table', self.metadata_migrations, - sa.Column('fk_check', sa.String(36), nullable=False), - sa.PrimaryKeyConstraint('fk_check'), - mysql_engine='InnoDB' - ) - sa.Table( - 'testtbl', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('spam', sa.String(8), nullable=True), - sa.Column('eggs', sa.DateTime), - sa.Column('foo', sa.Boolean, - server_default=sa.sql.expression.false()), - sa.Column('bool_wo_default', 
sa.Boolean, unique=True), - sa.Column('bar', sa.BigInteger), - sa.Column('defaulttest', sa.Integer, server_default='7'), - sa.Column('defaulttest2', sa.String(8), server_default=''), - sa.Column('defaulttest3', sa.String(5), server_default="fake"), - sa.Column('defaulttest4', - sa.Enum('first', 'second', name='testenum'), - server_default="first"), - sa.Column('variant', sa.String(10)), - sa.Column('fk_check', sa.String(36), nullable=False), - sa.UniqueConstraint('spam', 'foo', name='uniq_cons'), - sa.ForeignKeyConstraint(['fk_check'], ['table.fk_check']), - mysql_engine='InnoDB' - ) - - msg = six.text_type(self.assertRaises(AssertionError, - self.test_models_sync)) - # NOTE(I159): Check mentioning of the table and columns. - # The log is invalid json, so we can't parse it and check it for - # full compliance. We have no guarantee of the log items ordering, - # so we can't use regexp. - self.assertTrue(msg.startswith( - 'Models and migration scripts aren\'t in sync:')) - self.assertIn('testtbl', msg) - self.assertIn('spam', msg) - self.assertIn('eggs', msg) # test that the unique constraint is added - self.assertIn('foo', msg) - self.assertIn('bar', msg) - self.assertIn('bool_wo_default', msg) - self.assertIn('defaulttest', msg) - self.assertIn('defaulttest3', msg) - self.assertIn('remove_fk', msg) - self.assertIn('variant', msg) - - -class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin, - migrate.ModelsMigrationsSync, - test_base.MySQLOpportunisticTestCase): - - def test_models_not_sync(self): - self._test_models_not_sync() - - def test_models_not_sync_filtered(self): - self._test_models_not_sync_filtered() - - -class ModelsMigrationsSyncPsql(ModelsMigrationSyncMixin, - migrate.ModelsMigrationsSync, - test_base.PostgreSQLOpportunisticTestCase): - - def test_models_not_sync(self): - self._test_models_not_sync() - - def test_models_not_sync_filtered(self): - self._test_models_not_sync_filtered() - - -class TestOldCheckForeignKeys(test_base.DbTestCase): - def setUp(self): - super(TestOldCheckForeignKeys, self).setUp() - - test = self - - class MigrateSync(migrate.ModelsMigrationsSync): - def get_engine(self): - return test.engine - - def get_metadata(self): - return test.metadata - - def db_sync(self): - raise NotImplementedError() - - self.migration_sync = MigrateSync() - - def _fk_added_fixture(self): - self.metadata = sa.MetaData() - self.metadata_migrations = sa.MetaData() - - sa.Table( - 'testtbl_one', self.metadata, - sa.Column('id', sa.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_two', self.metadata, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('tone_id', sa.Integer), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_one', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_two', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column( - 'tone_id', sa.Integer, - sa.ForeignKey('testtbl_one.id', name="tone_id_fk")), - mysql_engine='InnoDB' - ) - - def _fk_removed_fixture(self): - self.metadata = sa.MetaData() - self.metadata_migrations = sa.MetaData() - - sa.Table( - 'testtbl_one', self.metadata, - sa.Column('id', sa.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_two', self.metadata, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column( - 'tone_id', sa.Integer, - sa.ForeignKey('testtbl_one.id', name="tone_id_fk")), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_one', 
self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_two', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('tone_id', sa.Integer), - mysql_engine='InnoDB' - ) - - def _fk_no_change_fixture(self): - self.metadata = sa.MetaData() - self.metadata_migrations = sa.MetaData() - - sa.Table( - 'testtbl_one', self.metadata, - sa.Column('id', sa.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_two', self.metadata, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column( - 'tone_id', sa.Integer, - sa.ForeignKey('testtbl_one.id', name="tone_id_fk")), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_one', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - - sa.Table( - 'testtbl_two', self.metadata_migrations, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column( - 'tone_id', sa.Integer, - sa.ForeignKey('testtbl_one.id', name="tone_id_fk")), - mysql_engine='InnoDB' - ) - - def _run_test(self): - self.metadata.create_all(bind=self.engine) - return self.migration_sync.check_foreign_keys( - self.metadata_migrations, self.engine) - - def _compare_diffs(self, diffs, compare_to): - diffs = [ - ( - cmd, - fk._get_colspec() if isinstance(fk, sa.ForeignKey) - else "tone_id_fk" if fk is None # sqlite workaround - else fk, - tname, fk_info - ) - for cmd, fk, tname, fk_info in diffs - ] - self.assertEqual(compare_to, diffs) - - def test_fk_added(self): - self._fk_added_fixture() - diffs = self._run_test() - - self._compare_diffs( - diffs, - [( - 'add_key', - 'testtbl_one.id', - 'testtbl_two', - self.migration_sync.FKInfo( - constrained_columns=('tone_id',), - referred_table='testtbl_one', - referred_columns=('id',)) - )] - ) - - def test_fk_removed(self): - self._fk_removed_fixture() - diffs = self._run_test() - - self._compare_diffs( - diffs, - [( - 'drop_key', - "tone_id_fk", - 'testtbl_two', - self.migration_sync.FKInfo( - constrained_columns=('tone_id',), - referred_table='testtbl_one', - referred_columns=('id',)) - )] - ) - - def test_fk_no_change(self): - self._fk_no_change_fixture() - diffs = self._run_test() - - self._compare_diffs( - diffs, - []) - - -class PGTestOldCheckForeignKeys( - TestOldCheckForeignKeys, test_base.PostgreSQLOpportunisticTestCase): - pass - - -class MySQLTestOldCheckForeignKeys( - TestOldCheckForeignKeys, test_base.MySQLOpportunisticTestCase): - pass diff --git a/oslo_db/tests/sqlalchemy/test_models.py b/oslo_db/tests/sqlalchemy/test_models.py deleted file mode 100644 index 674993d..0000000 --- a/oslo_db/tests/sqlalchemy/test_models.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2012 Cloudscaling Group, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -from oslotest import base as oslo_test -from sqlalchemy import Column -from sqlalchemy import Integer, String -from sqlalchemy.ext.declarative import declarative_base - -from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import test_base - - -BASE = declarative_base() - - -class ModelBaseTest(test_base.DbTestCase): - def setUp(self): - super(ModelBaseTest, self).setUp() - self.mb = models.ModelBase() - self.ekm = ExtraKeysModel() - - def test_modelbase_has_dict_methods(self): - dict_methods = ('__getitem__', - '__setitem__', - '__contains__', - 'get', - 'update', - 'save', - 'items', - 'iteritems', - 'keys') - for method in dict_methods: - self.assertTrue(hasattr(models.ModelBase, method), - "Method %s() is not found" % method) - - def test_modelbase_is_iterable(self): - self.assertTrue(issubclass(models.ModelBase, collections.Iterable)) - - def test_modelbase_set(self): - self.mb['world'] = 'hello' - self.assertEqual('hello', self.mb['world']) - - def test_modelbase_update(self): - h = {'a': '1', 'b': '2'} - self.mb.update(h) - for key in h.keys(): - self.assertEqual(h[key], self.mb[key]) - - def test_modelbase_contains(self): - mb = models.ModelBase() - h = {'a': '1', 'b': '2'} - mb.update(h) - for key in h.keys(): - # Test 'in' syntax (instead of using .assertIn) - self.assertTrue(key in mb) - - self.assertFalse('non-existent-key' in mb) - - def test_modelbase_contains_exc(self): - class ErrorModel(models.ModelBase): - @property - def bug(self): - raise ValueError - - model = ErrorModel() - model.update({'attr': 5}) - - self.assertTrue('attr' in model) - self.assertRaises(ValueError, lambda: 'bug' in model) - - def test_modelbase_items_iteritems(self): - h = {'a': '1', 'b': '2'} - expected = { - 'id': None, - 'smth': None, - 'name': 'NAME', - 'a': '1', - 'b': '2', - } - self.ekm.update(h) - self.assertEqual(expected, dict(self.ekm.items())) - self.assertEqual(expected, dict(self.ekm.iteritems())) - - def test_modelbase_dict(self): - h = {'a': '1', 'b': '2'} - expected = { - 'id': None, - 'smth': None, - 'name': 'NAME', - 'a': '1', - 'b': '2', - } - self.ekm.update(h) - self.assertEqual(expected, dict(self.ekm)) - - def test_modelbase_iter(self): - expected = { - 'id': None, - 'smth': None, - 'name': 'NAME', - } - i = iter(self.ekm) - found_items = 0 - while True: - r = next(i, None) - if r is None: - break - self.assertEqual(expected[r[0]], r[1]) - found_items += 1 - - self.assertEqual(len(expected), found_items) - - def test_modelbase_keys(self): - self.assertEqual(set(('id', 'smth', 'name')), set(self.ekm.keys())) - - self.ekm.update({'a': '1', 'b': '2'}) - self.assertEqual(set(('a', 'b', 'id', 'smth', 'name')), - set(self.ekm.keys())) - - def test_modelbase_several_iters(self): - mb = ExtraKeysModel() - it1 = iter(mb) - it2 = iter(mb) - - self.assertFalse(it1 is it2) - self.assertEqual(dict(mb), dict(it1)) - self.assertEqual(dict(mb), dict(it2)) - - def test_extra_keys_empty(self): - """Test verifies that by default extra_keys return empty list.""" - self.assertEqual([], self.mb._extra_keys) - - def test_extra_keys_defined(self): - """Property _extra_keys will return list with attributes names.""" - self.assertEqual(['name'], self.ekm._extra_keys) - - def test_model_with_extra_keys(self): - data = dict(self.ekm) - self.assertEqual({'smth': None, - 'id': None, - 'name': 'NAME'}, - data) - - -class ExtraKeysModel(BASE, models.ModelBase): - __tablename__ = 'test_model' - - id = Column(Integer, primary_key=True) - smth = Column(String(255)) - - 
@property - def name(self): - return 'NAME' - - @property - def _extra_keys(self): - return ['name'] - - -class TimestampMixinTest(oslo_test.BaseTestCase): - - def test_timestampmixin_attr(self): - methods = ('created_at', - 'updated_at') - for method in methods: - self.assertTrue(hasattr(models.TimestampMixin, method), - "Method %s() is not found" % method) diff --git a/oslo_db/tests/sqlalchemy/test_options.py b/oslo_db/tests/sqlalchemy/test_options.py deleted file mode 100644 index 2f3479d..0000000 --- a/oslo_db/tests/sqlalchemy/test_options.py +++ /dev/null @@ -1,127 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_config import fixture as config - -from oslo_db import options -from oslo_db.tests import utils as test_utils - - -class DbApiOptionsTestCase(test_utils.BaseTestCase): - def setUp(self): - super(DbApiOptionsTestCase, self).setUp() - - config_fixture = self.useFixture(config.Config()) - self.conf = config_fixture.conf - self.conf.register_opts(options.database_opts, group='database') - self.config = config_fixture.config - - def test_deprecated_session_parameters(self): - path = self.create_tempfiles([["tmp", b"""[DEFAULT] -sql_connection=x://y.z -sql_min_pool_size=10 -sql_max_pool_size=20 -sql_max_retries=30 -sql_retry_interval=40 -sql_max_overflow=50 -sql_connection_debug=60 -sql_connection_trace=True -"""]])[0] - self.conf(['--config-file', path]) - self.assertEqual('x://y.z', self.conf.database.connection) - self.assertEqual(10, self.conf.database.min_pool_size) - self.assertEqual(20, self.conf.database.max_pool_size) - self.assertEqual(30, self.conf.database.max_retries) - self.assertEqual(40, self.conf.database.retry_interval) - self.assertEqual(50, self.conf.database.max_overflow) - self.assertEqual(60, self.conf.database.connection_debug) - self.assertEqual(True, self.conf.database.connection_trace) - - def test_session_parameters(self): - path = self.create_tempfiles([["tmp", b"""[database] -connection=x://y.z -min_pool_size=10 -max_pool_size=20 -max_retries=30 -retry_interval=40 -max_overflow=50 -connection_debug=60 -connection_trace=True -pool_timeout=7 -"""]])[0] - self.conf(['--config-file', path]) - self.assertEqual('x://y.z', self.conf.database.connection) - self.assertEqual(10, self.conf.database.min_pool_size) - self.assertEqual(20, self.conf.database.max_pool_size) - self.assertEqual(30, self.conf.database.max_retries) - self.assertEqual(40, self.conf.database.retry_interval) - self.assertEqual(50, self.conf.database.max_overflow) - self.assertEqual(60, self.conf.database.connection_debug) - self.assertEqual(True, self.conf.database.connection_trace) - self.assertEqual(7, self.conf.database.pool_timeout) - - def test_dbapi_database_deprecated_parameters(self): - path = self.create_tempfiles([['tmp', b'[DATABASE]\n' - b'sql_connection=fake_connection\n' - b'sql_idle_timeout=100\n' - b'sql_min_pool_size=99\n' - b'sql_max_pool_size=199\n' - b'sql_max_retries=22\n' - b'reconnect_interval=17\n' - 
b'sqlalchemy_max_overflow=101\n' - b'sqlalchemy_pool_timeout=5\n' - ]])[0] - self.conf(['--config-file', path]) - self.assertEqual('fake_connection', self.conf.database.connection) - self.assertEqual(100, self.conf.database.idle_timeout) - self.assertEqual(99, self.conf.database.min_pool_size) - self.assertEqual(199, self.conf.database.max_pool_size) - self.assertEqual(22, self.conf.database.max_retries) - self.assertEqual(17, self.conf.database.retry_interval) - self.assertEqual(101, self.conf.database.max_overflow) - self.assertEqual(5, self.conf.database.pool_timeout) - - def test_dbapi_database_deprecated_parameters_sql(self): - path = self.create_tempfiles([['tmp', b'[sql]\n' - b'connection=test_sql_connection\n' - b'idle_timeout=99\n' - ]])[0] - self.conf(['--config-file', path]) - self.assertEqual('test_sql_connection', self.conf.database.connection) - self.assertEqual(99, self.conf.database.idle_timeout) - - def test_deprecated_dbapi_parameters(self): - path = self.create_tempfiles([['tmp', b'[DEFAULT]\n' - b'db_backend=test_123\n' - ]])[0] - - self.conf(['--config-file', path]) - self.assertEqual('test_123', self.conf.database.backend) - - def test_dbapi_parameters(self): - path = self.create_tempfiles([['tmp', b'[database]\n' - b'backend=test_123\n' - ]])[0] - - self.conf(['--config-file', path]) - self.assertEqual('test_123', self.conf.database.backend) - - def test_set_defaults(self): - conf = cfg.ConfigOpts() - - options.set_defaults(conf, - connection='sqlite:///:memory:') - - self.assertTrue(len(conf.database.items()) > 1) - self.assertEqual('sqlite:///:memory:', conf.database.connection) diff --git a/oslo_db/tests/sqlalchemy/test_provision.py b/oslo_db/tests/sqlalchemy/test_provision.py deleted file mode 100644 index 53d2303..0000000 --- a/oslo_db/tests/sqlalchemy/test_provision.py +++ /dev/null @@ -1,213 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslotest import base as oslo_test_base -from sqlalchemy import exc as sa_exc -from sqlalchemy import inspect -from sqlalchemy import schema -from sqlalchemy import types - -from oslo_db import exception -from oslo_db.sqlalchemy import provision -from oslo_db.sqlalchemy import test_base - - -class DropAllObjectsTest(test_base.DbTestCase): - - def setUp(self): - super(DropAllObjectsTest, self).setUp() - - self.metadata = metadata = schema.MetaData() - schema.Table( - 'a', metadata, - schema.Column('id', types.Integer, primary_key=True), - mysql_engine='InnoDB' - ) - schema.Table( - 'b', metadata, - schema.Column('id', types.Integer, primary_key=True), - schema.Column('a_id', types.Integer, schema.ForeignKey('a.id')), - mysql_engine='InnoDB' - ) - schema.Table( - 'c', metadata, - schema.Column('id', types.Integer, primary_key=True), - schema.Column('b_id', types.Integer, schema.ForeignKey('b.id')), - schema.Column( - 'd_id', types.Integer, - schema.ForeignKey('d.id', use_alter=True, name='c_d_fk')), - mysql_engine='InnoDB' - ) - schema.Table( - 'd', metadata, - schema.Column('id', types.Integer, primary_key=True), - schema.Column('c_id', types.Integer, schema.ForeignKey('c.id')), - mysql_engine='InnoDB' - ) - - metadata.create_all(self.engine, checkfirst=False) - # will drop nothing if the test worked - self.addCleanup(metadata.drop_all, self.engine, checkfirst=True) - - def test_drop_all(self): - insp = inspect(self.engine) - self.assertEqual( - set(['a', 'b', 'c', 'd']), - set(insp.get_table_names()) - ) - - self.db.backend.drop_all_objects(self.engine) - - insp = inspect(self.engine) - self.assertEqual( - [], - insp.get_table_names() - ) - - -class BackendNotAvailableTest(oslo_test_base.BaseTestCase): - def test_no_dbapi(self): - backend = provision.Backend( - "postgresql", "postgresql+nosuchdbapi://hostname/dsn") - - with mock.patch( - "sqlalchemy.create_engine", - mock.Mock(side_effect=ImportError("nosuchdbapi"))): - - # NOTE(zzzeek): Call and test the _verify function twice, as it - # exercises a different code path on subsequent runs vs. - # the first run - ex = self.assertRaises( - exception.BackendNotAvailable, - backend._verify) - self.assertEqual( - "Backend 'postgresql+nosuchdbapi' is unavailable: " - "No DBAPI installed", str(ex)) - - ex = self.assertRaises( - exception.BackendNotAvailable, - backend._verify) - self.assertEqual( - "Backend 'postgresql+nosuchdbapi' is unavailable: " - "No DBAPI installed", str(ex)) - - def test_cant_connect(self): - backend = provision.Backend( - "postgresql", "postgresql+nosuchdbapi://hostname/dsn") - - with mock.patch( - "sqlalchemy.create_engine", - mock.Mock(return_value=mock.Mock(connect=mock.Mock( - side_effect=sa_exc.OperationalError( - "can't connect", None, None)) - )) - ): - - # NOTE(zzzeek): Call and test the _verify function twice, as it - # exercises a different code path on subsequent runs vs. 
- # the first run - ex = self.assertRaises( - exception.BackendNotAvailable, - backend._verify) - self.assertEqual( - "Backend 'postgresql+nosuchdbapi' is unavailable: " - "Could not connect", str(ex)) - - ex = self.assertRaises( - exception.BackendNotAvailable, - backend._verify) - self.assertEqual( - "Backend 'postgresql+nosuchdbapi' is unavailable: " - "Could not connect", str(ex)) - - -class MySQLDropAllObjectsTest( - DropAllObjectsTest, test_base.MySQLOpportunisticTestCase): - pass - - -class PostgreSQLDropAllObjectsTest( - DropAllObjectsTest, test_base.PostgreSQLOpportunisticTestCase): - pass - - -class RetainSchemaTest(oslo_test_base.BaseTestCase): - DRIVER = "sqlite" - - def setUp(self): - super(RetainSchemaTest, self).setUp() - - metadata = schema.MetaData() - self.test_table = schema.Table( - 'test_table', metadata, - schema.Column('x', types.Integer), - schema.Column('y', types.Integer), - mysql_engine='InnoDB' - ) - - def gen_schema(engine): - metadata.create_all(engine, checkfirst=False) - self._gen_schema = gen_schema - - def test_once(self): - self._run_test() - - def test_twice(self): - self._run_test() - - def _run_test(self): - try: - database_resource = provision.DatabaseResource(self.DRIVER) - except exception.BackendNotAvailable: - self.skip("database not available") - - schema_resource = provision.SchemaResource( - database_resource, self._gen_schema) - transaction_resource = provision.TransactionResource( - database_resource, schema_resource) - - engine = transaction_resource.getResource() - - with engine.connect() as conn: - rows = conn.execute(self.test_table.select()) - self.assertEqual([], rows.fetchall()) - - trans = conn.begin() - conn.execute( - self.test_table.insert(), - {"x": 1, "y": 2} - ) - trans.rollback() - - rows = conn.execute(self.test_table.select()) - self.assertEqual([], rows.fetchall()) - - trans = conn.begin() - conn.execute( - self.test_table.insert(), - {"x": 2, "y": 3} - ) - trans.commit() - - rows = conn.execute(self.test_table.select()) - self.assertEqual([(2, 3)], rows.fetchall()) - - transaction_resource.finishedWith(engine) - - -class MySQLRetainSchemaTest(RetainSchemaTest): - DRIVER = "mysql" - - -class PostgresqlRetainSchemaTest(RetainSchemaTest): - DRIVER = "postgresql" diff --git a/oslo_db/tests/sqlalchemy/test_sqlalchemy.py b/oslo_db/tests/sqlalchemy/test_sqlalchemy.py deleted file mode 100644 index cc37017..0000000 --- a/oslo_db/tests/sqlalchemy/test_sqlalchemy.py +++ /dev/null @@ -1,724 +0,0 @@ -# coding=utf-8 - -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Unit tests for SQLAlchemy specific code.""" - -import logging -import os - -import fixtures -import mock -from oslo_config import cfg -from oslotest import base as oslo_test -import six -import sqlalchemy -from sqlalchemy import Column, MetaData, Table -from sqlalchemy.engine import url -from sqlalchemy import Integer, String -from sqlalchemy.ext.declarative import declarative_base - -from oslo_db import exception -from oslo_db import options as db_options -from oslo_db.sqlalchemy import engines -from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import session -from oslo_db.sqlalchemy import test_base - - -BASE = declarative_base() -_TABLE_NAME = '__tmp__test__tmp__' - -_REGEXP_TABLE_NAME = _TABLE_NAME + "regexp" - - -class RegexpTable(BASE, models.ModelBase): - __tablename__ = _REGEXP_TABLE_NAME - id = Column(Integer, primary_key=True) - bar = Column(String(255)) - - -class RegexpFilterTestCase(test_base.DbTestCase): - - def setUp(self): - super(RegexpFilterTestCase, self).setUp() - meta = MetaData() - meta.bind = self.engine - test_table = Table(_REGEXP_TABLE_NAME, meta, - Column('id', Integer, primary_key=True, - nullable=False), - Column('bar', String(255))) - test_table.create() - self.addCleanup(test_table.drop) - - def _test_regexp_filter(self, regexp, expected): - _session = self.sessionmaker() - with _session.begin(): - for i in ['10', '20', u'♥']: - tbl = RegexpTable() - tbl.update({'bar': i}) - tbl.save(session=_session) - - regexp_op = RegexpTable.bar.op('REGEXP')(regexp) - result = _session.query(RegexpTable).filter(regexp_op).all() - self.assertEqual(expected, [r.bar for r in result]) - - def test_regexp_filter(self): - self._test_regexp_filter('10', ['10']) - - def test_regexp_filter_nomatch(self): - self._test_regexp_filter('11', []) - - def test_regexp_filter_unicode(self): - self._test_regexp_filter(u'♥', [u'♥']) - - def test_regexp_filter_unicode_nomatch(self): - self._test_regexp_filter(u'♦', []) - - -class SQLiteSavepointTest(test_base.DbTestCase): - def setUp(self): - super(SQLiteSavepointTest, self).setUp() - meta = MetaData() - self.test_table = Table( - "test_table", meta, - Column('id', Integer, primary_key=True), - Column('data', String(10))) - self.test_table.create(self.engine) - self.addCleanup(self.test_table.drop, self.engine) - - def test_plain_transaction(self): - conn = self.engine.connect() - trans = conn.begin() - conn.execute( - self.test_table.insert(), - {'data': 'data 1'} - ) - self.assertEqual( - [(1, 'data 1')], - self.engine.execute( - self.test_table.select(). - order_by(self.test_table.c.id) - ).fetchall() - ) - trans.rollback() - self.assertEqual( - 0, - self.engine.scalar(self.test_table.count()) - ) - - def test_savepoint_middle(self): - with self.engine.begin() as conn: - conn.execute( - self.test_table.insert(), - {'data': 'data 1'} - ) - - savepoint = conn.begin_nested() - conn.execute( - self.test_table.insert(), - {'data': 'data 2'} - ) - savepoint.rollback() - - conn.execute( - self.test_table.insert(), - {'data': 'data 3'} - ) - - self.assertEqual( - [(1, 'data 1'), (2, 'data 3')], - self.engine.execute( - self.test_table.select(). 
- order_by(self.test_table.c.id) - ).fetchall() - ) - - def test_savepoint_beginning(self): - with self.engine.begin() as conn: - savepoint = conn.begin_nested() - conn.execute( - self.test_table.insert(), - {'data': 'data 1'} - ) - savepoint.rollback() - - conn.execute( - self.test_table.insert(), - {'data': 'data 2'} - ) - - self.assertEqual( - [(1, 'data 2')], - self.engine.execute( - self.test_table.select(). - order_by(self.test_table.c.id) - ).fetchall() - ) - - -class FakeDBAPIConnection(object): - def cursor(self): - return FakeCursor() - - -class FakeCursor(object): - def execute(self, sql): - pass - - -class FakeConnectionProxy(object): - pass - - -class FakeConnectionRec(object): - pass - - -class OperationalError(Exception): - pass - - -class ProgrammingError(Exception): - pass - - -class FakeDB2Engine(object): - - class Dialect(object): - - def is_disconnect(self, e, *args): - expected_error = ('SQL30081N: DB2 Server connection is no longer ' - 'active') - return (str(e) == expected_error) - - dialect = Dialect() - name = 'ibm_db_sa' - - def dispose(self): - pass - - -class MySQLDefaultModeTestCase(test_base.MySQLOpportunisticTestCase): - def test_default_is_traditional(self): - with self.engine.connect() as conn: - sql_mode = conn.execute( - "SHOW VARIABLES LIKE 'sql_mode'" - ).first()[1] - - self.assertTrue("TRADITIONAL" in sql_mode) - - -class MySQLModeTestCase(test_base.MySQLOpportunisticTestCase): - - def __init__(self, *args, **kwargs): - super(MySQLModeTestCase, self).__init__(*args, **kwargs) - # By default, run in empty SQL mode. - # Subclasses override this with specific modes. - self.mysql_mode = '' - - def setUp(self): - super(MySQLModeTestCase, self).setUp() - mode_engine = session.create_engine( - self.engine.url, - mysql_sql_mode=self.mysql_mode) - self.connection = mode_engine.connect() - - meta = MetaData() - self.test_table = Table(_TABLE_NAME + "mode", meta, - Column('id', Integer, primary_key=True), - Column('bar', String(255))) - self.test_table.create(self.connection) - - def cleanup(): - self.test_table.drop(self.connection) - self.connection.close() - mode_engine.dispose() - self.addCleanup(cleanup) - - def _test_string_too_long(self, value): - with self.connection.begin(): - self.connection.execute(self.test_table.insert(), - bar=value) - result = self.connection.execute(self.test_table.select()) - return result.fetchone()['bar'] - - def test_string_too_long(self): - value = 'a' * 512 - # String is too long. - # With no SQL mode set, this gets truncated. - self.assertNotEqual(value, - self._test_string_too_long(value)) - - -class MySQLStrictAllTablesModeTestCase(MySQLModeTestCase): - "Test data integrity enforcement in MySQL STRICT_ALL_TABLES mode." - - def __init__(self, *args, **kwargs): - super(MySQLStrictAllTablesModeTestCase, self).__init__(*args, **kwargs) - self.mysql_mode = 'STRICT_ALL_TABLES' - - def test_string_too_long(self): - value = 'a' * 512 - # String is too long. - # With STRICT_ALL_TABLES or TRADITIONAL mode set, this is an error. - self.assertRaises(exception.DBError, - self._test_string_too_long, value) - - -class MySQLTraditionalModeTestCase(MySQLStrictAllTablesModeTestCase): - """Test data integrity enforcement in MySQL TRADITIONAL mode. - - Since TRADITIONAL includes STRICT_ALL_TABLES, this inherits all - STRICT_ALL_TABLES mode tests. 
- """ - - def __init__(self, *args, **kwargs): - super(MySQLTraditionalModeTestCase, self).__init__(*args, **kwargs) - self.mysql_mode = 'TRADITIONAL' - - -class EngineFacadeTestCase(oslo_test.BaseTestCase): - def setUp(self): - super(EngineFacadeTestCase, self).setUp() - - self.facade = session.EngineFacade('sqlite://') - - def test_get_engine(self): - eng1 = self.facade.get_engine() - eng2 = self.facade.get_engine() - - self.assertIs(eng1, eng2) - - def test_get_session(self): - ses1 = self.facade.get_session() - ses2 = self.facade.get_session() - - self.assertIsNot(ses1, ses2) - - def test_get_session_arguments_override_default_settings(self): - ses = self.facade.get_session(autocommit=False, expire_on_commit=True) - - self.assertFalse(ses.autocommit) - self.assertTrue(ses.expire_on_commit) - - @mock.patch('oslo_db.sqlalchemy.orm.get_maker') - @mock.patch('oslo_db.sqlalchemy.engines.create_engine') - def test_creation_from_config(self, create_engine, get_maker): - conf = cfg.ConfigOpts() - conf.register_opts(db_options.database_opts, group='database') - - overrides = { - 'connection': 'sqlite:///:memory:', - 'slave_connection': None, - 'connection_debug': 100, - 'max_pool_size': 10, - 'mysql_sql_mode': 'TRADITIONAL', - } - for optname, optvalue in overrides.items(): - conf.set_override(optname, optvalue, group='database') - - session.EngineFacade.from_config(conf, - autocommit=False, - expire_on_commit=True) - - create_engine.assert_called_once_with( - sql_connection='sqlite:///:memory:', - connection_debug=100, - max_pool_size=10, - mysql_sql_mode='TRADITIONAL', - sqlite_fk=False, - idle_timeout=mock.ANY, - retry_interval=mock.ANY, - max_retries=mock.ANY, - max_overflow=mock.ANY, - connection_trace=mock.ANY, - sqlite_synchronous=mock.ANY, - pool_timeout=mock.ANY, - thread_checkin=mock.ANY, - json_serializer=None, - json_deserializer=None, - logging_name=mock.ANY, - ) - get_maker.assert_called_once_with(engine=create_engine(), - autocommit=False, - expire_on_commit=True) - - def test_slave_connection(self): - paths = self.create_tempfiles([('db.master', ''), ('db.slave', '')], - ext='') - master_path = 'sqlite:///' + paths[0] - slave_path = 'sqlite:///' + paths[1] - - facade = session.EngineFacade( - sql_connection=master_path, - slave_connection=slave_path - ) - - master = facade.get_engine() - self.assertEqual(master_path, str(master.url)) - slave = facade.get_engine(use_slave=True) - self.assertEqual(slave_path, str(slave.url)) - - master_session = facade.get_session() - self.assertEqual(master_path, str(master_session.bind.url)) - slave_session = facade.get_session(use_slave=True) - self.assertEqual(slave_path, str(slave_session.bind.url)) - - def test_slave_connection_string_not_provided(self): - master_path = 'sqlite:///' + self.create_tempfiles( - [('db.master', '')], ext='')[0] - - facade = session.EngineFacade(sql_connection=master_path) - - master = facade.get_engine() - slave = facade.get_engine(use_slave=True) - self.assertIs(master, slave) - self.assertEqual(master_path, str(master.url)) - - master_session = facade.get_session() - self.assertEqual(master_path, str(master_session.bind.url)) - slave_session = facade.get_session(use_slave=True) - self.assertEqual(master_path, str(slave_session.bind.url)) - - -class SQLiteConnectTest(oslo_test.BaseTestCase): - - def _fixture(self, **kw): - return session.create_engine("sqlite://", **kw) - - def test_sqlite_fk_listener(self): - engine = self._fixture(sqlite_fk=True) - self.assertEqual( - 1, - engine.scalar("pragma foreign_keys") 
- ) - - engine = self._fixture(sqlite_fk=False) - - self.assertEqual( - 0, - engine.scalar("pragma foreign_keys") - ) - - def test_sqlite_synchronous_listener(self): - engine = self._fixture() - - # "The default setting is synchronous=FULL." (e.g. 2) - # http://www.sqlite.org/pragma.html#pragma_synchronous - self.assertEqual( - 2, - engine.scalar("pragma synchronous") - ) - - engine = self._fixture(sqlite_synchronous=False) - - self.assertEqual( - 0, - engine.scalar("pragma synchronous") - ) - - -class MysqlConnectTest(test_base.MySQLOpportunisticTestCase): - - def _fixture(self, sql_mode): - return session.create_engine(self.engine.url, mysql_sql_mode=sql_mode) - - def _assert_sql_mode(self, engine, sql_mode_present, sql_mode_non_present): - mode = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()[1] - self.assertTrue( - sql_mode_present in mode - ) - if sql_mode_non_present: - self.assertTrue( - sql_mode_non_present not in mode - ) - - def test_set_mode_traditional(self): - engine = self._fixture(sql_mode='TRADITIONAL') - self._assert_sql_mode(engine, "TRADITIONAL", "ANSI") - - def test_set_mode_ansi(self): - engine = self._fixture(sql_mode='ANSI') - self._assert_sql_mode(engine, "ANSI", "TRADITIONAL") - - def test_set_mode_no_mode(self): - # If _mysql_set_mode_callback is called with sql_mode=None, then - # the SQL mode is NOT set on the connection. - - # get the GLOBAL sql_mode, not the @@SESSION, so that - # we get what is configured for the MySQL database, as opposed - # to what our own session.create_engine() has set it to. - expected = self.engine.execute( - "SELECT @@GLOBAL.sql_mode").scalar() - - engine = self._fixture(sql_mode=None) - self._assert_sql_mode(engine, expected, None) - - def test_fail_detect_mode(self): - # If "SHOW VARIABLES LIKE 'sql_mode'" results in no row, then - # we get a log indicating can't detect the mode. - - log = self.useFixture(fixtures.FakeLogger(level=logging.WARN)) - - mysql_conn = self.engine.raw_connection() - self.addCleanup(mysql_conn.close) - mysql_conn.detach() - mysql_cursor = mysql_conn.cursor() - - def execute(statement, parameters=()): - if "SHOW VARIABLES LIKE 'sql_mode'" in statement: - statement = "SHOW VARIABLES LIKE 'i_dont_exist'" - return mysql_cursor.execute(statement, parameters) - - test_engine = sqlalchemy.create_engine(self.engine.url, - _initialize=False) - - with mock.patch.object( - test_engine.pool, '_creator', - mock.Mock( - return_value=mock.Mock( - cursor=mock.Mock( - return_value=mock.Mock( - execute=execute, - fetchone=mysql_cursor.fetchone, - fetchall=mysql_cursor.fetchall - ) - ) - ) - ) - ): - engines._init_events.dispatch_on_drivername("mysql")(test_engine) - - test_engine.raw_connection() - self.assertIn('Unable to detect effective SQL mode', - log.output) - - def test_logs_real_mode(self): - # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value, then - # we get a log with the value. - - log = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) - - engine = self._fixture(sql_mode='TRADITIONAL') - - actual_mode = engine.execute( - "SHOW VARIABLES LIKE 'sql_mode'").fetchone()[1] - - self.assertIn('MySQL server mode set to %s' % actual_mode, - log.output) - - def test_warning_when_not_traditional(self): - # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that doesn't - # include 'TRADITIONAL', then a warning is logged. 
- - log = self.useFixture(fixtures.FakeLogger(level=logging.WARN)) - self._fixture(sql_mode='ANSI') - - self.assertIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES", - log.output) - - def test_no_warning_when_traditional(self): - # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that includes - # 'TRADITIONAL', then no warning is logged. - - log = self.useFixture(fixtures.FakeLogger(level=logging.WARN)) - self._fixture(sql_mode='TRADITIONAL') - - self.assertNotIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES", - log.output) - - def test_no_warning_when_strict_all_tables(self): - # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that includes - # 'STRICT_ALL_TABLES', then no warning is logged. - - log = self.useFixture(fixtures.FakeLogger(level=logging.WARN)) - self._fixture(sql_mode='TRADITIONAL') - - self.assertNotIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES", - log.output) - - -class CreateEngineTest(oslo_test.BaseTestCase): - """Test that dialect-specific arguments/ listeners are set up correctly. - - """ - - def setUp(self): - super(CreateEngineTest, self).setUp() - self.args = {'connect_args': {}} - - def test_queuepool_args(self): - engines._init_connection_args( - url.make_url("mysql+pymysql://u:p@host/test"), self.args, - max_pool_size=10, max_overflow=10) - self.assertEqual(10, self.args['pool_size']) - self.assertEqual(10, self.args['max_overflow']) - - def test_sqlite_memory_pool_args(self): - for _url in ("sqlite://", "sqlite:///:memory:"): - engines._init_connection_args( - url.make_url(_url), self.args, - max_pool_size=10, max_overflow=10) - - # queuepool arguments are not peresnet - self.assertTrue( - 'pool_size' not in self.args) - self.assertTrue( - 'max_overflow' not in self.args) - - self.assertEqual(False, - self.args['connect_args']['check_same_thread']) - - # due to memory connection - self.assertTrue('poolclass' in self.args) - - def test_sqlite_file_pool_args(self): - engines._init_connection_args( - url.make_url("sqlite:///somefile.db"), self.args, - max_pool_size=10, max_overflow=10) - - # queuepool arguments are not peresnet - self.assertTrue('pool_size' not in self.args) - self.assertTrue( - 'max_overflow' not in self.args) - - self.assertFalse(self.args['connect_args']) - - # NullPool is the default for file based connections, - # no need to specify this - self.assertTrue('poolclass' not in self.args) - - def _test_mysql_connect_args_default(self, connect_args): - if six.PY3: - self.assertEqual({'charset': 'utf8', 'use_unicode': 1}, - connect_args) - else: - self.assertEqual({'charset': 'utf8', 'use_unicode': 0}, - connect_args) - - def test_mysql_connect_args_default(self): - engines._init_connection_args( - url.make_url("mysql://u:p@host/test"), self.args) - self._test_mysql_connect_args_default(self.args['connect_args']) - - def test_mysql_oursql_connect_args_default(self): - engines._init_connection_args( - url.make_url("mysql+oursql://u:p@host/test"), self.args) - self._test_mysql_connect_args_default(self.args['connect_args']) - - def test_mysql_pymysql_connect_args_default(self): - engines._init_connection_args( - url.make_url("mysql+pymysql://u:p@host/test"), self.args) - self.assertEqual({'charset': 'utf8'}, self.args['connect_args']) - - def test_mysql_mysqldb_connect_args_default(self): - engines._init_connection_args( - url.make_url("mysql+mysqldb://u:p@host/test"), self.args) - self._test_mysql_connect_args_default(self.args['connect_args']) - - def test_postgresql_connect_args_default(self): - 
engines._init_connection_args( - url.make_url("postgresql://u:p@host/test"), self.args) - self.assertEqual('utf8', self.args['client_encoding']) - self.assertFalse(self.args['connect_args']) - - def test_mysqlconnector_raise_on_warnings_default(self): - engines._init_connection_args( - url.make_url("mysql+mysqlconnector://u:p@host/test"), - self.args) - self.assertEqual(False, self.args['connect_args']['raise_on_warnings']) - - def test_mysqlconnector_raise_on_warnings_override(self): - engines._init_connection_args( - url.make_url( - "mysql+mysqlconnector://u:p@host/test" - "?raise_on_warnings=true"), - self.args - ) - - self.assertFalse('raise_on_warnings' in self.args['connect_args']) - - def test_thread_checkin(self): - with mock.patch("sqlalchemy.event.listens_for"): - with mock.patch("sqlalchemy.event.listen") as listen_evt: - engines._init_events.dispatch_on_drivername( - "sqlite")(mock.Mock()) - - self.assertEqual( - listen_evt.mock_calls[0][1][-1], - engines._thread_yield - ) - - -class ProcessGuardTest(test_base.DbTestCase): - def test_process_guard(self): - self.engine.dispose() - - def get_parent_pid(): - return 4 - - def get_child_pid(): - return 5 - - with mock.patch("os.getpid", get_parent_pid): - with self.engine.connect() as conn: - dbapi_id = id(conn.connection.connection) - - with mock.patch("os.getpid", get_child_pid): - with self.engine.connect() as conn: - new_dbapi_id = id(conn.connection.connection) - - self.assertNotEqual(dbapi_id, new_dbapi_id) - - # ensure it doesn't trip again - with mock.patch("os.getpid", get_child_pid): - with self.engine.connect() as conn: - newer_dbapi_id = id(conn.connection.connection) - - self.assertEqual(new_dbapi_id, newer_dbapi_id) - - -class PatchStacktraceTest(test_base.DbTestCase): - - def test_trace(self): - engine = self.engine - - # NOTE(viktors): The code in oslo_db.sqlalchemy.session filters out - # lines from modules under oslo_db, so we should remove - # "oslo_db/" from file path in traceback. - import traceback - orig_extract_stack = traceback.extract_stack - - def extract_stack(): - return [(row[0].replace("oslo_db/", ""), row[1], row[2], row[3]) - for row in orig_extract_stack()] - - with mock.patch("traceback.extract_stack", side_effect=extract_stack): - - engines._add_trace_comments(engine) - conn = engine.connect() - orig_do_exec = engine.dialect.do_execute - with mock.patch.object(engine.dialect, "do_execute") as mock_exec: - - mock_exec.side_effect = orig_do_exec - conn.execute("select 1;") - - call = mock_exec.mock_calls[0] - - # we're the caller, see that we're in there - caller = os.path.join("tests", "sqlalchemy", "test_sqlalchemy.py") - self.assertIn(caller, call[1][1]) diff --git a/oslo_db/tests/sqlalchemy/test_types.py b/oslo_db/tests/sqlalchemy/test_types.py deleted file mode 100644 index 4636c49..0000000 --- a/oslo_db/tests/sqlalchemy/test_types.py +++ /dev/null @@ -1,111 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for JSON SQLAlchemy types.""" - -from sqlalchemy import Column, Integer -from sqlalchemy.dialects import mysql -from sqlalchemy.ext.declarative import declarative_base - -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import types - - -BASE = declarative_base() - - -class JsonTable(BASE, models.ModelBase): - __tablename__ = 'test_json_types' - id = Column(Integer, primary_key=True) - jdict = Column(types.JsonEncodedDict) - jlist = Column(types.JsonEncodedList) - json = Column(types.JsonEncodedType) - - -class JsonTypesTestCase(test_base.DbTestCase): - def setUp(self): - super(JsonTypesTestCase, self).setUp() - JsonTable.__table__.create(self.engine) - self.addCleanup(JsonTable.__table__.drop, self.engine) - self.session = self.sessionmaker() - self.addCleanup(self.session.close) - - def test_default_value(self): - with self.session.begin(): - JsonTable(id=1).save(self.session) - obj = self.session.query(JsonTable).filter_by(id=1).one() - self.assertEqual([], obj.jlist) - self.assertEqual({}, obj.jdict) - self.assertIsNone(obj.json) - - def test_dict(self): - test = {'a': 42, 'b': [1, 2, 3]} - with self.session.begin(): - JsonTable(id=1, jdict=test).save(self.session) - obj = self.session.query(JsonTable).filter_by(id=1).one() - self.assertEqual(test, obj.jdict) - - def test_list(self): - test = [1, True, "hello", {}] - with self.session.begin(): - JsonTable(id=1, jlist=test).save(self.session) - obj = self.session.query(JsonTable).filter_by(id=1).one() - self.assertEqual(test, obj.jlist) - - def test_dict_type_check(self): - self.assertRaises(db_exc.DBError, - JsonTable(id=1, jdict=[]).save, self.session) - - def test_list_type_check(self): - self.assertRaises(db_exc.DBError, - JsonTable(id=1, jlist={}).save, self.session) - - def test_generic(self): - tested = [ - "string", - 42, - True, - None, - [1, 2, 3], - {'a': 'b'} - ] - for i, test in enumerate(tested): - with self.session.begin(): - JsonTable(id=i, json=test).save(self.session) - obj = self.session.query(JsonTable).filter_by(id=i).one() - self.assertEqual(test, obj.json) - - def test_mysql_variants(self): - self.assertEqual( - "LONGTEXT", - str( - types.JsonEncodedDict(mysql_as_long=True).compile( - dialect=mysql.dialect()) - ) - ) - - self.assertEqual( - "MEDIUMTEXT", - str( - types.JsonEncodedDict(mysql_as_medium=True).compile( - dialect=mysql.dialect()) - ) - ) - - self.assertRaises( - TypeError, - lambda: types.JsonEncodedDict( - mysql_as_long=True, - mysql_as_medium=True) - ) diff --git a/oslo_db/tests/sqlalchemy/test_update_match.py b/oslo_db/tests/sqlalchemy/test_update_match.py deleted file mode 100644 index ecc7af7..0000000 --- a/oslo_db/tests/sqlalchemy/test_update_match.py +++ /dev/null @@ -1,445 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslotest import base as oslo_test_base -from sqlalchemy.ext import declarative -from sqlalchemy import schema -from sqlalchemy import sql -from sqlalchemy import types as sqltypes - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import update_match - -Base = declarative.declarative_base() - - -class MyModel(Base): - __tablename__ = 'my_table' - - id = schema.Column(sqltypes.Integer, primary_key=True) - uuid = schema.Column(sqltypes.String(36), nullable=False, unique=True) - x = schema.Column(sqltypes.Integer) - y = schema.Column(sqltypes.String(40)) - z = schema.Column(sqltypes.String(40)) - - -class ManufactureCriteriaTest(oslo_test_base.BaseTestCase): - def test_instance_criteria_basic(self): - specimen = MyModel( - y='y1', z='z3', - uuid='136254d5-3869-408f-9da7-190e0072641a' - ) - self.assertEqual( - "my_table.uuid = :uuid_1 AND my_table.y = :y_1 " - "AND my_table.z = :z_1", - str(update_match.manufacture_entity_criteria(specimen).compile()) - ) - - def test_instance_criteria_basic_wnone(self): - specimen = MyModel( - y='y1', z=None, - uuid='136254d5-3869-408f-9da7-190e0072641a' - ) - self.assertEqual( - "my_table.uuid = :uuid_1 AND my_table.y = :y_1 " - "AND my_table.z IS NULL", - str(update_match.manufacture_entity_criteria(specimen).compile()) - ) - - def test_instance_criteria_tuples(self): - specimen = MyModel( - y='y1', z=('z1', 'z2'), - ) - self.assertEqual( - "my_table.y = :y_1 AND my_table.z IN (:z_1, :z_2)", - str(update_match.manufacture_entity_criteria(specimen).compile()) - ) - - def test_instance_criteria_tuples_wnone(self): - specimen = MyModel( - y='y1', z=('z1', 'z2', None), - ) - self.assertEqual( - "my_table.y = :y_1 AND (my_table.z IS NULL OR " - "my_table.z IN (:z_1, :z_2))", - str(update_match.manufacture_entity_criteria(specimen).compile()) - ) - - def test_instance_criteria_none_list(self): - specimen = MyModel( - y='y1', z=[None], - ) - self.assertEqual( - "my_table.y = :y_1 AND my_table.z IS NULL", - str(update_match.manufacture_entity_criteria(specimen).compile()) - ) - - -class UpdateMatchTest(test_base.DbTestCase): - def setUp(self): - super(UpdateMatchTest, self).setUp() - Base.metadata.create_all(self.engine) - self.addCleanup(Base.metadata.drop_all, self.engine) - # self.engine.echo = 'debug' - self.session = self.sessionmaker(autocommit=False) - self.addCleanup(self.session.close) - self.session.add_all([ - MyModel( - id=1, - uuid='23cb9224-9f8e-40fe-bd3c-e7577b7af37d', - x=5, y='y1', z='z1'), - MyModel( - id=2, - uuid='136254d5-3869-408f-9da7-190e0072641a', - x=6, y='y1', z='z2'), - MyModel( - id=3, - uuid='094eb162-d5df-494b-a458-a91a1b2d2c65', - x=7, y='y1', z='z1'), - MyModel( - id=4, - uuid='94659b3f-ea1f-4ffd-998d-93b28f7f5b70', - x=8, y='y2', z='z2'), - MyModel( - id=5, - uuid='bdf3893c-ee3c-40a0-bc79-960adb6cd1d4', - x=8, y='y2', z=None), - ]) - - self.session.commit() - - def _assert_row(self, pk, values): - row = self.session.execute( - sql.select([MyModel.__table__]).where(MyModel.__table__.c.id == pk) - ).first() - values['id'] = pk - self.assertEqual(values, dict(row)) - - def test_update_specimen_successful(self): - uuid = '136254d5-3869-408f-9da7-190e0072641a' - - specimen = MyModel( - y='y1', z='z2', uuid=uuid - ) - - result = self.session.query(MyModel).update_on_match( - specimen, - 'uuid', - values={'x': 9, 'z': 'z3'} - ) - - self.assertEqual(uuid, result.uuid) - self.assertEqual(2, result.id) - self.assertEqual('z3', result.z) - self.assertIn(result, self.session) - - self._assert_row( - 2, - { - 'uuid': 
'136254d5-3869-408f-9da7-190e0072641a', - 'x': 9, 'y': 'y1', 'z': 'z3' - } - ) - - def test_update_specimen_include_only(self): - uuid = '136254d5-3869-408f-9da7-190e0072641a' - - specimen = MyModel( - y='y9', z='z5', x=6, uuid=uuid - ) - - # Query the object first to test that we merge when the object is - # already cached in the session. - self.session.query(MyModel).filter(MyModel.uuid == uuid).one() - - result = self.session.query(MyModel).update_on_match( - specimen, - 'uuid', - values={'x': 9, 'z': 'z3'}, - include_only=('x', ) - ) - - self.assertEqual(uuid, result.uuid) - self.assertEqual(2, result.id) - self.assertEqual('z3', result.z) - self.assertIn(result, self.session) - self.assertNotIn(result, self.session.dirty) - - self._assert_row( - 2, - { - 'uuid': '136254d5-3869-408f-9da7-190e0072641a', - 'x': 9, 'y': 'y1', 'z': 'z3' - } - ) - - def test_update_specimen_no_rows(self): - specimen = MyModel( - y='y1', z='z3', - uuid='136254d5-3869-408f-9da7-190e0072641a' - ) - - exc = self.assertRaises( - update_match.NoRowsMatched, - self.session.query(MyModel).update_on_match, - specimen, 'uuid', values={'x': 9, 'z': 'z3'} - ) - - self.assertEqual("Zero rows matched for 3 attempts", exc.args[0]) - - def test_update_specimen_process_query_no_rows(self): - specimen = MyModel( - y='y1', z='z2', - uuid='136254d5-3869-408f-9da7-190e0072641a' - ) - - def process_query(query): - return query.filter_by(x=10) - - exc = self.assertRaises( - update_match.NoRowsMatched, - self.session.query(MyModel).update_on_match, - specimen, 'uuid', values={'x': 9, 'z': 'z3'}, - process_query=process_query - ) - - self.assertEqual("Zero rows matched for 3 attempts", exc.args[0]) - - def test_update_specimen_given_query_no_rows(self): - specimen = MyModel( - y='y1', z='z2', - uuid='136254d5-3869-408f-9da7-190e0072641a' - ) - - query = self.session.query(MyModel).filter_by(x=10) - - exc = self.assertRaises( - update_match.NoRowsMatched, - query.update_on_match, - specimen, 'uuid', values={'x': 9, 'z': 'z3'}, - ) - - self.assertEqual("Zero rows matched for 3 attempts", exc.args[0]) - - def test_update_specimen_multi_rows(self): - specimen = MyModel( - y='y1', z='z1', - ) - - exc = self.assertRaises( - update_match.MultiRowsMatched, - self.session.query(MyModel).update_on_match, - specimen, 'y', values={'x': 9, 'z': 'z3'} - ) - - self.assertEqual("2 rows matched; expected one", exc.args[0]) - - def test_update_specimen_query_mismatch_error(self): - specimen = MyModel( - y='y1' - ) - q = self.session.query(MyModel.x, MyModel.y) - exc = self.assertRaises( - AssertionError, - q.update_on_match, - specimen, 'y', values={'x': 9, 'z': 'z3'}, - ) - - self.assertEqual("Query does not match given specimen", exc.args[0]) - - def test_custom_handle_failure_raise_new(self): - class MyException(Exception): - pass - - def handle_failure(query): - # ensure the query is usable - result = query.count() - self.assertEqual(0, result) - - raise MyException("test: %d" % result) - - specimen = MyModel( - y='y1', z='z3', - uuid='136254d5-3869-408f-9da7-190e0072641a' - ) - - exc = self.assertRaises( - MyException, - self.session.query(MyModel).update_on_match, - specimen, 'uuid', values={'x': 9, 'z': 'z3'}, - handle_failure=handle_failure - ) - - self.assertEqual("test: 0", exc.args[0]) - - def test_custom_handle_failure_cancel_raise(self): - uuid = '136254d5-3869-408f-9da7-190e0072641a' - - class MyException(Exception): - pass - - def handle_failure(query): - # ensure the query is usable - result = query.count() - self.assertEqual(0, 
result) - - return True - - specimen = MyModel( - id=2, y='y1', z='z3', uuid=uuid - ) - - result = self.session.query(MyModel).update_on_match( - specimen, 'uuid', values={'x': 9, 'z': 'z3'}, - handle_failure=handle_failure - ) - self.assertEqual(uuid, result.uuid) - self.assertEqual(2, result.id) - self.assertEqual('z3', result.z) - self.assertEqual(9, result.x) - self.assertIn(result, self.session) - - def test_update_specimen_on_none_successful(self): - uuid = 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4' - - specimen = MyModel( - y='y2', z=None, uuid=uuid - ) - - result = self.session.query(MyModel).update_on_match( - specimen, - 'uuid', - values={'x': 9, 'z': 'z3'}, - ) - - self.assertIn(result, self.session) - self.assertEqual(uuid, result.uuid) - self.assertEqual(5, result.id) - self.assertEqual('z3', result.z) - self._assert_row( - 5, - { - 'uuid': 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4', - 'x': 9, 'y': 'y2', 'z': 'z3' - } - ) - - def test_update_specimen_on_multiple_nonnone_successful(self): - uuid = '094eb162-d5df-494b-a458-a91a1b2d2c65' - - specimen = MyModel( - y=('y1', 'y2'), x=(5, 7), uuid=uuid - ) - - result = self.session.query(MyModel).update_on_match( - specimen, - 'uuid', - values={'x': 9, 'z': 'z3'}, - ) - - self.assertIn(result, self.session) - self.assertEqual(uuid, result.uuid) - self.assertEqual(3, result.id) - self.assertEqual('z3', result.z) - self._assert_row( - 3, - { - 'uuid': '094eb162-d5df-494b-a458-a91a1b2d2c65', - 'x': 9, 'y': 'y1', 'z': 'z3' - } - ) - - def test_update_specimen_on_multiple_wnone_successful(self): - uuid = 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4' - specimen = MyModel( - y=('y1', 'y2'), x=(8, 7), z=('z1', 'z2', None), uuid=uuid - ) - - result = self.session.query(MyModel).update_on_match( - specimen, - 'uuid', - values={'x': 9, 'z': 'z3'}, - ) - - self.assertIn(result, self.session) - self.assertEqual(uuid, result.uuid) - self.assertEqual(5, result.id) - self.assertEqual('z3', result.z) - self._assert_row( - 5, - { - 'uuid': 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4', - 'x': 9, 'y': 'y2', 'z': 'z3' - } - ) - - def test_update_returning_pk_matched(self): - pk = self.session.query(MyModel).\ - filter_by(y='y1', z='z2').update_returning_pk( - {'x': 9, 'z': 'z3'}, - ('uuid', '136254d5-3869-408f-9da7-190e0072641a') - ) - - self.assertEqual((2,), pk) - self._assert_row( - 2, - { - 'uuid': '136254d5-3869-408f-9da7-190e0072641a', - 'x': 9, 'y': 'y1', 'z': 'z3' - } - ) - - def test_update_returning_wrong_uuid(self): - exc = self.assertRaises( - update_match.NoRowsMatched, - self.session.query(MyModel). - filter_by(y='y1', z='z2').update_returning_pk, - {'x': 9, 'z': 'z3'}, - ('uuid', '23cb9224-9f8e-40fe-bd3c-e7577b7af37d') - ) - - self.assertEqual("No rows matched the UPDATE", exc.args[0]) - - def test_update_returning_no_rows(self): - exc = self.assertRaises( - update_match.NoRowsMatched, - self.session.query(MyModel). - filter_by(y='y1', z='z3').update_returning_pk, - {'x': 9, 'z': 'z3'}, - ('uuid', '136254d5-3869-408f-9da7-190e0072641a') - ) - - self.assertEqual("No rows matched the UPDATE", exc.args[0]) - - def test_update_multiple_rows(self): - exc = self.assertRaises( - update_match.MultiRowsMatched, - self.session.query(MyModel). 
- filter_by(y='y1', z='z1').update_returning_pk, - {'x': 9, 'z': 'z3'}, - ('y', 'y1') - ) - - self.assertEqual("2 rows matched; expected one", exc.args[0]) - - -class PGUpdateMatchTest( - UpdateMatchTest, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class MySQLUpdateMatchTest( - UpdateMatchTest, - test_base.MySQLOpportunisticTestCase): - pass diff --git a/oslo_db/tests/sqlalchemy/test_utils.py b/oslo_db/tests/sqlalchemy/test_utils.py deleted file mode 100644 index 19f78ce..0000000 --- a/oslo_db/tests/sqlalchemy/test_utils.py +++ /dev/null @@ -1,1335 +0,0 @@ -# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me). -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -import fixtures -import mock -from oslotest import base as test_base -from oslotest import moxstubout -import six -from six.moves.urllib import parse -import sqlalchemy -from sqlalchemy.dialects import mysql -from sqlalchemy import Boolean, Index, Integer, DateTime, String, SmallInteger -from sqlalchemy import CheckConstraint -from sqlalchemy import MetaData, Table, Column, ForeignKey -from sqlalchemy.engine import reflection -from sqlalchemy.engine import url as sa_url -from sqlalchemy.exc import OperationalError -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.ext.hybrid import hybrid_property -from sqlalchemy.orm import Session -from sqlalchemy.sql import select -from sqlalchemy.types import UserDefinedType, NullType -from sqlalchemy.dialects.postgresql import psycopg2 - -from oslo_db import exception -from oslo_db.sqlalchemy.compat import utils as compat_utils -from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import provision -from oslo_db.sqlalchemy import session -from oslo_db.sqlalchemy import test_base as db_test_base -from oslo_db.sqlalchemy import utils -from oslo_db.tests import utils as test_utils - - -Base = declarative_base() -SA_VERSION = compat_utils.SQLA_VERSION - - -class TestSanitizeDbUrl(test_base.BaseTestCase): - - def test_url_with_cred(self): - db_url = 'myproto://johndoe:secret@localhost/myschema' - expected = 'myproto://****:****@localhost/myschema' - actual = utils.sanitize_db_url(db_url) - self.assertEqual(expected, actual) - - def test_url_with_no_cred(self): - db_url = 'sqlite:///mysqlitefile' - actual = utils.sanitize_db_url(db_url) - self.assertEqual(db_url, actual) - - -class CustomType(UserDefinedType): - """Dummy column type for testing unsupported types.""" - def get_col_spec(self): - return "CustomType" - - -class FakeTable(Base): - __tablename__ = 'fake_table' - - user_id = Column(String(50), primary_key=True) - project_id = Column(String(50)) - snapshot_id = Column(String(50)) - - # mox is comparing in some awkward way that - # in this case requires the same identity of object - _expr_to_appease_mox = project_id + snapshot_id - - @hybrid_property - def some_hybrid(self): - raise NotImplementedError() - - @some_hybrid.expression - def some_hybrid(cls): - return cls._expr_to_appease_mox - - def foo(self): - 
pass - - -class FakeModel(object): - def __init__(self, values): - self.values = values - - def __getattr__(self, name): - try: - value = self.values[name] - except KeyError: - raise AttributeError(name) - return value - - def __getitem__(self, key): - if key in self.values: - return self.values[key] - else: - raise NotImplementedError() - - def __repr__(self): - return '' % self.values - - -class TestPaginateQuery(test_base.BaseTestCase): - def setUp(self): - super(TestPaginateQuery, self).setUp() - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = mox_fixture.mox - self.query = self.mox.CreateMockAnything() - self.mox.StubOutWithMock(sqlalchemy, 'asc') - self.mox.StubOutWithMock(sqlalchemy, 'desc') - self.marker = FakeTable(user_id='user', - project_id='p', - snapshot_id='s') - self.model = FakeTable - - def test_paginate_query_no_pagination_no_sort_dirs(self): - sqlalchemy.asc(self.model.user_id).AndReturn('asc_3') - self.query.order_by('asc_3').AndReturn(self.query) - sqlalchemy.asc(self.model.project_id).AndReturn('asc_2') - self.query.order_by('asc_2').AndReturn(self.query) - sqlalchemy.asc(self.model.snapshot_id).AndReturn('asc_1') - self.query.order_by('asc_1').AndReturn(self.query) - self.query.limit(5).AndReturn(self.query) - self.mox.ReplayAll() - utils.paginate_query(self.query, self.model, 5, - ['user_id', 'project_id', 'snapshot_id']) - - def test_paginate_query_no_pagination(self): - sqlalchemy.asc(self.model.user_id).AndReturn('asc') - self.query.order_by('asc').AndReturn(self.query) - sqlalchemy.desc(self.model.project_id).AndReturn('desc') - self.query.order_by('desc').AndReturn(self.query) - self.query.limit(5).AndReturn(self.query) - self.mox.ReplayAll() - utils.paginate_query(self.query, self.model, 5, - ['user_id', 'project_id'], - sort_dirs=['asc', 'desc']) - - def test_invalid_sort_key_str(self): - self.assertEqual("Sort key supplied is invalid: None", - str(exception.InvalidSortKey())) - self.assertEqual("Sort key supplied is invalid: lol", - str(exception.InvalidSortKey("lol"))) - self.assertEqual("Sort key supplied is invalid: lol", - exception.InvalidSortKey("lol").message) - - def test_invalid_unicode_paramater_str(self): - self.assertEqual( - "Invalid Parameter: Encoding directive wasn't provided.", - str(exception.DBInvalidUnicodeParameter())) - self.assertEqual( - "Invalid Parameter: Encoding directive wasn't provided.", - exception.DBInvalidUnicodeParameter().message - ) - - def test_paginate_query_attribute_error(self): - sqlalchemy.asc(self.model.user_id).AndReturn('asc') - self.query.order_by('asc').AndReturn(self.query) - self.mox.ReplayAll() - self.assertRaises(exception.InvalidSortKey, - utils.paginate_query, self.query, - self.model, 5, ['user_id', 'non-existent key']) - - def test_paginate_query_attribute_error_invalid_sortkey(self): - self.assertRaises(exception.InvalidSortKey, - utils.paginate_query, self.query, - self.model, 5, ['bad_user_id']) - - def test_paginate_query_attribute_error_invalid_sortkey_2(self): - self.assertRaises(exception.InvalidSortKey, - utils.paginate_query, self.query, - self.model, 5, ['foo']) - - def test_paginate_query_attribute_error_invalid_sortkey_3(self): - self.assertRaises(exception.InvalidSortKey, - utils.paginate_query, self.query, - self.model, 5, ['asc-nullinvalid']) - - def test_paginate_query_assertion_error(self): - self.mox.ReplayAll() - self.assertRaises(AssertionError, - utils.paginate_query, self.query, - self.model, 5, ['user_id'], - marker=self.marker, - sort_dir='asc', 
sort_dirs=['asc']) - - def test_paginate_query_assertion_error_2(self): - self.mox.ReplayAll() - self.assertRaises(AssertionError, - utils.paginate_query, self.query, - self.model, 5, ['user_id'], - marker=self.marker, - sort_dir=None, sort_dirs=['asc', 'desk']) - - def test_paginate_query(self): - sqlalchemy.asc(self.model.user_id).AndReturn('asc_1') - self.query.order_by('asc_1').AndReturn(self.query) - sqlalchemy.desc(self.model.project_id).AndReturn('desc_1') - self.query.order_by('desc_1').AndReturn(self.query) - self.mox.StubOutWithMock(sqlalchemy.sql, 'and_') - sqlalchemy.sql.and_(mock.ANY).AndReturn('some_crit') - sqlalchemy.sql.and_(mock.ANY, mock.ANY).AndReturn('another_crit') - self.mox.StubOutWithMock(sqlalchemy.sql, 'or_') - sqlalchemy.sql.or_('some_crit', 'another_crit').AndReturn('some_f') - self.query.filter('some_f').AndReturn(self.query) - self.query.limit(5).AndReturn(self.query) - self.mox.ReplayAll() - utils.paginate_query(self.query, self.model, 5, - ['user_id', 'project_id'], - marker=self.marker, - sort_dirs=['asc', 'desc']) - - def test_paginate_query_null(self): - self.mox.StubOutWithMock(self.model.user_id, 'isnot') - self.model.user_id.isnot(None).AndReturn('asc_null_1') - sqlalchemy.desc('asc_null_1').AndReturn('asc_null_2') - self.query.order_by('asc_null_2').AndReturn(self.query) - - sqlalchemy.asc(self.model.user_id).AndReturn('asc_1') - self.query.order_by('asc_1').AndReturn(self.query) - - self.mox.StubOutWithMock(self.model.project_id, 'is_') - self.model.project_id.is_(None).AndReturn('desc_null_1') - sqlalchemy.desc('desc_null_1').AndReturn('desc_null_2') - self.query.order_by('desc_null_2').AndReturn(self.query) - - sqlalchemy.desc(self.model.project_id).AndReturn('desc_1') - self.query.order_by('desc_1').AndReturn(self.query) - - self.mox.StubOutWithMock(sqlalchemy.sql, 'and_') - sqlalchemy.sql.and_(mock.ANY).AndReturn('some_crit') - sqlalchemy.sql.and_(mock.ANY, mock.ANY).AndReturn('another_crit') - self.mox.StubOutWithMock(sqlalchemy.sql, 'or_') - sqlalchemy.sql.or_('some_crit', 'another_crit').AndReturn('some_f') - self.query.filter('some_f').AndReturn(self.query) - self.query.limit(5).AndReturn(self.query) - self.mox.ReplayAll() - utils.paginate_query(self.query, self.model, 5, - ['user_id', 'project_id'], - marker=self.marker, - sort_dirs=['asc-nullslast', 'desc-nullsfirst']) - - def test_paginate_query_value_error(self): - sqlalchemy.asc(self.model.user_id).AndReturn('asc_1') - self.query.order_by('asc_1').AndReturn(self.query) - self.mox.ReplayAll() - self.assertRaises(ValueError, utils.paginate_query, - self.query, self.model, 5, ['user_id', 'project_id'], - marker=self.marker, sort_dirs=['asc', 'mixed']) - - def test_paginate_on_hybrid(self): - sqlalchemy.asc(self.model.user_id).AndReturn('asc_1') - self.query.order_by('asc_1').AndReturn(self.query) - - sqlalchemy.desc(self.model.some_hybrid).AndReturn('desc_1') - self.query.order_by('desc_1').AndReturn(self.query) - - self.query.limit(5).AndReturn(self.query) - self.mox.ReplayAll() - utils.paginate_query(self.query, self.model, 5, - ['user_id', 'some_hybrid'], - sort_dirs=['asc', 'desc']) - - -class TestPaginateQueryActualSQL(test_base.BaseTestCase): - - def test_paginate_on_hybrid_assert_stmt(self): - s = Session() - q = s.query(FakeTable) - q = utils.paginate_query( - q, FakeTable, 5, - ['user_id', 'some_hybrid'], - sort_dirs=['asc', 'desc']) - expected_core_sql = ( - select([FakeTable]). - order_by(sqlalchemy.asc(FakeTable.user_id)). - order_by(sqlalchemy.desc(FakeTable.some_hybrid)). 
- limit(5) - ) - - self.assertEqual( - str(expected_core_sql.compile()), - str(q.statement.compile()) - ) - - -class TestMigrationUtils(db_test_base.DbTestCase): - - """Class for testing utils that are used in db migrations.""" - - def setUp(self): - super(TestMigrationUtils, self).setUp() - self.meta = MetaData(bind=self.engine) - self.conn = self.engine.connect() - self.addCleanup(self.meta.drop_all) - self.addCleanup(self.conn.close) - - def _populate_db_for_drop_duplicate_entries(self, engine, meta, - table_name): - values = [ - {'id': 11, 'a': 3, 'b': 10, 'c': 'abcdef'}, - {'id': 12, 'a': 5, 'b': 10, 'c': 'abcdef'}, - {'id': 13, 'a': 6, 'b': 10, 'c': 'abcdef'}, - {'id': 14, 'a': 7, 'b': 10, 'c': 'abcdef'}, - {'id': 21, 'a': 1, 'b': 20, 'c': 'aa'}, - {'id': 31, 'a': 1, 'b': 20, 'c': 'bb'}, - {'id': 41, 'a': 1, 'b': 30, 'c': 'aef'}, - {'id': 42, 'a': 2, 'b': 30, 'c': 'aef'}, - {'id': 43, 'a': 3, 'b': 30, 'c': 'aef'} - ] - - test_table = Table(table_name, meta, - Column('id', Integer, primary_key=True, - nullable=False), - Column('a', Integer), - Column('b', Integer), - Column('c', String(255)), - Column('deleted', Integer, default=0), - Column('deleted_at', DateTime), - Column('updated_at', DateTime)) - - test_table.create() - engine.execute(test_table.insert(), values) - return test_table, values - - def test_drop_old_duplicate_entries_from_table(self): - table_name = "__test_tmp_table__" - - test_table, values = self._populate_db_for_drop_duplicate_entries( - self.engine, self.meta, table_name) - utils.drop_old_duplicate_entries_from_table( - self.engine, table_name, False, 'b', 'c') - - uniq_values = set() - expected_ids = [] - for value in sorted(values, key=lambda x: x['id'], reverse=True): - uniq_value = (('b', value['b']), ('c', value['c'])) - if uniq_value in uniq_values: - continue - uniq_values.add(uniq_value) - expected_ids.append(value['id']) - - real_ids = [row[0] for row in - self.engine.execute(select([test_table.c.id])).fetchall()] - - self.assertEqual(len(expected_ids), len(real_ids)) - for id_ in expected_ids: - self.assertTrue(id_ in real_ids) - - def test_drop_dup_entries_in_file_conn(self): - table_name = "__test_tmp_table__" - tmp_db_file = self.create_tempfiles([['name', '']], ext='.sql')[0] - in_file_engine = session.EngineFacade( - 'sqlite:///%s' % tmp_db_file).get_engine() - meta = MetaData() - meta.bind = in_file_engine - test_table, values = self._populate_db_for_drop_duplicate_entries( - in_file_engine, meta, table_name) - utils.drop_old_duplicate_entries_from_table( - in_file_engine, table_name, False, 'b', 'c') - - def test_drop_old_duplicate_entries_from_table_soft_delete(self): - table_name = "__test_tmp_table__" - - table, values = self._populate_db_for_drop_duplicate_entries( - self.engine, self.meta, table_name) - utils.drop_old_duplicate_entries_from_table(self.engine, table_name, - True, 'b', 'c') - uniq_values = set() - expected_values = [] - soft_deleted_values = [] - - for value in sorted(values, key=lambda x: x['id'], reverse=True): - uniq_value = (('b', value['b']), ('c', value['c'])) - if uniq_value in uniq_values: - soft_deleted_values.append(value) - continue - uniq_values.add(uniq_value) - expected_values.append(value) - - base_select = table.select() - - rows_select = base_select.where(table.c.deleted != table.c.id) - row_ids = [row['id'] for row in - self.engine.execute(rows_select).fetchall()] - self.assertEqual(len(expected_values), len(row_ids)) - for value in expected_values: - self.assertTrue(value['id'] in row_ids) - - 
deleted_rows_select = base_select.where( - table.c.deleted == table.c.id) - deleted_rows_ids = [row['id'] for row in - self.engine.execute( - deleted_rows_select).fetchall()] - self.assertEqual(len(values) - len(row_ids), - len(deleted_rows_ids)) - for value in soft_deleted_values: - self.assertTrue(value['id'] in deleted_rows_ids) - - def test_change_deleted_column_type_does_not_drop_index(self): - table_name = 'abc' - - indexes = { - 'idx_a_deleted': ['a', 'deleted'], - 'idx_b_deleted': ['b', 'deleted'], - 'idx_a': ['a'] - } - - index_instances = [Index(name, *columns) - for name, columns in six.iteritems(indexes)] - - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('a', String(255)), - Column('b', String(255)), - Column('deleted', Boolean), - *index_instances) - table.create() - utils.change_deleted_column_type_to_id_type(self.engine, table_name) - utils.change_deleted_column_type_to_boolean(self.engine, table_name) - - insp = reflection.Inspector.from_engine(self.engine) - real_indexes = insp.get_indexes(table_name) - self.assertEqual(3, len(real_indexes)) - for index in real_indexes: - name = index['name'] - self.assertIn(name, indexes) - self.assertEqual(set(indexes[name]), - set(index['column_names'])) - - def test_change_deleted_column_type_to_id_type_integer(self): - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('deleted', Boolean)) - table.create() - utils.change_deleted_column_type_to_id_type(self.engine, table_name) - - table = utils.get_table(self.engine, table_name) - self.assertTrue(isinstance(table.c.deleted.type, Integer)) - - def test_change_deleted_column_type_to_id_type_string(self): - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', String(255), primary_key=True), - Column('deleted', Boolean)) - table.create() - utils.change_deleted_column_type_to_id_type(self.engine, table_name) - - table = utils.get_table(self.engine, table_name) - self.assertTrue(isinstance(table.c.deleted.type, String)) - - @db_test_base.backend_specific('sqlite') - def test_change_deleted_column_type_to_id_type_custom(self): - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('foo', CustomType), - Column('deleted', Boolean)) - table.create() - - # reflection of custom types has been fixed upstream - if SA_VERSION < (0, 9, 0): - self.assertRaises(exception.ColumnError, - utils.change_deleted_column_type_to_id_type, - self.engine, table_name) - - fooColumn = Column('foo', CustomType()) - utils.change_deleted_column_type_to_id_type(self.engine, table_name, - foo=fooColumn) - - table = utils.get_table(self.engine, table_name) - - self.assertTrue(isinstance(table.c.deleted.type, Integer)) - - def test_change_deleted_column_type_to_boolean(self): - expected_types = {'mysql': mysql.TINYINT, - 'ibm_db_sa': SmallInteger} - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('deleted', Integer)) - table.create() - - utils.change_deleted_column_type_to_boolean(self.engine, table_name) - - table = utils.get_table(self.engine, table_name) - self.assertIsInstance(table.c.deleted.type, - expected_types.get(self.engine.name, Boolean)) - - def test_change_deleted_column_type_to_boolean_with_fc(self): - expected_types = {'mysql': mysql.TINYINT, - 'ibm_db_sa': SmallInteger} - table_name_1 = 'abc' - table_name_2 = 'bcd' - - table_1 = Table(table_name_1, self.meta, - 
Column('id', Integer, primary_key=True), - Column('deleted', Integer)) - table_1.create() - - table_2 = Table(table_name_2, self.meta, - Column('id', Integer, primary_key=True), - Column('foreign_id', Integer, - ForeignKey('%s.id' % table_name_1)), - Column('deleted', Integer)) - table_2.create() - - utils.change_deleted_column_type_to_boolean(self.engine, table_name_2) - - table = utils.get_table(self.engine, table_name_2) - self.assertIsInstance(table.c.deleted.type, - expected_types.get(self.engine.name, Boolean)) - - @db_test_base.backend_specific('sqlite') - def test_change_deleted_column_type_to_boolean_type_custom(self): - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('foo', CustomType), - Column('deleted', Integer)) - table.create() - - fooColumn = Column('foo', CustomType()) - utils.change_deleted_column_type_to_boolean(self.engine, table_name, - foo=fooColumn) - - table = utils.get_table(self.engine, table_name) - # NOTE(boris-42): There is no way to check has foo type CustomType. - # but sqlalchemy will set it to NullType. This has - # been fixed upstream in recent SA versions - if SA_VERSION < (0, 9, 0): - self.assertTrue(isinstance(table.c.foo.type, NullType)) - self.assertTrue(isinstance(table.c.deleted.type, Boolean)) - - def test_detect_boolean_deleted_constraint_detection(self): - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('deleted', Boolean)) - ck = [ - const for const in table.constraints if - isinstance(const, CheckConstraint)][0] - - self.assertTrue(utils._is_deleted_column_constraint(ck)) - - self.assertFalse( - utils._is_deleted_column_constraint( - CheckConstraint("deleted > 5") - ) - ) - - @db_test_base.backend_specific('sqlite') - def test_change_deleted_column_type_sqlite_drops_check_constraint(self): - table_name = 'abc' - table = Table(table_name, self.meta, - Column('id', Integer, primary_key=True), - Column('deleted', Boolean)) - table.create() - - utils._change_deleted_column_type_to_id_type_sqlite(self.engine, - table_name) - table = Table(table_name, self.meta, autoload=True) - # NOTE(I159): if the CHECK constraint has been dropped (expected - # behavior), any integer value can be inserted, otherwise only 1 or 0. 
- self.engine.execute(table.insert({'deleted': 10})) - - def test_insert_from_select(self): - insert_table_name = "__test_insert_to_table__" - select_table_name = "__test_select_from_table__" - uuidstrs = [] - for unused in range(10): - uuidstrs.append(uuid.uuid4().hex) - insert_table = Table( - insert_table_name, self.meta, - Column('id', Integer, primary_key=True, - nullable=False, autoincrement=True), - Column('uuid', String(36), nullable=False)) - select_table = Table( - select_table_name, self.meta, - Column('id', Integer, primary_key=True, - nullable=False, autoincrement=True), - Column('uuid', String(36), nullable=False)) - - insert_table.create() - select_table.create() - # Add 10 rows to select_table - for uuidstr in uuidstrs: - ins_stmt = select_table.insert().values(uuid=uuidstr) - self.conn.execute(ins_stmt) - - # Select 4 rows in one chunk from select_table - column = select_table.c.id - query_insert = select([select_table], - select_table.c.id < 5).order_by(column) - insert_statement = utils.InsertFromSelect(insert_table, - query_insert) - result_insert = self.conn.execute(insert_statement) - # Verify we insert 4 rows - self.assertEqual(4, result_insert.rowcount) - - query_all = select([insert_table]).where( - insert_table.c.uuid.in_(uuidstrs)) - rows = self.conn.execute(query_all).fetchall() - # Verify we really have 4 rows in insert_table - self.assertEqual(4, len(rows)) - - def test_insert_from_select_with_specified_columns(self): - insert_table_name = "__test_insert_to_table__" - select_table_name = "__test_select_from_table__" - uuidstrs = [] - for unused in range(10): - uuidstrs.append(uuid.uuid4().hex) - insert_table = Table( - insert_table_name, self.meta, - Column('id', Integer, primary_key=True, - nullable=False, autoincrement=True), - Column('uuid', String(36), nullable=False)) - select_table = Table( - select_table_name, self.meta, - Column('id', Integer, primary_key=True, - nullable=False, autoincrement=True), - Column('uuid', String(36), nullable=False)) - - insert_table.create() - select_table.create() - # Add 10 rows to select_table - for uuidstr in uuidstrs: - ins_stmt = select_table.insert().values(uuid=uuidstr) - self.conn.execute(ins_stmt) - - # Select 4 rows in one chunk from select_table - column = select_table.c.id - query_insert = select([select_table], - select_table.c.id < 5).order_by(column) - insert_statement = utils.InsertFromSelect(insert_table, - query_insert, ['id', 'uuid']) - result_insert = self.conn.execute(insert_statement) - # Verify we insert 4 rows - self.assertEqual(4, result_insert.rowcount) - - query_all = select([insert_table]).where( - insert_table.c.uuid.in_(uuidstrs)) - rows = self.conn.execute(query_all).fetchall() - # Verify we really have 4 rows in insert_table - self.assertEqual(4, len(rows)) - - def test_insert_from_select_with_specified_columns_negative(self): - insert_table_name = "__test_insert_to_table__" - select_table_name = "__test_select_from_table__" - uuidstrs = [] - for unused in range(10): - uuidstrs.append(uuid.uuid4().hex) - insert_table = Table( - insert_table_name, self.meta, - Column('id', Integer, primary_key=True, - nullable=False, autoincrement=True), - Column('uuid', String(36), nullable=False)) - select_table = Table( - select_table_name, self.meta, - Column('id', Integer, primary_key=True, - nullable=False, autoincrement=True), - Column('uuid', String(36), nullable=False)) - - insert_table.create() - select_table.create() - # Add 10 rows to select_table - for uuidstr in uuidstrs: - ins_stmt = 
select_table.insert().values(uuid=uuidstr) - self.conn.execute(ins_stmt) - - # Select 4 rows in one chunk from select_table - column = select_table.c.id - query_insert = select([select_table], - select_table.c.id < 5).order_by(column) - insert_statement = utils.InsertFromSelect(insert_table, - query_insert, ['uuid', 'id']) - self.assertRaises(exception.DBError, self.conn.execute, - insert_statement) - - -class PostgesqlTestMigrations(TestMigrationUtils, - db_test_base.PostgreSQLOpportunisticTestCase): - - """Test migrations on PostgreSQL.""" - pass - - -class MySQLTestMigrations(TestMigrationUtils, - db_test_base.MySQLOpportunisticTestCase): - - """Test migrations on MySQL.""" - pass - - -class TestConnectionUtils(test_utils.BaseTestCase): - - def setUp(self): - super(TestConnectionUtils, self).setUp() - - self.full_credentials = {'backend': 'postgresql', - 'database': 'test', - 'user': 'dude', - 'passwd': 'pass'} - - self.connect_string = 'postgresql://dude:pass@localhost/test' - - # NOTE(rpodolyaka): mock the dialect parts, so that we don't depend - # on psycopg2 (or any other DBAPI implementation) in these tests - - @classmethod - def fake_dbapi(cls): - return mock.MagicMock() - patch_dbapi = mock.patch.object(psycopg2.PGDialect_psycopg2, 'dbapi', - new=fake_dbapi) - patch_dbapi.start() - self.addCleanup(patch_dbapi.stop) - - patch_onconnect = mock.patch.object(psycopg2.PGDialect_psycopg2, - 'on_connect') - patch_onconnect.start() - self.addCleanup(patch_onconnect.stop) - - def test_connect_string(self): - connect_string = utils.get_connect_string(**self.full_credentials) - self.assertEqual(self.connect_string, connect_string) - - def test_connect_string_sqlite(self): - sqlite_credentials = {'backend': 'sqlite', 'database': 'test.db'} - connect_string = utils.get_connect_string(**sqlite_credentials) - self.assertEqual('sqlite:///test.db', connect_string) - - def test_is_backend_avail(self): - self.mox.StubOutWithMock(sqlalchemy.engine.base.Engine, 'connect') - fake_connection = self.mox.CreateMockAnything() - fake_connection.close() - sqlalchemy.engine.base.Engine.connect().AndReturn(fake_connection) - self.mox.ReplayAll() - - self.assertTrue(utils.is_backend_avail(**self.full_credentials)) - - def test_is_backend_unavail(self): - log = self.useFixture(fixtures.FakeLogger()) - err = OperationalError("Can't connect to database", None, None) - error_msg = "The postgresql backend is unavailable: %s\n" % err - self.mox.StubOutWithMock(sqlalchemy.engine.base.Engine, 'connect') - sqlalchemy.engine.base.Engine.connect().AndRaise(err) - self.mox.ReplayAll() - self.assertFalse(utils.is_backend_avail(**self.full_credentials)) - self.assertEqual(error_msg, log.output) - - def test_ensure_backend_available(self): - self.mox.StubOutWithMock(sqlalchemy.engine.base.Engine, 'connect') - fake_connection = self.mox.CreateMockAnything() - fake_connection.close() - sqlalchemy.engine.base.Engine.connect().AndReturn(fake_connection) - self.mox.ReplayAll() - - eng = provision.Backend._ensure_backend_available(self.connect_string) - self.assertIsInstance(eng, sqlalchemy.engine.base.Engine) - self.assertEqual(self.connect_string, str(eng.url)) - - def test_ensure_backend_available_no_connection_raises(self): - log = self.useFixture(fixtures.FakeLogger()) - err = OperationalError("Can't connect to database", None, None) - self.mox.StubOutWithMock(sqlalchemy.engine.base.Engine, 'connect') - sqlalchemy.engine.base.Engine.connect().AndRaise(err) - self.mox.ReplayAll() - - exc = self.assertRaises( - 
exception.BackendNotAvailable, - provision.Backend._ensure_backend_available, self.connect_string - ) - self.assertEqual( - "Backend 'postgresql' is unavailable: " - "Could not connect", str(exc)) - self.assertEqual( - "The postgresql backend is unavailable: %s" % err, - log.output.strip()) - - def test_ensure_backend_available_no_dbapi_raises(self): - log = self.useFixture(fixtures.FakeLogger()) - self.mox.StubOutWithMock(sqlalchemy, 'create_engine') - sqlalchemy.create_engine( - sa_url.make_url(self.connect_string)).AndRaise( - ImportError("Can't import DBAPI module foobar")) - self.mox.ReplayAll() - - exc = self.assertRaises( - exception.BackendNotAvailable, - provision.Backend._ensure_backend_available, self.connect_string - ) - self.assertEqual( - "Backend 'postgresql' is unavailable: " - "No DBAPI installed", str(exc)) - self.assertEqual( - "The postgresql backend is unavailable: Can't import " - "DBAPI module foobar", log.output.strip()) - - def test_get_db_connection_info(self): - conn_pieces = parse.urlparse(self.connect_string) - self.assertEqual(('dude', 'pass', 'test', 'localhost'), - utils.get_db_connection_info(conn_pieces)) - - def test_connect_string_host(self): - self.full_credentials['host'] = 'myhost' - connect_string = utils.get_connect_string(**self.full_credentials) - self.assertEqual('postgresql://dude:pass@myhost/test', connect_string) - - -class MyModelSoftDeletedProjectId(declarative_base(), models.ModelBase, - models.SoftDeleteMixin): - __tablename__ = 'soft_deleted_project_id_test_model' - id = Column(Integer, primary_key=True) - project_id = Column(Integer) - - -class MyModel(declarative_base(), models.ModelBase): - __tablename__ = 'test_model' - id = Column(Integer, primary_key=True) - - -class MyModelSoftDeleted(declarative_base(), models.ModelBase, - models.SoftDeleteMixin): - __tablename__ = 'soft_deleted_test_model' - id = Column(Integer, primary_key=True) - - -class TestModelQuery(test_base.BaseTestCase): - - def setUp(self): - super(TestModelQuery, self).setUp() - - self.session = mock.MagicMock() - self.session.query.return_value = self.session.query - self.session.query.filter.return_value = self.session.query - - def test_wrong_model(self): - self.assertRaises(TypeError, utils.model_query, - FakeModel, session=self.session) - - def test_no_soft_deleted(self): - self.assertRaises(ValueError, utils.model_query, - MyModel, session=self.session, deleted=True) - - def test_deleted_false(self): - mock_query = utils.model_query( - MyModelSoftDeleted, session=self.session, deleted=False) - - deleted_filter = mock_query.filter.call_args[0][0] - self.assertEqual('soft_deleted_test_model.deleted = :deleted_1', - str(deleted_filter)) - self.assertEqual(deleted_filter.right.value, - MyModelSoftDeleted.__mapper__.c.deleted.default.arg) - - def test_deleted_true(self): - mock_query = utils.model_query( - MyModelSoftDeleted, session=self.session, deleted=True) - - deleted_filter = mock_query.filter.call_args[0][0] - self.assertEqual(str(deleted_filter), - 'soft_deleted_test_model.deleted != :deleted_1') - self.assertEqual(deleted_filter.right.value, - MyModelSoftDeleted.__mapper__.c.deleted.default.arg) - - @mock.patch.object(utils, "_read_deleted_filter") - def test_no_deleted_value(self, _read_deleted_filter): - utils.model_query(MyModelSoftDeleted, session=self.session) - self.assertEqual(0, _read_deleted_filter.call_count) - - def test_project_filter(self): - project_id = 10 - - mock_query = utils.model_query( - MyModelSoftDeletedProjectId, session=self.session, 
- project_only=True, project_id=project_id) - - deleted_filter = mock_query.filter.call_args[0][0] - self.assertEqual( - 'soft_deleted_project_id_test_model.project_id = :project_id_1', - str(deleted_filter)) - self.assertEqual(project_id, deleted_filter.right.value) - - def test_project_filter_wrong_model(self): - self.assertRaises(ValueError, utils.model_query, - MyModelSoftDeleted, session=self.session, - project_id=10) - - def test_project_filter_allow_none(self): - mock_query = utils.model_query( - MyModelSoftDeletedProjectId, - session=self.session, project_id=(10, None)) - - self.assertEqual( - 'soft_deleted_project_id_test_model.project_id' - ' IN (:project_id_1, NULL)', - str(mock_query.filter.call_args[0][0]) - ) - - def test_model_query_common(self): - utils.model_query(MyModel, args=(MyModel.id,), session=self.session) - self.session.query.assert_called_with(MyModel.id) - - -class TestUtils(db_test_base.DbTestCase): - def setUp(self): - super(TestUtils, self).setUp() - meta = MetaData(bind=self.engine) - self.test_table = Table( - 'test_table', - meta, - Column('a', Integer), - Column('b', Integer) - ) - self.test_table.create() - self.addCleanup(meta.drop_all) - - def test_index_exists(self): - self.assertFalse(utils.index_exists(self.engine, 'test_table', - 'new_index')) - Index('new_index', self.test_table.c.a).create(self.engine) - self.assertTrue(utils.index_exists(self.engine, 'test_table', - 'new_index')) - - def test_add_index(self): - self.assertFalse(utils.index_exists(self.engine, 'test_table', - 'new_index')) - utils.add_index(self.engine, 'test_table', 'new_index', ('a',)) - self.assertTrue(utils.index_exists(self.engine, 'test_table', - 'new_index')) - - def test_add_existing_index(self): - Index('new_index', self.test_table.c.a).create(self.engine) - self.assertRaises(ValueError, utils.add_index, self.engine, - 'test_table', 'new_index', ('a',)) - - def test_drop_index(self): - Index('new_index', self.test_table.c.a).create(self.engine) - utils.drop_index(self.engine, 'test_table', 'new_index') - self.assertFalse(utils.index_exists(self.engine, 'test_table', - 'new_index')) - - def test_drop_unexisting_index(self): - self.assertRaises(ValueError, utils.drop_index, self.engine, - 'test_table', 'new_index') - - @mock.patch('oslo_db.sqlalchemy.utils.drop_index') - @mock.patch('oslo_db.sqlalchemy.utils.add_index') - def test_change_index_columns(self, add_index, drop_index): - utils.change_index_columns(self.engine, 'test_table', 'a_index', - ('a',)) - utils.drop_index.assert_called_once_with(self.engine, 'test_table', - 'a_index') - utils.add_index.assert_called_once_with(self.engine, 'test_table', - 'a_index', ('a',)) - - def test_column_exists(self): - for col in ['a', 'b']: - self.assertTrue(utils.column_exists(self.engine, 'test_table', - col)) - self.assertFalse(utils.column_exists(self.engine, 'test_table', - 'fake_column')) - - -class TestUtilsMysqlOpportunistically( - TestUtils, db_test_base.MySQLOpportunisticTestCase): - pass - - -class TestUtilsPostgresqlOpportunistically( - TestUtils, db_test_base.PostgreSQLOpportunisticTestCase): - pass - - -class TestDialectFunctionDispatcher(test_base.BaseTestCase): - def _single_fixture(self): - callable_fn = mock.Mock() - - dispatcher = orig = utils.dispatch_for_dialect("*")( - callable_fn.default) - dispatcher = dispatcher.dispatch_for("sqlite")(callable_fn.sqlite) - dispatcher = dispatcher.dispatch_for("mysql+pymysql")( - callable_fn.mysql_pymysql) - dispatcher = dispatcher.dispatch_for("mysql")( - 
callable_fn.mysql) - dispatcher = dispatcher.dispatch_for("postgresql")( - callable_fn.postgresql) - - self.assertTrue(dispatcher is orig) - - return dispatcher, callable_fn - - def _multiple_fixture(self): - callable_fn = mock.Mock() - - for targ in [ - callable_fn.default, - callable_fn.sqlite, - callable_fn.mysql, - callable_fn.mysql_pymysql, - callable_fn.postgresql, - callable_fn.postgresql_psycopg2, - callable_fn.pyodbc - ]: - targ.return_value = None - - dispatcher = orig = utils.dispatch_for_dialect("*", multiple=True)( - callable_fn.default) - dispatcher = dispatcher.dispatch_for("sqlite")(callable_fn.sqlite) - dispatcher = dispatcher.dispatch_for("mysql+pymysql")( - callable_fn.mysql_pymysql) - dispatcher = dispatcher.dispatch_for("mysql")( - callable_fn.mysql) - dispatcher = dispatcher.dispatch_for("postgresql+*")( - callable_fn.postgresql) - dispatcher = dispatcher.dispatch_for("postgresql+psycopg2")( - callable_fn.postgresql_psycopg2) - dispatcher = dispatcher.dispatch_for("*+pyodbc")( - callable_fn.pyodbc) - - self.assertTrue(dispatcher is orig) - - return dispatcher, callable_fn - - def test_single(self): - - dispatcher, callable_fn = self._single_fixture() - dispatcher("sqlite://", 1) - dispatcher("postgresql+psycopg2://u:p@h/t", 2) - dispatcher("mysql+pymysql://u:p@h/t", 3) - dispatcher("mysql://u:p@h/t", 4) - dispatcher("mysql+mysqlconnector://u:p@h/t", 5) - - self.assertEqual( - [ - mock.call.sqlite('sqlite://', 1), - mock.call.postgresql("postgresql+psycopg2://u:p@h/t", 2), - mock.call.mysql_pymysql("mysql+pymysql://u:p@h/t", 3), - mock.call.mysql("mysql://u:p@h/t", 4), - mock.call.mysql("mysql+mysqlconnector://u:p@h/t", 5), - ], - callable_fn.mock_calls) - - def test_single_kwarg(self): - dispatcher, callable_fn = self._single_fixture() - dispatcher("sqlite://", foo='bar') - dispatcher("postgresql+psycopg2://u:p@h/t", 1, x='y') - - self.assertEqual( - [ - mock.call.sqlite('sqlite://', foo='bar'), - mock.call.postgresql( - "postgresql+psycopg2://u:p@h/t", - 1, x='y'), - ], - callable_fn.mock_calls) - - def test_dispatch_on_target(self): - callable_fn = mock.Mock() - - @utils.dispatch_for_dialect("*") - def default_fn(url, x, y): - callable_fn.default(url, x, y) - - @default_fn.dispatch_for("sqlite") - def sqlite_fn(url, x, y): - callable_fn.sqlite(url, x, y) - default_fn.dispatch_on_drivername("*")(url, x, y) - - default_fn("sqlite://", 4, 5) - self.assertEqual( - [ - mock.call.sqlite("sqlite://", 4, 5), - mock.call.default("sqlite://", 4, 5) - ], - callable_fn.mock_calls - ) - - def test_single_no_dispatcher(self): - callable_fn = mock.Mock() - - dispatcher = utils.dispatch_for_dialect("sqlite")(callable_fn.sqlite) - dispatcher = dispatcher.dispatch_for("mysql")(callable_fn.mysql) - exc = self.assertRaises( - ValueError, - dispatcher, "postgresql://s:t@localhost/test" - ) - self.assertEqual( - "No default function found for driver: 'postgresql+psycopg2'", - str(exc) - ) - - def test_multiple_no_dispatcher(self): - callable_fn = mock.Mock() - - dispatcher = utils.dispatch_for_dialect("sqlite", multiple=True)( - callable_fn.sqlite) - dispatcher = dispatcher.dispatch_for("mysql")(callable_fn.mysql) - dispatcher("postgresql://s:t@localhost/test") - self.assertEqual( - [], callable_fn.mock_calls - ) - - def test_multiple_no_driver(self): - callable_fn = mock.Mock( - default=mock.Mock(return_value=None), - sqlite=mock.Mock(return_value=None) - ) - - dispatcher = utils.dispatch_for_dialect("*", multiple=True)( - callable_fn.default) - dispatcher = 
dispatcher.dispatch_for("sqlite")( - callable_fn.sqlite) - - dispatcher.dispatch_on_drivername("sqlite")("foo") - self.assertEqual( - [mock.call.sqlite("foo"), mock.call.default("foo")], - callable_fn.mock_calls - ) - - def test_multiple_nesting(self): - callable_fn = mock.Mock( - default=mock.Mock(return_value=None), - mysql=mock.Mock(return_value=None) - ) - - dispatcher = utils.dispatch_for_dialect("*", multiple=True)( - callable_fn.default) - - dispatcher = dispatcher.dispatch_for("mysql+mysqlconnector")( - dispatcher.dispatch_for("mysql+mysqldb")( - callable_fn.mysql - ) - ) - - mysqldb_url = sqlalchemy.engine.url.make_url("mysql+mysqldb://") - mysqlconnector_url = sqlalchemy.engine.url.make_url( - "mysql+mysqlconnector://") - sqlite_url = sqlalchemy.engine.url.make_url("sqlite://") - - dispatcher(mysqldb_url, 1) - dispatcher(mysqlconnector_url, 2) - dispatcher(sqlite_url, 3) - - self.assertEqual( - [ - mock.call.mysql(mysqldb_url, 1), - mock.call.default(mysqldb_url, 1), - mock.call.mysql(mysqlconnector_url, 2), - mock.call.default(mysqlconnector_url, 2), - mock.call.default(sqlite_url, 3) - ], - callable_fn.mock_calls - ) - - def test_single_retval(self): - dispatcher, callable_fn = self._single_fixture() - callable_fn.mysql_pymysql.return_value = 5 - - self.assertEqual( - 5, dispatcher("mysql+pymysql://u:p@h/t", 3) - ) - - def test_engine(self): - eng = sqlalchemy.create_engine("sqlite:///path/to/my/db.db") - dispatcher, callable_fn = self._single_fixture() - - dispatcher(eng) - self.assertEqual( - [mock.call.sqlite(eng)], - callable_fn.mock_calls - ) - - def test_url_pymysql(self): - url = sqlalchemy.engine.url.make_url( - "mysql+pymysql://scott:tiger@localhost/test") - dispatcher, callable_fn = self._single_fixture() - - dispatcher(url, 15) - self.assertEqual( - [mock.call.mysql_pymysql(url, 15)], - callable_fn.mock_calls - ) - - def test_url_mysql_generic(self): - url = sqlalchemy.engine.url.make_url( - "mysql://scott:tiger@localhost/test") - dispatcher, callable_fn = self._single_fixture() - - dispatcher(url, 15) - self.assertEqual( - [mock.call.mysql(url, 15)], - callable_fn.mock_calls - ) - - def test_invalid_target(self): - dispatcher, callable_fn = self._single_fixture() - - exc = self.assertRaises( - ValueError, - dispatcher, 20 - ) - self.assertEqual("Invalid target type: 20", str(exc)) - - def test_invalid_dispatch(self): - callable_fn = mock.Mock() - - dispatcher = utils.dispatch_for_dialect("*")(callable_fn.default) - - exc = self.assertRaises( - ValueError, - dispatcher.dispatch_for("+pyodbc"), callable_fn.pyodbc - ) - self.assertEqual( - "Couldn't parse database[+driver]: '+pyodbc'", - str(exc) - ) - - def test_single_only_one_target(self): - callable_fn = mock.Mock() - - dispatcher = utils.dispatch_for_dialect("*")(callable_fn.default) - dispatcher = dispatcher.dispatch_for("sqlite")(callable_fn.sqlite) - - exc = self.assertRaises( - TypeError, - dispatcher.dispatch_for("sqlite"), callable_fn.sqlite2 - ) - self.assertEqual( - "Multiple functions for expression 'sqlite'", str(exc) - ) - - def test_multiple(self): - dispatcher, callable_fn = self._multiple_fixture() - - dispatcher("postgresql+pyodbc://", 1) - dispatcher("mysql+pymysql://", 2) - dispatcher("ibm_db_sa+db2://", 3) - dispatcher("postgresql+psycopg2://", 4) - dispatcher("postgresql://", 5) - - # TODO(zzzeek): there is a deterministic order here, but we might - # want to tweak it, or maybe provide options. default first? - # most specific first? is *+pyodbc or postgresql+* more specific? 
- self.assertEqual( - [ - mock.call.postgresql('postgresql+pyodbc://', 1), - mock.call.pyodbc('postgresql+pyodbc://', 1), - mock.call.default('postgresql+pyodbc://', 1), - mock.call.mysql_pymysql('mysql+pymysql://', 2), - mock.call.mysql('mysql+pymysql://', 2), - mock.call.default('mysql+pymysql://', 2), - mock.call.default('ibm_db_sa+db2://', 3), - mock.call.postgresql_psycopg2('postgresql+psycopg2://', 4), - mock.call.postgresql('postgresql+psycopg2://', 4), - mock.call.default('postgresql+psycopg2://', 4), - # note this is called because we resolve the default - # DBAPI for the url - mock.call.postgresql_psycopg2('postgresql://', 5), - mock.call.postgresql('postgresql://', 5), - mock.call.default('postgresql://', 5), - ], - callable_fn.mock_calls - ) - - def test_multiple_no_return_value(self): - dispatcher, callable_fn = self._multiple_fixture() - callable_fn.sqlite.return_value = 5 - - exc = self.assertRaises( - TypeError, - dispatcher, "sqlite://" - ) - self.assertEqual( - "Return value not allowed for multiple filtered function", - str(exc) - ) - - -class TestGetInnoDBTables(db_test_base.MySQLOpportunisticTestCase): - - def test_all_tables_use_innodb(self): - self.engine.execute("CREATE TABLE customers " - "(a INT, b CHAR (20), INDEX (a)) ENGINE=InnoDB") - self.assertEqual([], utils.get_non_innodb_tables(self.engine)) - - def test_all_tables_use_innodb_false(self): - self.engine.execute("CREATE TABLE employee " - "(i INT) ENGINE=MEMORY") - self.assertEqual(['employee'], - utils.get_non_innodb_tables(self.engine)) - - def test_skip_tables_use_default_value(self): - self.engine.execute("CREATE TABLE migrate_version " - "(i INT) ENGINE=MEMORY") - self.assertEqual([], - utils.get_non_innodb_tables(self.engine)) - - def test_skip_tables_use_passed_value(self): - self.engine.execute("CREATE TABLE some_table " - "(i INT) ENGINE=MEMORY") - self.assertEqual([], - utils.get_non_innodb_tables( - self.engine, skip_tables=('some_table',))) - - def test_skip_tables_use_empty_list(self): - self.engine.execute("CREATE TABLE some_table_3 " - "(i INT) ENGINE=MEMORY") - self.assertEqual(['some_table_3'], - utils.get_non_innodb_tables( - self.engine, skip_tables=())) - - def test_skip_tables_use_several_values(self): - self.engine.execute("CREATE TABLE some_table_1 " - "(i INT) ENGINE=MEMORY") - self.engine.execute("CREATE TABLE some_table_2 " - "(i INT) ENGINE=MEMORY") - self.assertEqual([], - utils.get_non_innodb_tables( - self.engine, - skip_tables=('some_table_1', 'some_table_2'))) diff --git a/oslo_db/tests/test_api.py b/oslo_db/tests/test_api.py deleted file mode 100644 index 6863790..0000000 --- a/oslo_db/tests/test_api.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Unit tests for DB API.""" - -import mock -from oslo_config import cfg -from oslo_utils import importutils - -from oslo_db import api -from oslo_db import exception -from oslo_db.tests import utils as test_utils - -sqla = importutils.try_import('sqlalchemy') -if not sqla: - raise ImportError("Unable to import module 'sqlalchemy'.") - - -def get_backend(): - return DBAPI() - - -class DBAPI(object): - def _api_raise(self, *args, **kwargs): - """Simulate raising a database-has-gone-away error - - This method creates a fake OperationalError with an ID matching - a valid MySQL "database has gone away" situation. It also decrements - the error_counter so that we can artificially keep track of - how many times this function is called by the wrapper. When - error_counter reaches zero, this function returns True, simulating - the database becoming available again and the query succeeding. - """ - - if self.error_counter > 0: - self.error_counter -= 1 - orig = sqla.exc.DBAPIError(False, False, False) - orig.args = [2006, 'Test raise operational error'] - e = exception.DBConnectionError(orig) - raise e - else: - return True - - def api_raise_default(self, *args, **kwargs): - return self._api_raise(*args, **kwargs) - - @api.safe_for_db_retry - def api_raise_enable_retry(self, *args, **kwargs): - return self._api_raise(*args, **kwargs) - - def api_class_call1(_self, *args, **kwargs): - return args, kwargs - - -class DBAPITestCase(test_utils.BaseTestCase): - def test_dbapi_full_path_module_method(self): - dbapi = api.DBAPI('oslo_db.tests.test_api') - result = dbapi.api_class_call1(1, 2, kwarg1='meow') - expected = ((1, 2), {'kwarg1': 'meow'}) - self.assertEqual(expected, result) - - def test_dbapi_unknown_invalid_backend(self): - self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent') - - def test_dbapi_lazy_loading(self): - dbapi = api.DBAPI('oslo_db.tests.test_api', lazy=True) - - self.assertIsNone(dbapi._backend) - dbapi.api_class_call1(1, 'abc') - self.assertIsNotNone(dbapi._backend) - - def test_dbapi_from_config(self): - conf = cfg.ConfigOpts() - - dbapi = api.DBAPI.from_config(conf, - backend_mapping={'sqlalchemy': __name__}) - self.assertIsNotNone(dbapi._backend) - - -class DBReconnectTestCase(DBAPITestCase): - def setUp(self): - super(DBReconnectTestCase, self).setUp() - - self.test_db_api = DBAPI() - patcher = mock.patch(__name__ + '.get_backend', - return_value=self.test_db_api) - patcher.start() - self.addCleanup(patcher.stop) - - def test_raise_connection_error(self): - self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}) - - self.test_db_api.error_counter = 5 - self.assertRaises(exception.DBConnectionError, self.dbapi._api_raise) - - def test_raise_connection_error_decorated(self): - self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}) - - self.test_db_api.error_counter = 5 - self.assertRaises(exception.DBConnectionError, - self.dbapi.api_raise_enable_retry) - self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry') - - def test_raise_connection_error_enabled(self): - self.dbapi = api.DBAPI('sqlalchemy', - {'sqlalchemy': __name__}, - use_db_reconnect=True) - - self.test_db_api.error_counter = 5 - self.assertRaises(exception.DBConnectionError, - self.dbapi.api_raise_default) - self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry') - - @mock.patch('oslo_db.api.time.sleep', return_value=None) - def test_retry_one(self, p_time_sleep): - self.dbapi = api.DBAPI('sqlalchemy', - {'sqlalchemy': __name__}, - 
use_db_reconnect=True, - retry_interval=1) - - try: - func = self.dbapi.api_raise_enable_retry - self.test_db_api.error_counter = 1 - self.assertTrue(func(), 'Single retry did not succeed.') - except Exception: - self.fail('Single retry raised an un-wrapped error.') - p_time_sleep.assert_called_with(1) - self.assertEqual( - 0, self.test_db_api.error_counter, - 'Counter not decremented, retry logic probably failed.') - - @mock.patch('oslo_db.api.time.sleep', return_value=None) - def test_retry_two(self, p_time_sleep): - self.dbapi = api.DBAPI('sqlalchemy', - {'sqlalchemy': __name__}, - use_db_reconnect=True, - retry_interval=1, - inc_retry_interval=False) - - try: - func = self.dbapi.api_raise_enable_retry - self.test_db_api.error_counter = 2 - self.assertTrue(func(), 'Multiple retry did not succeed.') - except Exception: - self.fail('Multiple retry raised an un-wrapped error.') - p_time_sleep.assert_called_with(1) - self.assertEqual( - 0, self.test_db_api.error_counter, - 'Counter not decremented, retry logic probably failed.') - - @mock.patch('oslo_db.api.time.sleep', return_value=None) - def test_retry_float_interval(self, p_time_sleep): - self.dbapi = api.DBAPI('sqlalchemy', - {'sqlalchemy': __name__}, - use_db_reconnect=True, - retry_interval=0.5) - try: - func = self.dbapi.api_raise_enable_retry - self.test_db_api.error_counter = 1 - self.assertTrue(func(), 'Single retry did not succeed.') - except Exception: - self.fail('Single retry raised an un-wrapped error.') - - p_time_sleep.assert_called_with(0.5) - self.assertEqual( - 0, self.test_db_api.error_counter, - 'Counter not decremented, retry logic probably failed.') - - @mock.patch('oslo_db.api.time.sleep', return_value=None) - def test_retry_until_failure(self, p_time_sleep): - self.dbapi = api.DBAPI('sqlalchemy', - {'sqlalchemy': __name__}, - use_db_reconnect=True, - retry_interval=1, - inc_retry_interval=False, - max_retries=3) - - func = self.dbapi.api_raise_enable_retry - self.test_db_api.error_counter = 5 - self.assertRaises( - exception.DBError, func, - 'Retry of permanent failure did not throw DBError exception.') - p_time_sleep.assert_called_with(1) - self.assertNotEqual( - 0, self.test_db_api.error_counter, - 'Retry did not stop after sql_max_retries iterations.') - - -class DBRetryRequestCase(DBAPITestCase): - def test_retry_wrapper_succeeds(self): - @api.wrap_db_retry(max_retries=10) - def some_method(): - pass - - some_method() - - def test_retry_wrapper_reaches_limit(self): - max_retries = 2 - - @api.wrap_db_retry(max_retries=max_retries) - def some_method(res): - res['result'] += 1 - raise exception.RetryRequest(ValueError()) - - res = {'result': 0} - self.assertRaises(ValueError, some_method, res) - self.assertEqual(max_retries + 1, res['result']) - - def test_retry_wrapper_exception_checker(self): - - def exception_checker(exc): - return isinstance(exc, ValueError) and exc.args[0] < 5 - - @api.wrap_db_retry(max_retries=10, - exception_checker=exception_checker) - def some_method(res): - res['result'] += 1 - raise ValueError(res['result']) - - res = {'result': 0} - self.assertRaises(ValueError, some_method, res) - # our exception checker should have stopped returning True after 5 - self.assertEqual(5, res['result']) - - @mock.patch.object(DBAPI, 'api_class_call1') - @mock.patch.object(api, 'wrap_db_retry') - def test_mocked_methods_are_not_wrapped(self, mocked_wrap, mocked_method): - dbapi = api.DBAPI('oslo_db.tests.test_api') - dbapi.api_class_call1() - - self.assertFalse(mocked_wrap.called) - - 
@mock.patch('oslo_db.api.LOG') - def test_retry_wrapper_non_db_error_not_logged(self, mock_log): - # Tests that if the retry wrapper hits a non-db error (raised from the - # wrapped function), then that exception is reraised but not logged. - - @api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) - def some_method(): - raise AttributeError('test') - - self.assertRaises(AttributeError, some_method) - self.assertFalse(mock_log.called) diff --git a/oslo_db/tests/test_concurrency.py b/oslo_db/tests/test_concurrency.py deleted file mode 100644 index 051da2b..0000000 --- a/oslo_db/tests/test_concurrency.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2014 Mirantis.inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -import mock - -from oslo_db import concurrency -from oslo_db.tests import utils as test_utils - -FAKE_BACKEND_MAPPING = {'sqlalchemy': 'fake.db.sqlalchemy.api'} - - -class TpoolDbapiWrapperTestCase(test_utils.BaseTestCase): - - def setUp(self): - super(TpoolDbapiWrapperTestCase, self).setUp() - self.db_api = concurrency.TpoolDbapiWrapper( - conf=self.conf, backend_mapping=FAKE_BACKEND_MAPPING) - - # NOTE(akurilin): We are not going to add `eventlet` to `oslo_db` in - # requirements (`requirements.txt` and `test-requirements.txt`) due to - # the following reasons: - # - supporting of eventlet's thread pooling is totally optional; - # - we don't need to test `tpool.Proxy` functionality itself, - # because it's a tool from the third party library; - # - `eventlet` would prevent us from running unit tests on Python 3.x - # versions, because it doesn't support them yet. - # - # As we don't test `tpool.Proxy`, we can safely mock it in tests. 
- - self.proxy = mock.MagicMock() - self.eventlet = mock.MagicMock() - self.eventlet.tpool.Proxy.return_value = self.proxy - sys.modules['eventlet'] = self.eventlet - self.addCleanup(sys.modules.pop, 'eventlet', None) - - @mock.patch('oslo_db.api.DBAPI') - def test_db_api_common(self, mock_db_api): - # test context: - # CONF.database.use_tpool == False - # eventlet is installed - # expected result: - # TpoolDbapiWrapper should wrap DBAPI - - fake_db_api = mock.MagicMock() - mock_db_api.from_config.return_value = fake_db_api - - # get access to some db-api method - self.db_api.fake_call_1 - - mock_db_api.from_config.assert_called_once_with( - conf=self.conf, backend_mapping=FAKE_BACKEND_MAPPING) - self.assertEqual(fake_db_api, self.db_api._db_api) - self.assertFalse(self.eventlet.tpool.Proxy.called) - - # get access to other db-api method to be sure that api didn't changed - self.db_api.fake_call_2 - - self.assertEqual(fake_db_api, self.db_api._db_api) - self.assertFalse(self.eventlet.tpool.Proxy.called) - self.assertEqual(1, mock_db_api.from_config.call_count) - - @mock.patch('oslo_db.api.DBAPI') - def test_db_api_config_change(self, mock_db_api): - # test context: - # CONF.database.use_tpool == True - # eventlet is installed - # expected result: - # TpoolDbapiWrapper should wrap tpool proxy - - fake_db_api = mock.MagicMock() - mock_db_api.from_config.return_value = fake_db_api - self.conf.set_override('use_tpool', True, group='database') - - # get access to some db-api method - self.db_api.fake_call - - # CONF.database.use_tpool is True, so we get tpool proxy in this case - mock_db_api.from_config.assert_called_once_with( - conf=self.conf, backend_mapping=FAKE_BACKEND_MAPPING) - self.eventlet.tpool.Proxy.assert_called_once_with(fake_db_api) - self.assertEqual(self.proxy, self.db_api._db_api) - - @mock.patch('oslo_db.api.DBAPI') - def test_db_api_without_installed_eventlet(self, mock_db_api): - # test context: - # CONF.database.use_tpool == True - # eventlet is not installed - # expected result: - # raise ImportError - - self.conf.set_override('use_tpool', True, group='database') - sys.modules['eventlet'] = None - - self.assertRaises(ImportError, getattr, self.db_api, 'fake') diff --git a/oslo_db/tests/utils.py b/oslo_db/tests/utils.py deleted file mode 100644 index 00eb468..0000000 --- a/oslo_db/tests/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
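
For context, a rough sketch of how the TpoolDbapiWrapper driven through mocks in the test_concurrency.py hunk above is typically consumed by a service. The backend module path and the instance_get name are again hypothetical:

# Hypothetical service-side wiring for oslo_db.concurrency.
from oslo_config import cfg
from oslo_db import concurrency

CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'myservice.db.sqlalchemy.api'}

# The wrapped DB API is created lazily on first attribute access.  With
# [database] use_tpool = False it is a plain oslo_db.api.DBAPI; with
# use_tpool = True it is wrapped in eventlet.tpool.Proxy, and ImportError
# is raised if eventlet is not installed, matching the test cases above.
IMPL = concurrency.TpoolDbapiWrapper(conf=CONF,
                                     backend_mapping=_BACKEND_MAPPING)


def instance_get(context, instance_id):
    return IMPL.instance_get(context, instance_id)
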
- -import contextlib - -from oslo_config import cfg -from oslotest import base as test_base -from oslotest import moxstubout -import six - - -if six.PY3: - @contextlib.contextmanager - def nested(*contexts): - with contextlib.ExitStack() as stack: - yield [stack.enter_context(c) for c in contexts] -else: - nested = contextlib.nested - - -class BaseTestCase(test_base.BaseTestCase): - def setUp(self, conf=cfg.CONF): - super(BaseTestCase, self).setUp() - moxfixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = moxfixture.mox - self.stubs = moxfixture.stubs - self.conf = conf - self.addCleanup(self.conf.reset) diff --git a/releasenotes/notes/add-reno-e5c2f63e73c25959.yaml b/releasenotes/notes/add-reno-e5c2f63e73c25959.yaml deleted file mode 100644 index 05b1c80..0000000 --- a/releasenotes/notes/add-reno-e5c2f63e73c25959.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -other: - - Introduce reno for deployer release notes. \ No newline at end of file diff --git a/releasenotes/notes/connection_debug_min_max-bf6d53d49be7ca52.yaml b/releasenotes/notes/connection_debug_min_max-bf6d53d49be7ca52.yaml deleted file mode 100644 index 7b0713b..0000000 --- a/releasenotes/notes/connection_debug_min_max-bf6d53d49be7ca52.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - The allowed values for the ``connection_debug`` option are now restricted to - the range between 0 and 100 (inclusive). Previously a number lower than 0 - or higher than 100 could be given without error. But now, a - ``ConfigFileValueError`` will be raised when the option value is outside this - range. diff --git a/releasenotes/notes/deprecate_config_sqlite_db-bd41d49343049319.yaml b/releasenotes/notes/deprecate_config_sqlite_db-bd41d49343049319.yaml deleted file mode 100644 index d20da8a..0000000 --- a/releasenotes/notes/deprecate_config_sqlite_db-bd41d49343049319.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - | - The configuration option ``sqlite_db`` is now deprecated and - will be removed in the future. Please use configuration - option ``connection`` or ``slave_connection`` to connect to the database. - diff --git a/releasenotes/notes/enginefacade_decorators-4660862fe22d2669.yaml b/releasenotes/notes/enginefacade_decorators-4660862fe22d2669.yaml deleted file mode 100644 index e176f28..0000000 --- a/releasenotes/notes/enginefacade_decorators-4660862fe22d2669.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - enginefacade decorators can now be used for class and instance methods, - which implicitly receive the first positional argument. Previously, it - was required that all decorated functions receive a context value as the - first argument. diff --git a/releasenotes/notes/increase-default-max-overflow-0af787268807f926.yaml b/releasenotes/notes/increase-default-max-overflow-0af787268807f926.yaml deleted file mode 100644 index 6547ac2..0000000 --- a/releasenotes/notes/increase-default-max-overflow-0af787268807f926.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -upgrade: - - | - The default value of ``max_overflow`` config option - has been increased from 10 to 50 in order to allow - OpenStack services heavily using DBs to better handle - spikes of concurrent requests and lower the probability - of getting a pool timeout issue. - - This change potentially leads to increasing of the number - of open connections to an RDBMS server. Depending on the - configuration, you may see "too many connections" errors - in logs of OpenStack services / RDBMS server. 
The max limit of - connections can be set by the means of these config options: - - http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections - http://www.postgresql.org/docs/current/static/runtime-config-connection.html#GUC-MAX-CONNECTIONS - - For details, please see the following LP: - - https://bugs.launchpad.net/oslo.db/+bug/1535375 - - and the ML thread: - - http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html diff --git a/releasenotes/notes/wrap_db_retry-34c7ff2d82afa3f5.yaml b/releasenotes/notes/wrap_db_retry-34c7ff2d82afa3f5.yaml deleted file mode 100644 index 1e3b434..0000000 --- a/releasenotes/notes/wrap_db_retry-34c7ff2d82afa3f5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - Decorator ``oslo_db.api.wrap_db_retry`` now defaults to 10 retries. - Previously the number of attempts was 0, and users had to explicitly - pass ``max_retry_interval`` value greater than 0 to actually enable - retries on errors. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index d3b9315..0000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,277 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# oslo.db Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'oslosphinx', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'oslo.db Release Notes' -copyright = u'2016, oslo.db Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -# The full version, including alpha/beta/rc tags. -import pkg_resources -release = pkg_resources.get_distribution('oslo.db').version -# The short X.Y version. -version = release - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. 
-# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'oslo.configReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'oslo.configReleaseNotes.tex', - u'oslo.db Release Notes Documentation', - u'oslo.db Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'oslo.configreleasenotes', - u'oslo.db Release Notes Documentation', - [u'oslo.db Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'oslo.dbReleaseNotes', - u'oslo.db Release Notes Documentation', - u'oslo.db Developers', 'oslo.configReleaseNotes', - 'An OpenStack library for parsing configuration options from the command' - ' line and configuration files.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 06a65be..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -======================= - oslo.db Release Notes -======================= - - .. toctree:: - :maxdepth: 1 - - unreleased - liberty - mitaka diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 36217be..0000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 437f4c3..0000000 --- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,89 +0,0 @@ -# Andi Chandler , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: oslo.db Release Notes 4.6.1.dev51\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2016-06-27 15:51+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-28 05:55+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en-GB\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "2.6.0-7" -msgstr "2.6.0-7" - -msgid "4.6.0" -msgstr "4.6.0" - -msgid "For details, please see the following LP:" -msgstr "For details, please see the following LP:" - -msgid "Introduce reno for deployer release notes." -msgstr "Introduce reno for deployer release notes." - -msgid "Liberty Series Release Notes" -msgstr "Liberty Series Release Notes" - -msgid "Mitaka Series Release Notes" -msgstr "Mitaka Series Release Notes" - -msgid "Other Notes" -msgstr "Other Notes" - -msgid "" -"The default value of ``max_overflow`` config option has been increased from " -"10 to 50 in order to allow OpenStack services heavily using DBs to better " -"handle spikes of concurrent requests and lower the probability of getting a " -"pool timeout issue." -msgstr "" -"The default value of ``max_overflow`` config option has been increased from " -"10 to 50 in order to allow OpenStack services heavily using DBs to better " -"handle spikes of concurrent requests and lower the probability of getting a " -"pool timeout issue." - -msgid "" -"This change potentially leads to increasing of the number of open " -"connections to an RDBMS server. Depending on the configuration, you may see " -"\"too many connections\" errors in logs of OpenStack services / RDBMS " -"server. 
The max limit of connections can be set by the means of these config " -"options:" -msgstr "" -"This change potentially leads to increasing of the number of open " -"connections to an RDBMS server. Depending on the configuration, you may see " -"\"too many connections\" errors in logs of OpenStack services / RDBMS " -"server. The max limit of connections can be set by the means of these config " -"options:" - -msgid "Unreleased Release Notes" -msgstr "Unreleased Release Notes" - -msgid "Upgrade Notes" -msgstr "Upgrade Notes" - -msgid "and the ML thread:" -msgstr "and the ML thread:" - -msgid "" -"http://dev.mysql.com/doc/refman/5.7/en/server-system-variables." -"html#sysvar_max_connections http://www.postgresql.org/docs/current/static/" -"runtime-config-connection.html#GUC-MAX-CONNECTIONS" -msgstr "" -"http://dev.mysql.com/doc/refman/5.7/en/server-system-variables." -"html#sysvar_max_connections http://www.postgresql.org/docs/current/static/" -"runtime-config-connection.html#GUC-MAX-CONNECTIONS" - -msgid "" -"http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html" -msgstr "" -"http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html" - -msgid "https://bugs.launchpad.net/oslo.db/+bug/1535375" -msgstr "https://bugs.launchpad.net/oslo.db/+bug/1535375" - -msgid "oslo.db Release Notes" -msgstr "oslo.db Release Notes" diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index e545609..0000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Mitaka Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 5860a46..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -========================== - Unreleased Release Notes -========================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 4dbd4f6..0000000 --- a/requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -pbr>=1.6 # Apache-2.0 -alembic>=0.8.4 # MIT -debtcollector>=1.2.0 # Apache-2.0 -oslo.i18n>=2.1.0 # Apache-2.0 -oslo.config>=3.14.0 # Apache-2.0 -oslo.context>=2.4.0 # Apache-2.0 -oslo.utils>=3.16.0 # Apache-2.0 -SQLAlchemy<1.1.0,>=1.0.10 # MIT -sqlalchemy-migrate>=0.9.6 # Apache-2.0 -stevedore>=1.16.0 # Apache-2.0 -six>=1.9.0 # MIT diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index ca00a01..0000000 --- a/setup.cfg +++ /dev/null @@ -1,96 +0,0 @@ -[metadata] -name = oslo.db -summary = Oslo Database library -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://wiki.openstack.org/wiki/Oslo#oslo.db -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.4 - Programming Language :: Python :: 3.5 - -[extras] -# So e.g. nova can test-depend on oslo.db[mysql] -mysql = - PyMySQL>=0.6.2 # MIT License -# or oslo.db[mysql-c] -mysql-c = - MySQL-python:python_version=='2.7' # GPL with FOSS exception -# or oslo.db[postgresql] -postgresql = - psycopg2>=2.5 # LGPL/ZPL -# Dependencies for testing oslo.db itself. -test = - hacking<0.11,>=0.10.0 - coverage>=3.6 # Apache-2.0 - doc8 # Apache-2.0 - eventlet!=0.18.3,>=0.18.2 # MIT - fixtures>=3.0.0 # Apache-2.0/BSD - mock>=2.0 # BSD - python-subunit>=0.0.18 # Apache-2.0/BSD - sphinx!=1.3b1,<1.3,>=1.2.1 # BSD - oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 - oslotest>=1.10.0 # Apache-2.0 - testrepository>=0.0.18 # Apache-2.0/BSD - testtools>=1.4.0 # MIT - os-testr>=0.7.0 # Apache-2.0 - reno>=1.8.0 # Apache2 -fixtures = - testresources>=0.2.4 # Apache-2.0/BSD - testscenarios>=0.4 # Apache-2.0/BSD -pifpaf = - pifpaf>=0.10.0 # Apache-2.0 - -[files] -packages = - oslo_db - -[entry_points] -oslo.config.opts = - oslo.db = oslo_db.options:list_opts - oslo.db.concurrency = oslo_db.concurrency:list_opts - -oslo.db.migration = - alembic = oslo_db.sqlalchemy.migration_cli.ext_alembic:AlembicExtension - migrate = oslo_db.sqlalchemy.migration_cli.ext_migrate:MigrateExtension - -[wheel] -universal = 1 - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -all_files = 1 - -[upload_sphinx] -upload-dir = doc/build/html - -[compile_catalog] -directory = oslo_db/locale -domain = oslo_db - -[update_catalog] -domain = oslo_db -output_dir = oslo_db/locale -input_file = oslo_db/locale/oslo_db.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = oslo_db/locale/oslo_db.pot - -[pbr] -warnerrors = True -autodoc_index_modules = True -autodoc_exclude_modules = - oslo_db.tests.* diff --git a/setup.py b/setup.py deleted file mode 100644 index 782bb21..0000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=1.8'], - pbr=True) diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh deleted file mode 100755 index 9699a6f..0000000 --- a/tools/pretty_tox.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -# return nonzero exit status of rightmost command, so that we -# get nonzero exit on test failure without halting subunit-trace -set -o pipefail - - -TESTRARGS=$1 - -python setup.py testr --testr-args="--subunit $TESTRARGS" | subunit-trace -f -retval=$? -# NOTE(mtreinish) The pipe above would eat the slowest display from pbr's testr -# wrapper so just manually print the slowest tests -echo -e "\nSlowest Tests:\n" -testr slowest -exit $retval diff --git a/tools/run-pifpaf-tests.sh b/tools/run-pifpaf-tests.sh deleted file mode 100755 index 687b6bb..0000000 --- a/tools/run-pifpaf-tests.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e -# Replace mysql:// by mysql+pymysql:// and add sqlite -export OS_TEST_DBAPI_ADMIN_CONNECTION="${OS_TEST_DBAPI_ADMIN_CONNECTION/#mysql:/mysql+pymysql:};sqlite://" -echo $OS_TEST_DBAPI_ADMIN_CONNECTION -tools/pretty_tox.sh $* -TEST_EVENTLET=1 tools/pretty_tox.sh $* diff --git a/tox.ini b/tox.ini deleted file mode 100644 index a1d7bf0..0000000 --- a/tox.ini +++ /dev/null @@ -1,72 +0,0 @@ -[tox] -minversion = 1.8 -envlist = py35,py34,py27,pep8,pip-missing-reqs - -[testenv] -whitelist_externals = bash - env -setenv = - VIRTUAL_ENV={envdir} - BASECOMMAND=bash tools/pretty_tox.sh - - {postgresql,all}: PIFPAF_POSTGRESQL=pifpaf -g OS_TEST_DBAPI_ADMIN_CONNECTION run postgresql -- - {mysql,all}: PIFPAF_MYSQL=pifpaf -g OS_TEST_DBAPI_ADMIN_CONNECTION run mysql -- - {mysql,postgresql,all}: BASECOMMAND={toxinidir}/tools/run-pifpaf-tests.sh - -deps = .[test,fixtures,mysql,postgresql] - {postgresql,mysql,all}: .[pifpaf] - -commands = - {env:PIFPAF_MYSQL:} {env:PIFPAF_POSTGRESQL:} {env:BASECOMMAND:} '{posargs}' - -passenv = OS_TEST_DBAPI_ADMIN_CONNECTION - -[testenv:sqla_09] -commands = pip install SQLAlchemy>=0.9.0,!=0.9.5,<1.0.0 - python setup.py testr --slowest --testr-args='{posargs}' - -[testenv:py27] -commands = - env TEST_EVENTLET=0 bash tools/pretty_tox.sh '{posargs}' - env TEST_EVENTLET=1 bash tools/pretty_tox.sh '{posargs}' - -[testenv:mysql-python] -deps = .[mysql-c,postgresql,test,fixtures] -setenv = - {[testenv]setenv} - OS_TEST_DBAPI_ADMIN_CONNECTION=mysql://openstack_citest:openstack_citest@localhost/;postgresql://openstack_citest:openstack_citest@localhost/postgres;sqlite:// - -[testenv:pep8] -commands = flake8 - -[testenv:venv] -commands = {posargs} - -[testenv:cover] -commands = python setup.py test --coverage --coverage-package-name=oslo_db --testr-args='{posargs}' - -[testenv:docs] -commands = - doc8 -e .rst CONTRIBUTING.rst HACKING.rst README.rst 
doc/source - python setup.py build_sphinx - -[testenv:releasenotes] -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[flake8] -# E123, E125 skipped as they are invalid PEP-8. -ignore = E123,E125 -show-source = True -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build - -[hacking] -import_exceptions = - oslo_db._i18n - -[testenv:pip-missing-reqs] -# do not install test-requirements as that will pollute the virtualenv for -# determining missing packages -# this also means that pip-missing-reqs must be installed separately, outside -# of the requirements.txt files -deps = pip_missing_reqs -commands = pip-missing-reqs -d --ignore-module=oslo_db* --ignore-module=pkg_resources --ignore-file=oslo_db/tests/* oslo_db