Migrate from common code to oslo.db

Migrates Tuskar from common code to oslo.db:

* Adds oslo.db to requirements.txt.
* Adds oslotest to test-requirements.txt.
* Adds EngineFacade lazy loading, inspired by Ironic.
* Updates imports and calls to adapt to the code moved into oslo.db.
* Updates import_opt call parameters to reflect the moved configs.
* Adds a note to the docs about setting up database.connection.
* Regenerates tuskar.conf.sample.

Change-Id: I709209bc8ce82b7134d738da3a0fb609d02ffb8c
Closes-Bug: #1290849

This commit is contained in:
parent 8792e30c45
commit fc1b4fcc98
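For orientation before the diff: the EngineFacade lazy loading mentioned above boils down to the pattern below. This is a minimal sketch that mirrors the tuskar/db/sqlalchemy/api.py hunk further down (it introduces no names beyond that hunk); the facade, and therefore the engine, is only built on first use, so importing the module no longer requires a configured database::

    import threading

    from oslo.config import cfg
    from oslo.db.sqlalchemy import session as db_session

    CONF = cfg.CONF

    _FACADE = None
    _LOCK = threading.Lock()


    def _create_facade_lazily():
        """Build the EngineFacade on first use only."""
        global _FACADE
        if _FACADE is None:
            with _LOCK:
                # Double-checked locking: re-test under the lock so two
                # threads racing past the first check build only one facade.
                if _FACADE is None:
                    _FACADE = db_session.EngineFacade.from_config(CONF)
        return _FACADE


    def get_engine():
        return _create_facade_lazily().get_engine()


    def get_session(**kwargs):
        return _create_facade_lazily().get_session(**kwargs)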
@@ -78,6 +78,18 @@ tuskar.conf doesn't exist startup will fail. You can clone the templates::

     (sudo) git clone https://git.openstack.org/openstack/tripleo-heat-templates /etc/tuskar/tripleo-heat-templates/

+We need to tell tuskar where to connect to the database. Edit the ``database`` section of the config file and change
+
+::
+
+    #connection=<None>
+
+to
+
+::
+
+    connection=sqlite:///tuskar/tuskar.sqlite
+
 We need to initialise the database schema::

     # activate the virtualenv
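As an aside, any SQLAlchemy connection URL is accepted for ``connection``; a MySQL back end, for example, would be configured along these lines (the user, password, host, and database name below are illustrative placeholders, not values from this commit)::

    connection=mysql://tuskar:PASSWORD@localhost/tuskar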
@@ -65,17 +65,6 @@
 #mysql_engine=InnoDB

-
-#
-# Options defined in tuskar.openstack.common.db.sqlalchemy.session
-#
-
-# the filename to use with sqlite (string value)
-#sqlite_db=tuskar.sqlite
-
-# If true, use synchronous mode for sqlite (boolean value)
-#sqlite_synchronous=true
-

 #
 # Options defined in tuskar.openstack.common.lockutils
 #
@@ -202,68 +191,109 @@
 [database]

 #
-# Options defined in tuskar.openstack.common.db.api
+# Options defined in oslo.db
 #

-# The backend to use for db (string value)
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
 # Deprecated group/name - [DEFAULT]/db_backend
 #backend=sqlalchemy

-# Enable the experimental use of thread pooling for all DB API
-# calls (boolean value)
-# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
-#use_tpool=false
-
-
-#
-# Options defined in tuskar.openstack.common.db.sqlalchemy.session
-#
-
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
 # Deprecated group/name - [DEFAULT]/sql_connection
-#connection=sqlite:////tuskar/openstack/common/db/$sqlite_db
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL

-# timeout before idle sql connections are reaped (integer
+# Timeout before idle SQL connections are reaped. (integer
 # value)
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
 #idle_timeout=3600

-# Minimum number of SQL connections to keep open in a pool
+# Minimum number of SQL connections to keep open in a pool.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
 #min_pool_size=1

-# Maximum number of SQL connections to keep open in a pool
+# Maximum number of SQL connections to keep open in a pool.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_pool_size
-#max_pool_size=5
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>

-# maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
 #max_retries=10

-# interval between retries of opening a sql connection
+# Interval between retries of opening a SQL connection.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
 #retry_interval=10

-# If set, use this value for max_overflow with sqlalchemy
+# If set, use this value for max_overflow with SQLAlchemy.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
 #max_overflow=<None>

-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
 # Deprecated group/name - [DEFAULT]/sql_connection_debug
 #connection_debug=0

-# Add python stack traces to SQL as comment strings (boolean
+# Add Python stack traces to SQL as comment strings. (boolean
 # value)
 # Deprecated group/name - [DEFAULT]/sql_connection_trace
 #connection_trace=false

+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+

 [heat]
@@ -12,6 +12,7 @@ iso8601>=0.1.8
 kombu>=2.4.8
 lxml>=2.3
 oslo.config>=1.2.0
+oslo.db>=0.3.0
 pecan>=0.2.0
 posix_ipc
 python-heatclient>=0.2.3
@@ -5,6 +5,7 @@ coverage>=3.6
 discover
 fixtures>=0.3.14
 mock>=1.0
+oslotest
 # Doc requirements
 sphinx>=1.1.2,<1.2
 sphinxcontrib-pecanwsme>=0.5
@@ -1 +1,2 @@
 export TUSKAR_CONFIG_GENERATOR_EXTRA_MODULES=keystoneclient.middleware.auth_token
+export TUSKAR_CONFIG_GENERATOR_EXTRA_LIBRARIES=oslo.db
@@ -25,7 +25,7 @@ from wsgiref import simple_server
 from oslo.config import cfg

 from tuskar.api import app
-from tuskar.common import config
+from tuskar.common import service as tuskar_service
 from tuskar.openstack.common import log

@@ -34,7 +34,7 @@ def main(argv=None):
     if argv is None:
         argv = sys.argv

-    config.parse_args(argv)
+    tuskar_service.prepare_service(argv)

     # Build and start the WSGI app
     host = cfg.CONF.tuskar_api_bind_ip
@@ -18,9 +18,20 @@
 """
 Run storage database migration.
 """

+import sys
+
+from tuskar.common import service
 from tuskar.db import migration
+from tuskar.db.sqlalchemy.api import get_backend


-def main():
+def main(argv=None):
+
+    if argv is None:
+        argv = sys.argv
+
+    # Prepare the Tuskar service and load the database backend.
+    service.prepare_service(argv)
+    get_backend()
+
     migration.db_sync()
@@ -16,17 +16,14 @@
 # under the License.

 from oslo.config import cfg
+from oslo.db import options as db_options

-from tuskar.common import paths
-from tuskar.openstack.common.db.sqlalchemy import session as db_session
 from tuskar import version

-_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
-

 def parse_args(argv, default_config_files=None):
-    db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
-                            sqlite_db='tuskar.sqlite')
+    db_options.set_defaults(cfg.CONF, sqlite_db='tuskar.sqlite')
     cfg.CONF(argv[1:],
              project='tuskar',
              version=version.version_info.release_string(),
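A minimal sketch of what the new parse_args wiring does, assuming oslo.db's options module as of the 0.3 era (where set_defaults also registers the [database] option group on the config object)::

    from oslo.config import cfg
    from oslo.db import options as db_options

    conf = cfg.ConfigOpts()
    # Override the default SQLite file name; `connection` itself stays
    # unset (the <None> shown in tuskar.conf.sample above) until a
    # config file provides it.
    db_options.set_defaults(conf, sqlite_db='tuskar.sqlite')
    conf([], project='tuskar')
    assert conf.database.sqlite_db == 'tuskar.sqlite'
    assert conf.database.connection is None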
tuskar/common/service.py (new file, 51 lines)

@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 eNovance <licensing@enovance.com>
+#
+# Author: Julien Danjou <julien@danjou.info>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+
+from oslo.config import cfg
+
+from tuskar.openstack.common import log
+
+cfg.CONF.register_opts([
+    cfg.IntOpt('periodic_interval',
+               default=60,
+               help='seconds between running periodic tasks'),
+    cfg.StrOpt('host',
+               default=socket.getfqdn(),
+               help='Name of this node. This can be an opaque identifier. '
+               'It is not necessarily a hostname, FQDN, or IP address. '
+               'However, the node name must be valid within '
+               'an AMQP key, and if using ZeroMQ, a valid '
+               'hostname, FQDN, or IP address'),
+])
+
+
+def prepare_service(argv=[]):
+    cfg.set_defaults(log.log_opts,
+                     default_log_levels=['amqp=WARN',
+                                         'amqplib=WARN',
+                                         'qpid.messaging=INFO',
+                                         'sqlalchemy=WARN',
+                                         'keystoneclient=INFO',
+                                         'stevedore=INFO',
+                                         'eventlet.wsgi.server=WARN',
+                                         'iso8601=WARN'
+                                         ])
+    cfg.CONF(argv[1:], project='tuskar')
+    log.setup('tuskar')
@@ -18,12 +18,13 @@ Base classes for storage engines

 import abc

+from oslo.config import cfg
+from oslo.db import api as db_api
 import six

-from tuskar.openstack.common.db import api as db_api
-
 _BACKEND_MAPPING = {'sqlalchemy': 'tuskar.db.sqlalchemy.api'}
-IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
+IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
+                                lazy=True)


 def get_instance():
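The lazy=True flag in the hunk above is what defers loading of the backend; a short sketch of the resulting behavior, using only names from that hunk::

    from oslo.config import cfg
    from oslo.db import api as db_api

    IMPL = db_api.DBAPI.from_config(
        cfg.CONF,
        backend_mapping={'sqlalchemy': 'tuskar.db.sqlalchemy.api'},
        lazy=True)  # tuskar.db.sqlalchemy.api is not imported yet

    # The first attribute access resolves CONF.database.backend through
    # the mapping, imports the module, and proxies to it from then on.
    conn = IMPL.get_backend()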
@@ -16,15 +16,8 @@

 """Database setup and migration commands."""

-from oslo.config import cfg
-
 from tuskar.common import utils

-CONF = cfg.CONF
-CONF.import_opt('backend',
-                'tuskar.openstack.common.db.api',
-                group='database')
-
 IMPL = utils.LazyPluggable(
     pivot='backend',
     config_group='database',
@@ -15,9 +15,11 @@

 """SQLAlchemy storage backend."""

-from oslo.config import cfg
+import threading

-# TODO(deva): import MultipleResultsFound and handle it appropriately
+from oslo.config import cfg
+from oslo.db import exception as db_exception
+from oslo.db.sqlalchemy import session as db_session
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy.orm.exc import NoResultFound
 from sqlalchemy.orm import subqueryload
@@ -25,18 +27,37 @@ from sqlalchemy.orm import subqueryload
 from tuskar.common import exception
 from tuskar.db import api
 from tuskar.db.sqlalchemy import models
-from tuskar.openstack.common.db import exception as db_exception
-from tuskar.openstack.common.db.sqlalchemy import session as db_session
 from tuskar.openstack.common import log


 CONF = cfg.CONF
-CONF.import_opt('connection',
-                'tuskar.openstack.common.db.sqlalchemy.session',
-                group='database')
 LOG = log.getLogger(__name__)


+_FACADE = None
+_LOCK = threading.Lock()
+
+
+def _create_facade_lazily():
+    global _LOCK, _FACADE
+    if _FACADE is None:
+        with _LOCK:
+            if _FACADE is None:
+                _FACADE = db_session.EngineFacade.from_config(CONF)
+
+    return _FACADE
+
+
+def get_engine():
+    facade = _create_facade_lazily()
+    return facade.get_engine()
+
+
+def get_session(**kwargs):
+    facade = _create_facade_lazily()
+    return facade.get_session(**kwargs)
+
+
 def get_backend():
     """The backend is this module itself."""
     return Connection()
@@ -48,15 +69,11 @@ def model_query(model, *args, **kwargs):
     :param session: if present, the session to use
     """

-    session = kwargs.get('session') or db_session.get_session()
+    session = kwargs.get('session') or get_session()
     query = session.query(model, *args)
     return query


-def get_session():
-    return db_session.get_session(sqlite_fk=True)
-
-
 class Connection(api.Connection):
     """SqlAlchemy connection."""

@@ -23,11 +23,11 @@ import sqlalchemy

 from tuskar.common import exception
 from tuskar.db import migration
-from tuskar.openstack.common.db.sqlalchemy import session as db_session
+from tuskar.db.sqlalchemy import api as sqla_api

 _REPOSITORY = None

-get_engine = db_session.get_engine
+get_engine = sqla_api.get_engine


 def db_sync(version=None):
@@ -16,13 +16,12 @@ Tuskar domain models for use with SQLAlchemy.
 """

 from oslo.config import cfg
+from oslo.db.sqlalchemy import models

 from sqlalchemy import (Column, ForeignKey, Integer, String, Text)
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import relationship

-from tuskar.openstack.common.db.sqlalchemy import models
-

 sql_opts = [
     cfg.StrOpt('mysql_engine',
@@ -1,16 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Cloudscaling Group, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@@ -1,106 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2013 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Multiple DB API backend support.
-
-Supported configuration options:
-
-The following two parameters are in the 'database' group:
-`backend`: DB backend name or full module path to DB backend module.
-`use_tpool`: Enable thread pooling of DB API calls.
-
-A DB backend module should implement a method named 'get_backend' which
-takes no arguments. The method can return any object that implements DB
-API methods.
-
-*NOTE*: There are bugs in eventlet when using tpool combined with
-threading locks. The python logging module happens to use such locks. To
-work around this issue, be sure to specify thread=False with
-eventlet.monkey_patch().
-
-A bug for eventlet has been filed here:
-
-https://bitbucket.org/eventlet/eventlet/issue/137/
-"""
-import functools
-
-from oslo.config import cfg
-
-from tuskar.openstack.common import importutils
-from tuskar.openstack.common import lockutils
-
-
-db_opts = [
-    cfg.StrOpt('backend',
-               default='sqlalchemy',
-               deprecated_name='db_backend',
-               deprecated_group='DEFAULT',
-               help='The backend to use for db'),
-    cfg.BoolOpt('use_tpool',
-                default=False,
-                deprecated_name='dbapi_use_tpool',
-                deprecated_group='DEFAULT',
-                help='Enable the experimental use of thread pooling for '
-                     'all DB API calls')
-]
-
-CONF = cfg.CONF
-CONF.register_opts(db_opts, 'database')
-
-
-class DBAPI(object):
-    def __init__(self, backend_mapping=None):
-        if backend_mapping is None:
-            backend_mapping = {}
-        self.__backend = None
-        self.__backend_mapping = backend_mapping
-
-    @lockutils.synchronized('dbapi_backend', 'tuskar-')
-    def __get_backend(self):
-        """Get the actual backend. May be a module or an instance of
-        a class. Doesn't matter to us. We do this synchronized as it's
-        possible multiple greenthreads started very quickly trying to do
-        DB calls and eventlet can switch threads before self.__backend gets
-        assigned.
-        """
-        if self.__backend:
-            # Another thread assigned it
-            return self.__backend
-        backend_name = CONF.database.backend
-        self.__use_tpool = CONF.database.use_tpool
-        if self.__use_tpool:
-            from eventlet import tpool
-            self.__tpool = tpool
-        # Import the untranslated name if we don't have a
-        # mapping.
-        backend_path = self.__backend_mapping.get(backend_name,
-                                                  backend_name)
-        backend_mod = importutils.import_module(backend_path)
-        self.__backend = backend_mod.get_backend()
-        return self.__backend
-
-    def __getattr__(self, key):
-        backend = self.__backend or self.__get_backend()
-        attr = getattr(backend, key)
-        if not self.__use_tpool or not hasattr(attr, '__call__'):
-            return attr
-
-        def tpool_wrapper(*args, **kwargs):
-            return self.__tpool.execute(attr, *args, **kwargs)
-
-        functools.update_wrapper(tpool_wrapper, attr)
-        return tpool_wrapper
@@ -1,45 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""DB related custom exceptions."""
-
-from tuskar.openstack.common.gettextutils import _
-
-
-class DBError(Exception):
-    """Wraps an implementation specific exception."""
-    def __init__(self, inner_exception=None):
-        self.inner_exception = inner_exception
-        super(DBError, self).__init__(str(inner_exception))
-
-
-class DBDuplicateEntry(DBError):
-    """Wraps an implementation specific exception."""
-    def __init__(self, columns=[], inner_exception=None):
-        self.columns = columns
-        super(DBDuplicateEntry, self).__init__(inner_exception)
-
-
-class DBDeadlock(DBError):
-    def __init__(self, inner_exception=None):
-        super(DBDeadlock, self).__init__(inner_exception)
-
-
-class DBInvalidUnicodeParameter(Exception):
-    message = _("Invalid Parameter: "
-                "Unicode is not supported by the current database.")
@@ -1,16 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Cloudscaling Group, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@@ -1,105 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 Piston Cloud Computing, Inc.
-# Copyright 2012 Cloudscaling Group, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-SQLAlchemy models.
-"""
-
-from sqlalchemy import Column, Integer
-from sqlalchemy import DateTime
-from sqlalchemy.orm import object_mapper
-
-from tuskar.openstack.common.db.sqlalchemy.session import get_session
-from tuskar.openstack.common import timeutils
-
-
-class ModelBase(object):
-    """Base class for models."""
-    __table_initialized__ = False
-
-    def save(self, session=None):
-        """Save this object."""
-        if not session:
-            session = get_session()
-        # NOTE(boris-42): This part of code should be look like:
-        #                   sesssion.add(self)
-        #                   session.flush()
-        #                 But there is a bug in sqlalchemy and eventlet that
-        #                 raises NoneType exception if there is no running
-        #                 transaction and rollback is called. As long as
-        #                 sqlalchemy has this bug we have to create transaction
-        #                 explicity.
-        with session.begin(subtransactions=True):
-            session.add(self)
-            session.flush()
-
-    def __setitem__(self, key, value):
-        setattr(self, key, value)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def get(self, key, default=None):
-        return getattr(self, key, default)
-
-    def __iter__(self):
-        columns = dict(object_mapper(self).columns).keys()
-        # NOTE(russellb): Allow models to specify other keys that can be looked
-        # up, beyond the actual db columns. An example would be the 'name'
-        # property for an Instance.
-        if hasattr(self, '_extra_keys'):
-            columns.extend(self._extra_keys())
-        self._i = iter(columns)
-        return self
-
-    def next(self):
-        n = self._i.next()
-        return n, getattr(self, n)
-
-    def update(self, values):
-        """Make the model object behave like a dict."""
-        for k, v in values.iteritems():
-            setattr(self, k, v)
-
-    def iteritems(self):
-        """Make the model object behave like a dict.
-
-        Includes attributes from joins."""
-        local = dict(self)
-        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
-                      if not k[0] == '_'])
-        local.update(joined)
-        return local.iteritems()
-
-
-class TimestampMixin(object):
-    created_at = Column(DateTime, default=timeutils.utcnow)
-    updated_at = Column(DateTime, onupdate=timeutils.utcnow)
-
-
-class SoftDeleteMixin(object):
-    deleted_at = Column(DateTime)
-    deleted = Column(Integer, default=0)
-
-    def soft_delete(self, session=None):
-        """Mark this object as deleted."""
-        self.deleted = self.id
-        self.deleted_at = timeutils.utcnow()
-        self.save(session=session)
@ -1,706 +0,0 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Session Handling for SQLAlchemy backend.
|
|
||||||
|
|
||||||
Initializing:
|
|
||||||
|
|
||||||
* Call set_defaults with the minimal of the following kwargs:
|
|
||||||
sql_connection, sqlite_db
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
session.set_defaults(
|
|
||||||
sql_connection="sqlite:///var/lib/tuskar/sqlite.db",
|
|
||||||
sqlite_db="/var/lib/tuskar/sqlite.db")
|
|
||||||
|
|
||||||
Recommended ways to use sessions within this framework:
|
|
||||||
|
|
||||||
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
|
|
||||||
model_query() will implicitly use a session when called without one
|
|
||||||
supplied. This is the ideal situation because it will allow queries
|
|
||||||
to be automatically retried if the database connection is interrupted.
|
|
||||||
|
|
||||||
Note: Automatic retry will be enabled in a future patch.
|
|
||||||
|
|
||||||
It is generally fine to issue several queries in a row like this. Even though
|
|
||||||
they may be run in separate transactions and/or separate sessions, each one
|
|
||||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
|
||||||
functionality should be handled at a logical level. For an example, look at
|
|
||||||
the code around quotas and reservation_rollback().
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
def get_foo(context, foo):
|
|
||||||
return model_query(context, models.Foo).\
|
|
||||||
filter_by(foo=foo).\
|
|
||||||
first()
|
|
||||||
|
|
||||||
def update_foo(context, id, newfoo):
|
|
||||||
model_query(context, models.Foo).\
|
|
||||||
filter_by(id=id).\
|
|
||||||
update({'foo': newfoo})
|
|
||||||
|
|
||||||
def create_foo(context, values):
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(values)
|
|
||||||
foo_ref.save()
|
|
||||||
return foo_ref
|
|
||||||
|
|
||||||
|
|
||||||
* Within the scope of a single method, keeping all the reads and writes within
|
|
||||||
the context managed by a single session. In this way, the session's __exit__
|
|
||||||
handler will take care of calling flush() and commit() for you.
|
|
||||||
If using this approach, you should not explicitly call flush() or commit().
|
|
||||||
Any error within the context of the session will cause the session to emit
|
|
||||||
a ROLLBACK. If the connection is dropped before this is possible, the
|
|
||||||
database will implicitly rollback the transaction.
|
|
||||||
|
|
||||||
Note: statements in the session scope will not be automatically retried.
|
|
||||||
|
|
||||||
If you create models within the session, they need to be added, but you
|
|
||||||
do not need to call model.save()
|
|
||||||
|
|
||||||
def create_many_foo(context, foos):
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
for foo in foos:
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(foo)
|
|
||||||
session.add(foo_ref)
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
foo_ref = model_query(context, models.Foo, session).\
|
|
||||||
filter_by(id=foo_id).\
|
|
||||||
first()
|
|
||||||
model_query(context, models.Bar, session).\
|
|
||||||
filter_by(id=foo_ref['bar_id']).\
|
|
||||||
update({'bar': newbar})
|
|
||||||
|
|
||||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
|
||||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
|
||||||
it is always best to use as few queries as possible. The two queries in
|
|
||||||
update_bar can be better expressed using a single query which avoids
|
|
||||||
the need for an explicit transaction. It can be expressed like so:
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
subq = model_query(context, models.Foo.id).\
|
|
||||||
filter_by(id=foo_id).\
|
|
||||||
limit(1).\
|
|
||||||
subquery()
|
|
||||||
model_query(context, models.Bar).\
|
|
||||||
filter_by(id=subq.as_scalar()).\
|
|
||||||
update({'bar': newbar})
|
|
||||||
|
|
||||||
For reference, this emits approximagely the following SQL statement:
|
|
||||||
|
|
||||||
UPDATE bar SET bar = ${newbar}
|
|
||||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
|
||||||
|
|
||||||
* Passing an active session between methods. Sessions should only be passed
|
|
||||||
to private methods. The private method must use a subtransaction; otherwise
|
|
||||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
|
||||||
transaction. Public methods should not accept a session parameter and should
|
|
||||||
not be involved in sessions within the caller's scope.
|
|
||||||
|
|
||||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
|
||||||
due to nesting transactions, and it is not possible to implicitly retry
|
|
||||||
failed database operations when using this approach.
|
|
||||||
|
|
||||||
This also makes code somewhat more difficult to read and debug, because a
|
|
||||||
single database transaction spans more than one method. Error handling
|
|
||||||
becomes less clear in this situation. When this is needed for code clarity,
|
|
||||||
it should be clearly documented.
|
|
||||||
|
|
||||||
def myfunc(foo):
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
# do some database things
|
|
||||||
bar = _private_func(foo, session)
|
|
||||||
return bar
|
|
||||||
|
|
||||||
def _private_func(foo, session=None):
|
|
||||||
if not session:
|
|
||||||
session = get_session()
|
|
||||||
with session.begin(subtransaction=True):
|
|
||||||
# do some other database things
|
|
||||||
return bar
|
|
||||||
|
|
||||||
|
|
||||||
There are some things which it is best to avoid:
|
|
||||||
|
|
||||||
* Don't keep a transaction open any longer than necessary.
|
|
||||||
|
|
||||||
This means that your "with session.begin()" block should be as short
|
|
||||||
as possible, while still containing all the related calls for that
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
* Avoid "with_lockmode('UPDATE')" when possible.
|
|
||||||
|
|
||||||
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
|
|
||||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
|
||||||
"gap" where no rows exist, and prevents any other writes to that space.
|
|
||||||
This can effectively prevent any INSERT into a table by locking the gap
|
|
||||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
|
||||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
|
||||||
|
|
||||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
|
||||||
number of rows matching a query, and if only one row is returned,
|
|
||||||
then issue the SELECT FOR UPDATE.
|
|
||||||
|
|
||||||
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
|
|
||||||
However, this can not be done until the "deleted" columns are removed and
|
|
||||||
proper UNIQUE constraints are added to the tables.
|
|
||||||
|
|
||||||
|
|
||||||
Enabling soft deletes:
|
|
||||||
|
|
||||||
* To use/enable soft-deletes, the SoftDeleteMixin must be added
|
|
||||||
to your model class. For example:
|
|
||||||
|
|
||||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
Efficient use of soft deletes:
|
|
||||||
|
|
||||||
* There are two possible ways to mark a record as deleted:
|
|
||||||
model.soft_delete() and query.soft_delete().
|
|
||||||
|
|
||||||
model.soft_delete() method works with single already fetched entry.
|
|
||||||
query.soft_delete() makes only one db request for all entries that correspond
|
|
||||||
to query.
|
|
||||||
|
|
||||||
* In almost all cases you should use query.soft_delete(). Some examples:
|
|
||||||
|
|
||||||
def soft_delete_bar():
|
|
||||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
|
||||||
if count == 0:
|
|
||||||
raise Exception("0 entries were soft deleted")
|
|
||||||
|
|
||||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
|
||||||
if session is None:
|
|
||||||
session = get_session()
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
count = model_query(BarModel).\
|
|
||||||
find(some_condition).\
|
|
||||||
soft_delete(synchronize_session=True)
|
|
||||||
# Here synchronize_session is required, because we
|
|
||||||
# don't know what is going on in outer session.
|
|
||||||
if count == 0:
|
|
||||||
raise Exception("0 entries were soft deleted")
|
|
||||||
|
|
||||||
* There is only one situation where model.soft_delete() is appropriate: when
|
|
||||||
you fetch a single record, work with it, and mark it as deleted in the same
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
def soft_delete_bar_model():
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
|
||||||
# Work with bar_ref
|
|
||||||
bar_ref.soft_delete(session=session)
|
|
||||||
|
|
||||||
However, if you need to work with all entries that correspond to query and
|
|
||||||
then soft delete them you should use query.soft_delete() method:
|
|
||||||
|
|
||||||
def soft_delete_multi_models():
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
query = model_query(BarModel, session=session).\
|
|
||||||
find(some_condition)
|
|
||||||
model_refs = query.all()
|
|
||||||
# Work with model_refs
|
|
||||||
query.soft_delete(synchronize_session=False)
|
|
||||||
# synchronize_session=False should be set if there is no outer
|
|
||||||
# session and these entries are not used after this.
|
|
||||||
|
|
||||||
When working with many rows, it is very important to use query.soft_delete,
|
|
||||||
which issues a single query. Using model.soft_delete(), as in the following
|
|
||||||
example, is very inefficient.
|
|
||||||
|
|
||||||
for bar_ref in bar_refs:
|
|
||||||
bar_ref.soft_delete(session=session)
|
|
||||||
# This will produce count(bar_refs) db requests.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os.path
|
|
||||||
import re
|
|
||||||
import time
|
|
||||||
|
|
||||||
from eventlet import greenthread
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
from sqlalchemy import exc as sqla_exc
|
|
||||||
import sqlalchemy.interfaces
|
|
||||||
from sqlalchemy.interfaces import PoolListener
|
|
||||||
import sqlalchemy.orm
|
|
||||||
from sqlalchemy.pool import NullPool, StaticPool
|
|
||||||
from sqlalchemy.sql.expression import literal_column
|
|
||||||
|
|
||||||
from tuskar.openstack.common.db import exception
|
|
||||||
from tuskar.openstack.common import log as logging
|
|
||||||
from tuskar.openstack.common.gettextutils import _
|
|
||||||
from tuskar.openstack.common import timeutils
|
|
||||||
|
|
||||||
DEFAULT = 'DEFAULT'
|
|
||||||
|
|
||||||
sqlite_db_opts = [
|
|
||||||
cfg.StrOpt('sqlite_db',
|
|
||||||
default='tuskar.sqlite',
|
|
||||||
help='the filename to use with sqlite'),
|
|
||||||
cfg.BoolOpt('sqlite_synchronous',
|
|
||||||
default=True,
|
|
||||||
help='If true, use synchronous mode for sqlite'),
|
|
||||||
]
|
|
||||||
|
|
||||||
database_opts = [
|
|
||||||
cfg.StrOpt('connection',
|
|
||||||
default='sqlite:///' +
|
|
||||||
os.path.abspath(os.path.join(os.path.dirname(__file__),
|
|
||||||
'../', '$sqlite_db')),
|
|
||||||
help='The SQLAlchemy connection string used to connect to the '
|
|
||||||
'database',
|
|
||||||
deprecated_name='sql_connection',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
secret=True),
|
|
||||||
cfg.IntOpt('idle_timeout',
|
|
||||||
default=3600,
|
|
||||||
deprecated_name='sql_idle_timeout',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='timeout before idle sql connections are reaped'),
|
|
||||||
cfg.IntOpt('min_pool_size',
|
|
||||||
default=1,
|
|
||||||
deprecated_name='sql_min_pool_size',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='Minimum number of SQL connections to keep open in a '
|
|
||||||
'pool'),
|
|
||||||
cfg.IntOpt('max_pool_size',
|
|
||||||
default=5,
|
|
||||||
deprecated_name='sql_max_pool_size',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='Maximum number of SQL connections to keep open in a '
|
|
||||||
'pool'),
|
|
||||||
cfg.IntOpt('max_retries',
|
|
||||||
default=10,
|
|
||||||
deprecated_name='sql_max_retries',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='maximum db connection retries during startup. '
|
|
||||||
'(setting -1 implies an infinite retry count)'),
|
|
||||||
cfg.IntOpt('retry_interval',
|
|
||||||
default=10,
|
|
||||||
deprecated_name='sql_retry_interval',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='interval between retries of opening a sql connection'),
|
|
||||||
cfg.IntOpt('max_overflow',
|
|
||||||
default=None,
|
|
||||||
deprecated_name='sql_max_overflow',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='If set, use this value for max_overflow with sqlalchemy'),
|
|
||||||
cfg.IntOpt('connection_debug',
|
|
||||||
default=0,
|
|
||||||
deprecated_name='sql_connection_debug',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='Verbosity of SQL debugging information. 0=None, '
|
|
||||||
'100=Everything'),
|
|
||||||
cfg.BoolOpt('connection_trace',
|
|
||||||
default=False,
|
|
||||||
deprecated_name='sql_connection_trace',
|
|
||||||
deprecated_group=DEFAULT,
|
|
||||||
help='Add python stack traces to SQL as comment strings'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(sqlite_db_opts)
|
|
||||||
CONF.register_opts(database_opts, 'database')
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
_ENGINE = None
|
|
||||||
_MAKER = None
|
|
||||||
|
|
||||||
|
|
||||||
def set_defaults(sql_connection, sqlite_db):
|
|
||||||
"""Set defaults for configuration variables."""
|
|
||||||
cfg.set_defaults(database_opts,
|
|
||||||
connection=sql_connection)
|
|
||||||
cfg.set_defaults(sqlite_db_opts,
|
|
||||||
sqlite_db=sqlite_db)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
global _ENGINE, _MAKER
|
|
||||||
|
|
||||||
if _MAKER:
|
|
||||||
_MAKER.close_all()
|
|
||||||
_MAKER = None
|
|
||||||
if _ENGINE:
|
|
||||||
_ENGINE.dispose()
|
|
||||||
_ENGINE = None
|
|
||||||
|
|
||||||
|
|
||||||
class SqliteForeignKeysListener(PoolListener):
|
|
||||||
"""
|
|
||||||
Ensures that the foreign key constraints are enforced in SQLite.
|
|
||||||
|
|
||||||
The foreign key constraints are disabled by default in SQLite,
|
|
||||||
so the foreign key constraints will be enabled here for every
|
|
||||||
database connection
|
|
||||||
"""
|
|
||||||
def connect(self, dbapi_con, con_record):
|
|
||||||
dbapi_con.execute('pragma foreign_keys=ON')
|
|
||||||
|
|
||||||
|
|
||||||
def get_session(autocommit=True, expire_on_commit=False,
|
|
||||||
sqlite_fk=False):
|
|
||||||
"""Return a SQLAlchemy session."""
|
|
||||||
global _MAKER
|
|
||||||
|
|
||||||
if _MAKER is None:
|
|
||||||
engine = get_engine(sqlite_fk=sqlite_fk)
|
|
||||||
_MAKER = get_maker(engine, autocommit, expire_on_commit)
|
|
||||||
|
|
||||||
session = _MAKER()
|
|
||||||
return session
|
|
||||||
|
|
||||||
|
|
||||||
# note(boris-42): In current versions of DB backends unique constraint
|
|
||||||
# violation messages follow the structure:
|
|
||||||
#
|
|
||||||
# sqlite:
|
|
||||||
# 1 column - (IntegrityError) column c1 is not unique
|
|
||||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
|
||||||
#
|
|
||||||
# postgres:
|
|
||||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
|
||||||
# constraint "users_c1_key"
|
|
||||||
# N columns - (IntegrityError) duplicate key value violates unique
|
|
||||||
# constraint "name_of_our_constraint"
|
|
||||||
#
|
|
||||||
# mysql:
|
|
||||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
|
||||||
# 'c1'")
|
|
||||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
|
||||||
# with -' for key 'name_of_our_constraint'")
|
|
||||||
|
|
||||||
# FIXME(jistr): manually updated to work around this bug:
|
|
||||||
# https://bugs.launchpad.net/tuskar/+bug/1290849
|
|
||||||
_DUP_KEY_RE_DB = {
|
|
||||||
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
|
||||||
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
|
|
||||||
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
|
|
||||||
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
# FIXME(jistr): manually updated to work around this bug:
|
|
||||||
# https://bugs.launchpad.net/tuskar/+bug/1290849
|
|
||||||
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
|
|
||||||
"""
|
|
||||||
In this function will be raised DBDuplicateEntry exception if integrity
|
|
||||||
error wrap unique constraint violation.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def get_columns_from_uniq_cons_or_name(columns):
|
|
||||||
# note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3"
|
|
||||||
# means that columns c1, c2, c3 are in UniqueConstraint.
|
|
||||||
uniqbase = "uniq_"
|
|
||||||
if not columns.startswith(uniqbase):
|
|
||||||
if engine_name == "postgresql":
|
|
||||||
return [columns[columns.index("_") + 1:columns.rindex("_")]]
|
|
||||||
return [columns]
|
|
||||||
return columns[len(uniqbase):].split("_x_")
|
|
||||||
|
|
||||||
if engine_name not in ["mysql", "sqlite", "postgresql"]:
|
|
||||||
return
|
|
||||||
|
|
||||||
for pattern in _DUP_KEY_RE_DB[engine_name]:
|
|
||||||
m = pattern.match(integrity_error.message)
|
|
||||||
if m:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
return
|
|
||||||
columns = m.group(1)
|
|
||||||
|
|
||||||
if engine_name == "sqlite":
|
|
||||||
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
|
|
||||||
else:
|
|
||||||
columns = get_columns_from_uniq_cons_or_name(columns)
|
|
||||||
raise exception.DBDuplicateEntry(columns, integrity_error)
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE(comstud): In current versions of DB backends, Deadlock violation
|
|
||||||
# messages follow the structure:
|
|
||||||
#
|
|
||||||
# mysql:
|
|
||||||
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
|
|
||||||
# 'restarting transaction') <query_str> <query_args>
|
|
||||||
_DEADLOCK_RE_DB = {
|
|
||||||
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _raise_if_deadlock_error(operational_error, engine_name):
|
|
||||||
"""
|
|
||||||
Raise DBDeadlock exception if OperationalError contains a Deadlock
|
|
||||||
condition.
|
|
||||||
"""
|
|
||||||
re = _DEADLOCK_RE_DB.get(engine_name)
|
|
||||||
if re is None:
|
|
||||||
return
|
|
||||||
m = re.match(operational_error.message)
|
|
||||||
if not m:
|
|
||||||
return
|
|
||||||
raise exception.DBDeadlock(operational_error)
|
|
||||||
|
|
||||||
|
|
||||||
def _wrap_db_error(f):
|
|
||||||
def _wrap(*args, **kwargs):
|
|
||||||
try:
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
raise exception.DBInvalidUnicodeParameter()
|
|
||||||
# note(boris-42): We should catch unique constraint violation and
|
|
||||||
# wrap it by our own DBDuplicateEntry exception. Unique constraint
|
|
||||||
# violation is wrapped by IntegrityError.
|
|
||||||
except sqla_exc.OperationalError as e:
|
|
||||||
_raise_if_deadlock_error(e, get_engine().name)
|
|
||||||
# NOTE(comstud): A lot of code is checking for OperationalError
|
|
||||||
# so let's not wrap it for now.
|
|
||||||
raise
|
|
||||||
except sqla_exc.IntegrityError as e:
|
|
||||||
# note(boris-42): SqlAlchemy doesn't unify errors from different
|
|
||||||
# DBs so we must do this. Also in some tables (for example
|
|
||||||
# instance_types) there are more than one unique constraint. This
|
|
||||||
# means we should get names of columns, which values violate
|
|
||||||
# unique constraint, from error message.
|
|
||||||
_raise_if_duplicate_entry_error(e, get_engine().name)
|
|
||||||
raise exception.DBError(e)
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(_('DB exception wrapped.'))
|
|
||||||
raise exception.DBError(e)
|
|
||||||
_wrap.func_name = f.func_name
|
|
||||||
return _wrap
|
|
||||||
|
|
||||||
|
|
||||||
def get_engine(sqlite_fk=False):
|
|
||||||
"""Return a SQLAlchemy engine."""
|
|
||||||
global _ENGINE
|
|
||||||
if _ENGINE is None:
|
|
||||||
_ENGINE = create_engine(CONF.database.connection,
|
|
||||||
sqlite_fk=sqlite_fk)
|
|
||||||
return _ENGINE
|
|
||||||
|
|
||||||
|
|
||||||
def _synchronous_switch_listener(dbapi_conn, connection_rec):
|
|
||||||
"""Switch sqlite connections to non-synchronous mode."""
|
|
||||||
dbapi_conn.execute("PRAGMA synchronous = OFF")
|
|
||||||
|
|
||||||
|
|
||||||
def _add_regexp_listener(dbapi_con, con_record):
|
|
||||||
"""Add REGEXP function to sqlite connections."""
|
|
||||||
|
|
||||||
def regexp(expr, item):
|
|
||||||
reg = re.compile(expr)
|
|
||||||
return reg.search(six.text_type(item)) is not None
|
|
||||||
dbapi_con.create_function('regexp', 2, regexp)
|
|
||||||
|
|
||||||
|
|
||||||
def _greenthread_yield(dbapi_con, con_record):
|
|
||||||
"""
|
|
||||||
Ensure other greenthreads get a chance to execute by forcing a context
|
|
||||||
switch. With common database backends (eg MySQLdb and sqlite), there is
|
|
||||||
no implicit yield caused by network I/O since they are implemented by
|
|
||||||
C libraries that eventlet cannot monkey patch.
|
|
||||||
"""
|
|
||||||
greenthread.sleep(0)
|
|
||||||
|
|
||||||
|
|
||||||
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
|
|
||||||
"""
|
|
||||||
Ensures that MySQL connections checked out of the
|
|
||||||
pool are alive.
|
|
||||||
|
|
||||||
Borrowed from:
|
|
||||||
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
dbapi_conn.cursor().execute('select 1')
|
|
||||||
except dbapi_conn.OperationalError as ex:
|
|
||||||
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
|
|
||||||
LOG.warn(_('Got mysql server has gone away: %s'), ex)
|
|
||||||
raise sqla_exc.DisconnectionError("Database server went away")
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def _is_db_connection_error(args):
|
|
||||||
"""Return True if error in connecting to db."""
|
|
||||||
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
|
|
||||||
# to support Postgres and others.
|
|
||||||
conn_err_codes = ('2002', '2003', '2006', '2013')
|
|
||||||
for err_code in conn_err_codes:
|
|
||||||
if args.find(err_code) != -1:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def create_engine(sql_connection, sqlite_fk=False):
    """Return a new SQLAlchemy engine."""
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)

    if 'mysql' in connection_dict.drivername:
        sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warn(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
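
Every knob create_engine() reads lives in the [database] group, so its behaviour can be steered from configuration alone. An illustrative override set with made-up values::

    from oslo.config import cfg

    CONF = cfg.CONF
    # Echo every statement (>= 50 maps to echo=True, >= 100 to echo='debug').
    CONF.set_override('connection_debug', 50, group='database')
    # Recycle pooled connections after an hour to dodge server-side timeouts.
    CONF.set_override('idle_timeout', 3600, group='database')
    # Retry the initial connect ten times, ten seconds apart.
    CONF.set_override('max_retries', 10, group='database')
    CONF.set_override('retry_interval', 10, group='database')

    engine = create_engine(CONF.database.connection)
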
class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)


class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)
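
Taken together, the helpers above were used roughly as follows; MyModel is a hypothetical model carrying the soft-delete columns (deleted, deleted_at, updated_at)::

    engine = get_engine(sqlite_fk=True)
    maker = get_maker(engine)   # sessionmaker bound to the Query subclass
    session = maker()

    # soft_delete() rewrites matching rows instead of emitting DELETE:
    # 'deleted' takes the row id, 'deleted_at' the current timestamp.
    n = session.query(MyModel).filter_by(name='demo').soft_delete()
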
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries by patching
    MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for file, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if file.endswith('session.py') and method == '_do_query':
                continue
            if file.endswith('api.py') and method == 'wrapper':
                continue
            if file.endswith('utils.py') and method == '_inner':
                continue
            if file.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if file.endswith('db/api.py'):
                continue
            # only trace inside tuskar
            index = file.rfind('tuskar')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (file[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
@ -1,132 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of paginate query."""

import sqlalchemy

from tuskar.openstack.common.gettextutils import _
from tuskar.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((model_attr > marker_values[i]))
            else:
                raise ValueError(_("Unknown sort direction, "
                                   "must be 'desc' or 'asc'"))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
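
A usage sketch for paginate_query against tuskar's StoredFile model; the compound key (created_at, id) keeps the ordering unique, though whether StoredFile carries a created_at column is an assumption here::

    # First page: no marker.
    page = paginate_query(session.query(StoredFile), StoredFile, limit=20,
                          sort_keys=['created_at', 'id'],
                          sort_dir='asc').all()

    # Next page: pass the last row of the previous page as the marker.
    next_page = paginate_query(session.query(StoredFile), StoredFile, limit=20,
                               sort_keys=['created_at', 'id'],
                               marker=page[-1],
                               sort_dir='asc').all()
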
@ -21,8 +21,8 @@ from sqlalchemy import and_
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound

+from tuskar.db.sqlalchemy.api import get_session
from tuskar.db.sqlalchemy.models import StoredFile
-from tuskar.openstack.common.db.sqlalchemy import session as db_session
from tuskar.storage.drivers.base import BaseDriver
from tuskar.storage.exceptions import NameAlreadyUsed
from tuskar.storage.exceptions import UnknownName
@ -39,10 +39,6 @@ sql_opts = [
cfg.CONF.register_opts(sql_opts)


-def get_session():
-    return db_session.get_session(sqlite_fk=True)
-
-
class SQLAlchemyDriver(BaseDriver):

    def _generate_uuid(self):
@ -34,7 +34,7 @@ import unittest2

from tuskar.common import paths
from tuskar.db import migration
-from tuskar.openstack.common.db.sqlalchemy import session
+from tuskar.db.sqlalchemy import api as sqla_api
from tuskar.openstack.common import log as logging
from tuskar.tests import conf_fixture
@ -46,11 +46,7 @@ test_opts = [
]

CONF = cfg.CONF
-CONF.register_opts(test_opts)
+CONF.register_opts(test_opts, group='database')
-CONF.import_opt('connection',
-                'tuskar.openstack.common.db.sqlalchemy.session',
-                group='database')
-CONF.import_opt('sqlite_db', 'tuskar.openstack.common.db.sqlalchemy.session')
CONF.set_override('use_stderr', False)

logging.setup('tuskar')
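
Where a consumer still needs to import_opt one of the moved options, it now points at oslo.db's option module rather than the incubator copy; a sketch (module path per oslo.db at the time, treat it as an assumption)::

    from oslo.config import cfg

    CONF = cfg.CONF
    # The 'connection' option is registered by oslo.db under [database].
    CONF.import_opt('connection', 'oslo.db.options', group='database')
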
@ -60,13 +56,13 @@ _DB_CACHE = None

class Database(fixtures.Fixture):

-    def __init__(self, db_session, db_migrate, sql_connection, sqlite_db,
+    def __init__(self, db_api, db_migrate, sql_connection, sqlite_db,
                 sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db

-        self.engine = db_session.get_engine()
+        self.engine = db_api.get_engine()
        self.engine.dispose()
        conn = self.engine.connect()
        if sql_connection == "sqlite://":
@ -139,10 +135,10 @@ class TestCase(testtools.TestCase, unittest2.TestCase):
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(
-                session, migration,
+                sqla_api, migration,
                sql_connection=CONF.database.connection,
-                sqlite_db=CONF.sqlite_db,
+                sqlite_db=CONF.database.sqlite_db,
-                sqlite_clean_db=CONF.sqlite_clean_db
+                sqlite_clean_db=CONF.database.sqlite_clean_db
            )
            self.useFixture(_DB_CACHE)
@ -25,10 +25,14 @@ from tuskar.tests.base import TestCase
class ApiCommandTests(TestCase):

    @patch('wsgiref.simple_server.make_server')
-    def test_start(self, mock_make_server):
+    def test_main(self, mock_make_server):

        try:
            main(['test.py', '--config-file', 'etc/tuskar/tuskar.conf.sample'])

+        # Catch BaseExceptions and re-raise as Exception; otherwise
+        # exceptions raised by the argument parser code won't be caught and
+        # will create a cryptic test failure.
        except BaseException as e:
            raise Exception(e)
tuskar/tests/cmd/test_dbsync.py (new file, 38 lines)
@ -0,0 +1,38 @@
#!/usr/bin/env python
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch

from tuskar.cmd.dbsync import main
from tuskar.tests.base import TestCase


class DBSyncCommandTests(TestCase):

    @patch('tuskar.db.migration.db_sync')
    def test_main(self, mock_db_sync):

        try:
            main(['test.py', '--config-file', 'etc/tuskar/tuskar.conf.sample'])

        # Catch BaseExceptions and re-raise as Exception; otherwise
        # exceptions raised by the argument parser code won't be caught and
        # will create a cryptic test failure.
        except BaseException as e:
            raise Exception(e)

        mock_db_sync.assert_called_once_with()
@ -33,7 +33,7 @@ class ConfFixture(fixtures.Fixture):
        super(ConfFixture, self).setUp()

        self.conf.set_default('connection', "sqlite://", group='database')
-        self.conf.set_default('sqlite_synchronous', False)
+        self.conf.set_default('sqlite_synchronous', False, group='database')
        self.conf.set_default('use_ipv6', True)
        self.conf.set_default('verbose', True)
        self.conf.set_default('tht_local_dir',
@ -16,7 +16,7 @@
import sqlalchemy

from tuskar.db import migration as db_migration
-from tuskar.openstack.common.db.sqlalchemy import session as db_session
+from tuskar.db.sqlalchemy import api as sqla_api
from tuskar.tests.db import base as db_base


@ -70,7 +70,7 @@ class CurrentDatabaseSchemaTests(db_base.DbTestCase):
    @staticmethod
    def _get_db_table(table_name):
        metadata = sqlalchemy.MetaData()
-        metadata.bind = db_session.get_engine()
+        metadata.bind = sqla_api.get_engine()
        return sqlalchemy.Table(table_name, metadata, autoload=True)

    def _assert_columns(self, table, column_types):