diff --git a/.gitignore b/.gitignore
index 4be77d6..79377d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,45 @@
-*.pyc
-.ropeproject
-openstack_neat.egg-info
-build
+*.py[cod]
+*.sqlite
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
 dist
-distribute-*
-.idea/
+build
+.venv
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+nosetests.xml
+cover/*
+.testrepository/
+subunit.log
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+.idea
+.DS_Store
+etc/*.conf
+tools/lintstack.head.py
+tools/pylint_exceptions
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..222c48f
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,49 @@
+# The format of this file isn't really documented; just use --generate-rcfile
+[MASTER]
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=tests
+ignore=openstack
+
+[Messages Control]
+# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future
+# C0111: Don't require docstrings on every method
+# W0511: TODOs in code comments are fine.
+# W0142: *args and **kwargs are fine.
+# W0622: Redefining id is fine.
+# W0703: Catch "Exception".
+disable=C0111,W0511,W0142,W0622,W0703
+
+[Basic]
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
+# Type attribute names can be 2 to 31 characters long, with lowercase and underscores
+attr-rgx=[a-z_][a-z0-9_]{1,30}$
+
+# Method names should be at least 3 characters long and be lowercased with underscores
+method-rgx=([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$
+
+# Module names matching savanna-* are ok (files in bin/)
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(savanna-[a-z0-9_-]+))$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
+
+[Design]
+max-public-methods=100
+min-public-methods=0
+max-args=6
+
+[Variables]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+# _ is used by our localization
+additional-builtins=_
+
+[TYPECHECK]
+generated-members=query,node_template,status_code,data
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 0000000..bb49f15
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,9 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+             OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
+             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
+             ${PYTHON:-python} -m subunit.run discover -t ./ ./mistral/tests/unit $LISTOPT $IDOPTION
+
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 2a95b5f..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: python
-python:
-  - 2.7
-install:
-  - sudo apt-get update
-  - sudo apt-get install -qq python-libvirt python-numpy python-scipy
-  - cp /usr/lib/python2.7/dist-packages/libvirt* ~/virtualenv/python2.7/lib/python2.7/site-packages/
-  - cp -r /usr/lib/python2.7/dist-packages/numpy* ~/virtualenv/python2.7/lib/python2.7/site-packages/
-  - cp -r /usr/lib/python2.7/dist-packages/scipy* ~/virtualenv/python2.7/lib/python2.7/site-packages/
-  - pip install --use-mirrors pyqcy mocktest PyContracts nose SQLAlchemy bottle requests python-novaclient
-script: nosetests
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 2fd24bd..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,6 +0,0 @@
-include README.rst
-include LICENSE
-include NOTICE
-include distribute_setup.py
-include init.d/*
-include neat.conf
diff --git a/TODO b/TODO
deleted file mode 100644
index 3b4fbd6..0000000
--- a/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-RPM package
-
-1. python2 setup.py bdist_rpm
-2. Added #!/usr/bin/python2 to start-*.py
-3. cp start-data-collector.py /usr/bin/neat-data-collector
-4. cp initscripts/* /etc/init.d/
-5. cp neat.conf /etc/neat/neat.conf
-
-RPM manuals:
-
-https://fedoraproject.org/wiki/How_to_create_an_RPM_package
-https://fedoraproject.org/wiki/Packaging:Guidelines
-https://fedoraproject.org/wiki/Packaging:Python
-http://fedoraproject.org/wiki/Packaging:SysVInitScript
-http://docs.python.org/distutils/builtdist.html
-http://stackoverflow.com/questions/2324933/creating-python-rpm
diff --git a/etc/logging.conf.sample b/etc/logging.conf.sample
new file mode 100644
index 0000000..58c5dea
--- /dev/null
+++ b/etc/logging.conf.sample
@@ -0,0 +1,32 @@
+[loggers]
+keys=root
+
+[handlers]
+keys=consoleHandler, fileHandler
+
+[formatters]
+keys=verboseFormatter, simpleFormatter
+
+[logger_root]
+level=DEBUG
+handlers=consoleHandler, fileHandler
+
+[handler_consoleHandler]
+class=StreamHandler
+level=INFO
+formatter=simpleFormatter
+args=(sys.stdout,)
+
+[handler_fileHandler]
+class=FileHandler
+level=INFO
+formatter=verboseFormatter
+args=("/var/log/mistral.log",)
+
+[formatter_verboseFormatter]
+format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s
+datefmt=
+
+[formatter_simpleFormatter]
+format=%(asctime)s %(levelname)s [-] %(message)s
+datefmt=
diff --git a/neat.conf b/etc/neat.conf.sample
similarity index 89%
rename from neat.conf
rename to etc/neat.conf.sample
index 6d20db6..87ae641 100644
--- a/neat.conf
+++ b/etc/neat.conf.sample
@@ -1,19 +1,3 @@
-# Copyright 2012 Anton Beloglazov
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is the default configuration file for OpenStack Neat
-
 [DEFAULT]
 
 # The directory, where log files will be created by the Neat services
@@ -66,15 +50,6 @@ local_data_directory = /var/lib/neat
 # manager in seconds
 local_manager_interval = 300
 
-# The time interval between subsequent invocations of the data
-# collector in seconds
-data_collector_interval = 300
-
-# The number of the latest data values stored locally by the data
-# collector and passed to the underload / overload detection and VM
-# placement algorithms
-data_collector_data_length = 100
-
 # The threshold on the overall (all cores) utilization of the physical
 # CPU of a host, above which the host is considered to be overloaded.
 # This is used for logging host overloads into the database.
@@ -148,3 +123,17 @@ algorithm_vm_placement_factory = neat.globals.vm_placement.bin_packing.best_fit_
 # A JSON encoded parameters, which will be parsed and passed to the
 # specified VM placement algorithm factory
 algorithm_vm_placement_parameters = {"cpu_threshold": 0.8, "ram_threshold": 0.95, "last_n_vm_cpu": 2}
+
+[global_manager]
+
+[local_manager]
+
+[collector]
+# The time interval between subsequent invocations of the data
+# collector in seconds
+data_collector_interval = 300
+
+# The number of the latest data values stored locally by the data
+# collector and passed to the underload / overload detection and VM
+# placement algorithms
+data_collector_data_length = 100
diff --git a/openstack-common.conf b/openstack-common.conf
new file mode 100644
index 0000000..719f6c0
--- /dev/null
+++ b/openstack-common.conf
@@ -0,0 +1,17 @@
+[DEFAULT]
+
+# The list of modules to copy from oslo-incubator.git
+module=config.generator
+module=log
+module=jsonutils
+module=lockutils
+module=loopingcall
+module=periodic_task
+module=threadgroup
+module=timeutils
+module=importutils
+module=strutils
+module=uuidutils
+
+# The base module to hold the copy of openstack.common
+base=mistral
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..8b19302
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,33 @@
+alembic>=0.7.2
+pbr>=0.6,!=0.7,<1.0
+eventlet>=0.15.0
+PyYAML>=3.1.0
+pecan>=0.8.0
+WSME>=0.6
+amqplib>=0.6.1 # This is not in global requirements (master branch)
+argparse
+Babel>=1.3
+iso8601>=0.1.9
+posix_ipc
+croniter>=0.3.4 # MIT License
+requests>=1.2.1,!=2.4.0
+kombu>=2.4.8
+oslo.config>=1.4.0,<1.10.0  # Apache-2.0
+oslo.db>=1.0.0  # Apache-2.0
+oslo.messaging>=1.4.0
+oslo.utils>=1.2.0,<1.5.0  # Apache-2.0
+paramiko>=1.13.0
+python-cinderclient>=1.1.0
+python-heatclient>=0.2.9
+python-keystoneclient>=0.10.0
+python-neutronclient>=2.3.6,<3
+python-novaclient>=2.18.0
+python-glanceclient>=0.14.0
+networkx>=1.8
+six>=1.7.0
+SQLAlchemy>=0.9.7,<=0.9.99
+stevedore>=1.0.0  # Apache-2.0
+yaql==0.2.4 # This is not in global requirements
+jsonschema>=2.0.0,<3.0.0
+mock>=1.0
+keystonemiddleware>=1.0.0
diff --git a/run_tests.sh b/run_tests.sh
new file mode 100755
index 0000000..305c4e5
--- /dev/null
+++ b/run_tests.sh
@@ -0,0 +1,226 @@
+#!/bin/bash
+
+set -eu
+
+function usage {
+  echo "Usage: $0 [OPTION]..."
+  echo "Run Mistral's test suite(s)"
+  echo ""
+  echo "  -V, --virtual-env           Always use virtualenv.  Install automatically if not present"
+  echo "  -N, --no-virtual-env        Don't use virtualenv.  Run tests in local environment"
+  echo "  -s, --no-site-packages      Isolate the virtualenv from the global Python environment"
+  echo "  -r, --recreate-db           Recreate the test database (deprecated, as this is now the default)."
+  echo "  -n, --no-recreate-db        Don't recreate the test database."
+  echo "  -f, --force                 Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+  echo "  -u, --update                Update the virtual environment with any newer package versions"
+  echo "  -p, --pep8                  Just run PEP8 and HACKING compliance check"
+  echo "  -P, --no-pep8               Don't run static code checks"
+  echo "  -c, --coverage              Generate coverage report"
+  echo "  -d, --debug                 Run tests with testtools instead of testr. This allows you to use the debugger."
+  echo "  -h, --help                  Print this usage message"
+  echo "  --virtual-env-path <path>   Location of the virtualenv directory"
+  echo "                               Default: \$(pwd)"
+  echo "  --virtual-env-name <name>   Name of the virtualenv directory"
+  echo "                               Default: .venv"
+  echo "  --tools-path <dir>          Location of the tools directory"
+  echo "                               Default: \$(pwd)"
+  echo ""
+  echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
+  echo "      If no virtualenv is found, the script will ask if you would like to create one.  If you "
+  echo "      prefer to run tests NOT in a virtual environment, simply pass the -N option."
+  exit
+}
+
+function process_options {
+  i=1
+  while [ $i -le $# ]; do
+    case "${!i}" in
+      -h|--help) usage;;
+      -V|--virtual-env) always_venv=1; never_venv=0;;
+      -N|--no-virtual-env) always_venv=0; never_venv=1;;
+      -s|--no-site-packages) no_site_packages=1;;
+      -r|--recreate-db) recreate_db=1;;
+      -n|--no-recreate-db) recreate_db=0;;
+      -f|--force) force=1;;
+      -u|--update) update=1;;
+      -p|--pep8) just_pep8=1;;
+      -P|--no-pep8) no_pep8=1;;
+      -c|--coverage) coverage=1;;
+      -d|--debug) debug=1;;
+      --virtual-env-path)
+        (( i++ ))
+        venv_path=${!i}
+        ;;
+      --virtual-env-name)
+        (( i++ ))
+        venv_dir=${!i}
+        ;;
+      --tools-path)
+        (( i++ ))
+        tools_path=${!i}
+        ;;
+      -*) testropts="$testropts ${!i}";;
+      *) testrargs="$testrargs ${!i}"
+    esac
+    (( i++ ))
+  done
+}
+
+tool_path=${tools_path:-$(pwd)}
+venv_path=${venv_path:-$(pwd)}
+venv_dir=${venv_name:-.venv}
+with_venv=tools/with_venv.sh
+always_venv=0
+never_venv=0
+force=0
+no_site_packages=0
+installvenvopts=
+testrargs=
+testropts=
+wrapper=""
+just_pep8=0
+no_pep8=0
+coverage=0
+debug=0
+recreate_db=1
+update=0
+
+LANG=en_US.UTF-8
+LANGUAGE=en_US:en
+LC_ALL=C
+
+process_options $@
+# Make our paths available to other scripts we call
+export venv_path
+export venv_dir
+export venv_name
+export tools_dir
+export venv=${venv_path}/${venv_dir}
+
+if [ $no_site_packages -eq 1 ]; then
+  installvenvopts="--no-site-packages"
+fi
+
+
+function run_tests {
+  # Cleanup *pyc
+  ${wrapper} find . -type f -name "*.pyc" -delete
+
+  if [ $debug -eq 1 ]; then
+    if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
+      # Default to running all tests if specific test is not
+      # provided.
+      testrargs="discover ./mistral/tests/unit"
+    fi
+    ${wrapper} python -m testtools.run $testropts $testrargs
+
+    # Short circuit because all of the testr and coverage stuff
+    # below does not make sense when running testtools.run for
+    # debugging purposes.
+    return $?
+  fi
+
+  if [ $coverage -eq 1 ]; then
+    TESTRTESTS="$TESTRTESTS --coverage"
+  else
+    TESTRTESTS="$TESTRTESTS --slowest"
+  fi
+
+  # Just run the test suites in current environment
+  set +e
+  testrargs=$(echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/')
+  TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testropts $testrargs'"
+  OS_TEST_PATH=$(echo $testrargs|grep -o 'mistral\.tests[^[:space:]:]*\+'|tr . /)
+  if [ -d "$OS_TEST_PATH" ]; then
+      wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper"
+  elif [ -d "$(dirname $OS_TEST_PATH)" ]; then
+      wrapper="OS_TEST_PATH=$(dirname $OS_TEST_PATH) $wrapper"
+  fi
+  echo "Running ${wrapper} $TESTRTESTS"
+  bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit"
+  RESULT=$?
+  set -e
+
+  copy_subunit_log
+
+  if [ $coverage -eq 1 ]; then
+    echo "Generating coverage report in covhtml/"
+    # Don't compute coverage for common code, which is tested elsewhere
+    ${wrapper} coverage combine
+    ${wrapper} coverage html --include='mistral/*' --omit='mistral/openstack/common/*' -d covhtml -i
+  fi
+
+  return $RESULT
+}
+
+function copy_subunit_log {
+  LOGNAME=$(cat .testrepository/next-stream)
+  LOGNAME=$(($LOGNAME - 1))
+  LOGNAME=".testrepository/${LOGNAME}"
+  cp $LOGNAME subunit.log
+}
+
+function run_pep8 {
+  echo "Running flake8 ..."
+
+  ${wrapper} flake8
+}
+
+
+TESTRTESTS="python -m mistral.openstack.common.lockutils python setup.py testr"
+
+if [ $never_venv -eq 0 ]
+then
+  # Remove the virtual environment if --force used
+  if [ $force -eq 1 ]; then
+    echo "Cleaning virtualenv..."
+    rm -rf ${venv}
+  fi
+  if [ $update -eq 1 ]; then
+      echo "Updating virtualenv..."
+      python tools/install_venv.py $installvenvopts
+  fi
+  if [ -e ${venv} ]; then
+    wrapper="${with_venv}"
+  else
+    if [ $always_venv -eq 1 ]; then
+      # Automatically install the virtualenv
+      python tools/install_venv.py $installvenvopts
+      wrapper="${with_venv}"
+    else
+      echo -e "No virtual environment found...create one? (Y/n) \c"
+      read use_ve
+      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
+        # Install the virtualenv and run the test suite in it
+        python tools/install_venv.py $installvenvopts
+        wrapper=${with_venv}
+      fi
+    fi
+  fi
+fi
+
+# Delete old coverage data from previous runs
+if [ $coverage -eq 1 ]; then
+    ${wrapper} coverage erase
+fi
+
+if [ $just_pep8 -eq 1 ]; then
+    run_pep8
+    exit
+fi
+
+if [ $recreate_db -eq 1 ]; then
+    rm -f tests.sqlite
+fi
+
+run_tests
+
+# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
+# not when we're running tests individually. To handle this, we need to
+# distinguish between options (testropts), which begin with a '-', and
+# arguments (testrargs).
+if [ -z "$testrargs" ]; then
+  if [ $no_pep8 -eq 0 ]; then
+    run_pep8
+  fi
+fi
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..e3edada
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,34 @@
+[metadata]
+name = terracotta
+summary = Dynamic Scheduling Service for OpenStack Cloud
+description-file =
+    README.rst
+license = Apache License, Version 2.0
+home-page = https://launchpad.net/terracotta
+classifiers =
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+    Environment :: OpenStack
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    #License :: OSI Approved :: Apache Software License
+    Operating System :: POSIX :: Linux
+author = OpenStack Terracotta Team
+author-email = openstack-dev@lists.openstack.org
+
+[files]
+packages =
+    terracotta
+
+[build_sphinx]
+source-dir = doc/source
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[entry_points]
+console_scripts =
+    terracotta-server = terracotta.cmd.launch:main
diff --git a/setup.py b/setup.py
index 0f65ddc..7363757 100644
--- a/setup.py
+++ b/setup.py
@@ -1,100 +1,30 @@
-# Copyright 2012 Anton Beloglazov
+#!/usr/bin/env python
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
-The OpenStack Neat Project
-==========================
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
 
-OpenStack Neat is a project intended to provide an extension to
-OpenStack implementing dynamic consolidation of Virtual Machines (VMs)
-using live migration. The major objective of dynamic VM consolidation
-is to improve the utilization of physical resources and reduce energy
-consumption by re-allocating VMs using live migration according to
-their real-time resource demand and switching idle hosts to the sleep
-mode. Apart from consolidating VMs, the system should be able to react
-to increases in the resource demand and deconsolidate VMs when
-necessary to avoid performance degradation. In general, the problem of
-dynamic VM consolidation includes 4 sub-problems: host underload /
-overload detection, VM selection, and VM placement.
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
 
-This work is conducted within the Cloud Computing and Distributed
-Systems (CLOUDS) Laboratory (http://www.cloudbus.org/) at the
-University of Melbourne. The problem of dynamic VM consolidation
-considering Quality of Service (QoS) constraints has been studied from
-the theoretical perspective and algorithms addressing the sub-problems
-listed above have been proposed [1], [2]. The algorithms have been
-evaluated using CloudSim (http://code.google.com/p/cloudsim/) and
-real-world workload traces collected from more than a thousand
-PlanetLab VMs hosted on servers located in more than 500 places around
-the world.
-
-The aim of the OpenStack Neat project is to provide an extensible
-framework for dynamic consolidation of VMs based on the OpenStack
-platform. The framework should provide an infrastructure enabling the
-interaction of components implementing the decision-making algorithms.
-The framework should allow configuration-driven switching of different
-implementations of the decision-making algorithms. The implementation
-of the framework will include the algorithms proposed in our previous
-works [1], [2].
-
-[1] Anton Beloglazov and Rajkumar Buyya, "Optimal Online Deterministic
-Algorithms and Adaptive Heuristics for Energy and Performance
-Efficient Dynamic Consolidation of Virtual Machines in Cloud Data
-Centers", Concurrency and Computation: Practice and Experience (CCPE),
-Volume 24, Issue 13, Pages: 1397-1420, John Wiley & Sons, Ltd, New
-York, USA, 2012. Download:
-http://beloglazov.info/papers/2012-optimal-algorithms-ccpe.pdf
-
-[2] Anton Beloglazov and Rajkumar Buyya, "Managing Overloaded Hosts
-for Dynamic Consolidation of Virtual Machines in Cloud Data Centers
-Under Quality of Service Constraints", IEEE Transactions on Parallel
-and Distributed Systems (TPDS), IEEE CS Press, USA, 2012 (in press,
-accepted on August 2, 2012). Download:
-http://beloglazov.info/papers/2012-host-overload-detection-tpds.pdf
-"""
-
-import distribute_setup
-distribute_setup.use_setuptools()
-
-from setuptools import setup, find_packages
-
-
-setup(
-    name='openstack-neat',
-    version='0.1',
-    description='The OpenStack Neat Project',
-    long_description=__doc__,
-    author='Anton Beloglazov',
-    author_email='anton.beloglazov@gmail.com',
-    url='https://github.com/beloglazov/openstack-neat',
-    platforms='any',
-    include_package_data=True,
-    license='LICENSE',
-    packages=find_packages(),
-    test_suite='tests',
-    tests_require=['pyqcy', 'mocktest', 'PyContracts'],
-    entry_points = {
-        'console_scripts': [
-            'neat-data-collector = neat.locals.collector:start',
-            'neat-local-manager  = neat.locals.manager:start',
-            'neat-global-manager = neat.globals.manager:start',
-            'neat-db-cleaner     = neat.globals.db_cleaner:start',
-            ]
-        },
-    data_files = [('/etc/init.d', ['init.d/openstack-neat-data-collector',
-                                   'init.d/openstack-neat-local-manager',
-                                   'init.d/openstack-neat-global-manager',
-                                   'init.d/openstack-neat-db-cleaner']),
-                  ('/etc/neat', ['neat.conf'])],
-)
+setuptools.setup(
+    setup_requires=['pbr'],
+    pbr=True)
diff --git a/setup/README.md b/setup/README.md
deleted file mode 100644
index 303e364..0000000
--- a/setup/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-1. Create a MySQL database and user for OpenStack Neat:
-
-```
-CREATE DATABASE neat;
-GRANT ALL ON neat.* TO 'neat'@'controller' IDENTIFIED BY 'neatpassword';
-GRANT ALL ON neat.* TO 'neat'@'%' IDENTIFIED BY 'neatpassword';
-```
diff --git a/setup/deps-arch.sh b/setup/deps-arch.sh
deleted file mode 100755
index 617e315..0000000
--- a/setup/deps-arch.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-# Copyright 2012 Anton Beloglazov
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-sudo pip2 install --upgrade pyqcy mocktest PyContracts SQLAlchemy bottle requests Sphinx python-novaclient
-sudo pacman -S python2-numpy python2-scipy
diff --git a/setup/deps-centos.sh b/setup/deps-centos.sh
deleted file mode 100755
index a703f25..0000000
--- a/setup/deps-centos.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-
-# Copyright 2012 Anton Beloglazov
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-sudo yum install -y python-pip numpy scipy libvirt-python
-sudo pip install --upgrade pyqcy PyContracts SQLAlchemy bottle requests Sphinx python-novaclient
-sudo pip install mocktest
diff --git a/setup/update-chkconfig.sh b/setup/update-chkconfig.sh
deleted file mode 100755
index b6e0b1b..0000000
--- a/setup/update-chkconfig.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-chkconfig --add openstack-neat-data-collector
-chkconfig --add openstack-neat-db-cleaner
-chkconfig --add openstack-neat-global-manager
-chkconfig --add openstack-neat-local-manager
diff --git a/terracotta/cmd/launch.py b/terracotta/cmd/launch.py
index 10d8d73..bbb9ba4 100644
--- a/terracotta/cmd/launch.py
+++ b/terracotta/cmd/launch.py
@@ -25,22 +25,20 @@ eventlet.monkey_patch(
 
 import os
 
-# If ../mistral/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
 POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
-                                   os.pardir,
-                                   os.pardir))
+                                                os.pardir,
+                                                os.pardir))
 if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'mistral', '__init__.py')):
     sys.path.insert(0, POSSIBLE_TOPDIR)
 
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging as messaging
-from wsgiref import simple_server
 
-from mistral.api import app
 from terracotta import config
 from terracotta import rpc
+from terracotta.locals import collector
+from terracotta.locals import manager as local_mgr
 from terracotta.globals import manager as global_mgr
 
 from mistral import context as ctx
@@ -55,41 +53,14 @@ from mistral import version
 LOG = logging.getLogger(__name__)
 
 
-def launch_executor(transport):
+def launch_lm(transport):
     target = messaging.Target(
-        topic=cfg.CONF.executor.topic,
-        server=cfg.CONF.executor.host
+        topic=cfg.CONF.local_manager.topic,
+        server=cfg.CONF.local_manager.host
     )
 
-    executor_v2 = def_executor.DefaultExecutor(rpc.get_engine_client())
-
-    endpoints = [rpc.ExecutorServer(executor_v2)]
-
-    server = messaging.get_rpc_server(
-        transport,
-        target,
-        endpoints,
-        executor='eventlet',
-        serializer=ctx.RpcContextSerializer(ctx.JsonPayloadSerializer())
-    )
-
-    server.start()
-    server.wait()
-
-
-def launch_engine(transport):
-    target = messaging.Target(
-        topic=cfg.CONF.engine.topic,
-        server=cfg.CONF.engine.host
-    )
-
-    engine_v2 = def_eng.DefaultEngine(rpc.get_engine_client())
-    endpoints = [rpc.EngineServer(engine_v2)]
-
-
-    # Setup scheduler in engine.
-    db_api.setup_db()
-    scheduler.setup()
+    local_manager = local_mgr.LocalManager()
+    endpoints = [rpc.LocalManagerServer(local_manager)]
 
     server = messaging.get_rpc_server(
         transport,
@@ -109,13 +80,8 @@ def launch_gm(transport):
         server=cfg.CONF.global_manager.host
     )
 
-    engine_v2 = def_eng.DefaultEngine(rpc.get_engine_client())
-
-    endpoints = [rpc.EngineServer(engine_v2)]
-
-    # Setup scheduler in engine.
-    db_api.setup_db()
-    scheduler.setup()
+    global_manager = global_mgr.GlobalManager()
+    endpoints = [rpc.GlobalManagerServer(global_manager)]
 
     server = messaging.get_rpc_server(
         transport,
@@ -129,24 +95,28 @@ def launch_gm(transport):
     server.wait()
 
 
-def launch_api(transport):
-    host = cfg.CONF.api.host
-    port = cfg.CONF.api.port
-
-    server = simple_server.make_server(
-        host,
-        port,
-        app.setup_app()
+def launch_collector(transport):
+    target = messaging.Target(
+        topic=cfg.CONF.local_collector.topic,
+        server=cfg.CONF.local_collector.host
     )
 
-    LOG.info("Mistral API is serving on http://%s:%s (PID=%s)" %
-             (host, port, os.getpid()))
+    global_manager = collector.Collector()
+    endpoints = [rpc.GlobalManagerServer(global_manager)]
 
-    server.serve_forever()
+    server = messaging.get_rpc_server(
+        transport,
+        target,
+        endpoints,
+        executor='eventlet',
+        serializer=ctx.RpcContextSerializer(ctx.JsonPayloadSerializer())
+    )
+
+    server.start()
+    server.wait()
 
 
 def launch_any(transport, options):
-    # Launch the servers on different threads.
     threads = [eventlet.spawn(LAUNCH_OPTIONS[option], transport)
                for option in options]
 
@@ -156,7 +126,6 @@ def launch_any(transport, options):
 
 
 LAUNCH_OPTIONS = {
-    # 'api': launch_api,
     'global-manager': launch_gm,
     'local-collector': launch_collector,
     'local-manager': launch_lm
@@ -175,7 +144,7 @@ Terracotta Dynamic Scheduling Service, version %s
 """ % version.version_string()
 
 
-def print_server_info():
+def print_service_info():
     print(TERRACOTTA_TITLE)
 
     comp_str = ("[%s]" % ','.join(LAUNCH_OPTIONS)
@@ -187,16 +156,10 @@ def print_server_info():
 def main():
     try:
         config.parse_args()
-        print_server_info()
+        print_service_info()
         logging.setup(cfg.CONF, 'Terracotta')
         transport = rpc.get_transport()
 
-        # Validate launch option.
-        if set(cfg.CONF.server) - set(LAUNCH_OPTIONS.keys()):
-            raise Exception('Valid options are all or any combination of '
-                            'api, engine, and executor.')
-
-        # Launch distinct set of server(s).
         launch_any(transport, set(cfg.CONF.server))
 
     except RuntimeError as excp:
diff --git a/terracotta/common.py b/terracotta/common.py
index 071e01d..a8cab1b 100644
--- a/terracotta/common.py
+++ b/terracotta/common.py
@@ -1,10 +1,11 @@
 # Copyright 2012 Anton Beloglazov
+# Copyright 2015 Huawei Technologies Co. Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,60 +15,17 @@
 
 """ The functions from this module are shared by other components.
 """
+import json
+import numpy
+import os
+import re
+import subprocess
+import time
 
 from contracts import contract
-from neat.contracts_primitive import *
-from neat.contracts_extra import *
 
-import os
-import time
-import json
-import re
-import numpy
-import subprocess
-
-from neat.config import *
-from neat.db_utils import *
-
-import logging
-log = logging.getLogger(__name__)
-
-
-@contract
-def start(init_state, execute, config, time_interval, iterations=-1):
-    """ Start the processing loop.
-
-    :param init_state: A function accepting a config and
-                       returning a state dictionary.
-     :type init_state: function
-
-    :param execute: A function performing the processing at each iteration.
-     :type execute: function
-
-    :param config: A config dictionary.
-     :type config: dict(str: *)
-
-    :param time_interval: The time interval to wait between iterations.
-     :type time_interval: int
-
-    :param iterations: The number of iterations to perform, -1 for infinite.
-     :type iterations: int
-
-    :return: The final state.
-     :rtype: dict(str: *)
-    """
-    state = init_state(config)
-
-    if iterations == -1:
-        while True:
-            state = execute(config, state)
-            time.sleep(time_interval)
-    else:
-        for _ in xrange(iterations):
-            state = execute(config, state)
-            time.sleep(time_interval)
-
-    return state
+from terracotta.contracts_primitive import *
+from terracotta.contracts_extra import *
 
 
 @contract
@@ -133,7 +91,7 @@ def physical_cpu_mhz_total(vir_connection):
      :rtype: int
     """
     return physical_cpu_count(vir_connection) * \
-        physical_cpu_mhz(vir_connection)
+           physical_cpu_mhz(vir_connection)
 
 
 @contract
@@ -154,57 +112,6 @@ def frange(start, end, step):
         start += step
 
 
-@contract
-def init_logging(log_directory, log_file, log_level):
-    """ Initialize the logging system.
-
-    :param log_directory: The directory to store log files.
-     :type log_directory: str
-
-    :param log_file: The file name to store log messages.
-     :type log_file: str
-
-    :param log_level: The level of emitted log messages.
-     :type log_level: int
-
-    :return: Whether the logging system has been initialized.
-     :rtype: bool
-    """
-    if log_level == 0:
-        logging.disable(logging.CRITICAL)
-        return True
-
-    if not os.access(log_file, os.F_OK):
-        if not os.access(log_directory, os.F_OK):
-            os.makedirs(log_directory)
-        elif not os.access(log_directory, os.W_OK):
-            raise IOError(
-                'Cannot write to the log directory: ' + log_directory)
-    elif not os.access(log_file, os.W_OK):
-        raise IOError('Cannot write to the log file: ' + log_file)
-
-    if log_level == 3:
-        level = logging.DEBUG
-    elif log_level == 2:
-        level = logging.INFO
-    else:
-        level = logging.WARNING
-
-    logger = logging.root
-    logger.handlers = []
-    logger.filters = []
-
-    logger.setLevel(level)
-    handler = logging.FileHandler(
-        os.path.join(log_directory, log_file))
-    handler.setFormatter(
-        logging.Formatter(
-            '%(asctime)s %(levelname)-8s %(name)s %(message)s'))
-    logger.addHandler(handler)
-
-    return True
-
-
 @contract
 def call_function_by_name(name, args):
     """ Call a function specified by a fully qualified name.
diff --git a/terracotta/config.py b/terracotta/config.py
index 9f4d46f..6e746a4 100644
--- a/terracotta/config.py
+++ b/terracotta/config.py
@@ -1,4 +1,5 @@
 # Copyright 2012 Anton Beloglazov
+# Copyright 2015 - Huawei Technologies Co. Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,119 +22,6 @@ from oslo_log import log as logging
 
 from terracotta import version
 
-from contracts import contract
-import os
-import ConfigParser
-
-
-log = logging.getLogger(__name__)
-
-
-# This is the default config, which should not be modified
-DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__),
-                                   '..',
-                                   'neat.conf')
-
-# This is the custom config, which may override the defaults
-CONFIG_PATH = "/etc/neat/neat.conf"
-# The following value is used for testing purposes
-#CONFIG_PATH = os.path.join(os.path.dirname(__file__),
-#                           '..',
-#                           'neat.conf')
-
-# These fields must present in the configuration file
-REQUIRED_FIELDS = [
-    'log_directory',
-    'log_level',
-    'vm_instance_directory',
-    'sql_connection',
-    'os_admin_tenant_name',
-    'os_admin_user',
-    'os_admin_password',
-    'os_auth_url',
-    'compute_hosts',
-    'global_manager_host',
-    'global_manager_port',
-    'db_cleaner_interval',
-    'local_data_directory',
-    'local_manager_interval',
-    'data_collector_interval',
-    'data_collector_data_length',
-    'host_cpu_overload_threshold',
-    'host_cpu_usable_by_vms',
-    'compute_user',
-    'compute_password',
-    'sleep_command',
-    'ether_wake_interface',
-    'block_migration',
-    'network_migration_bandwidth',
-    'algorithm_underload_detection_factory',
-    'algorithm_underload_detection_parameters',
-    'algorithm_overload_detection_factory',
-    'algorithm_overload_detection_parameters',
-    'algorithm_vm_selection_factory',
-    'algorithm_vm_selection_parameters',
-    'algorithm_vm_placement_factory',
-    'algorithm_vm_placement_parameters',
-]
-
-
-@contract
-def read_config(paths):
-    """ Read the configuration files and return the options.
-
-    :param paths: A list of required configuration file paths.
-     :type paths: list(str)
-
-    :return: A dictionary of the configuration options.
-     :rtype: dict(str: str)
-    """
-    configParser = ConfigParser.ConfigParser()
-    for path in paths:
-        configParser.read(path)
-    return dict(configParser.items("DEFAULT"))
-
-
-@contract
-def validate_config(config, required_fields):
-    """ Check that the config contains all the required fields.
-
-    :param config: A config dictionary to check.
-     :type config: dict(str: str)
-
-    :param required_fields: A list of required fields.
-     :type required_fields: list(str)
-
-    :return: Whether the config is valid.
-     :rtype: bool
-    """
-    for field in required_fields:
-        if not field in config:
-            return False
-    return True
-
-
-@contract
-def read_and_validate_config(paths, required_fields):
-    """ Read the configuration files, validate and return the options.
-
-    :param paths: A list of required configuration file paths.
-     :type paths: list(str)
-
-    :param required_fields: A list of required fields.
-     :type required_fields: list(str)
-
-    :return: A dictionary of the configuration options.
-     :rtype: dict(str: str)
-    """
-    config = read_config(paths)
-    if not validate_config(config, required_fields):
-        message = 'The config dictionary does not contain ' + \
-                  'all the required fields'
-        log.critical(message)
-        raise KeyError(message)
-    return config
-
 
 launch_opt = cfg.ListOpt(
     'server',
@@ -171,7 +59,7 @@ use_debugger = cfg.BoolOpt(
     'Use at your own risk.'
 )
 
-engine_opts = [
+global_manager_opts = [
     cfg.StrOpt('engine', default='default',
                help='Mistral engine plugin'),
     cfg.StrOpt('host', default='0.0.0.0',
@@ -184,7 +72,7 @@ engine_opts = [
                help='The version of the engine.')
 ]
 
-executor_opts = [
+local_manager_opts = [
     cfg.StrOpt('host', default='0.0.0.0',
                help='Name of the executor node. This can be an opaque '
                     'identifier. It is not necessarily a hostname, '
@@ -195,60 +83,28 @@ executor_opts = [
                help='The version of the executor.')
 ]
 
-wf_trace_log_name_opt = cfg.StrOpt(
-    'workflow_trace_log_name',
-    default='workflow_trace',
-    help='Logger name for pretty '
-    'workflow trace output.'
-)
+collector_opts = [
+    cfg.StrOpt('host', default='0.0.0.0',
+               help='Name of the collector node. This can be an opaque '
+                    'identifier. It is not necessarily a hostname, '
+                    'FQDN, or IP address.'),
+    cfg.StrOpt('topic', default='collector',
+               help='The message topic that the collector listens on.'),
+    cfg.StrOpt('version', default='1.0',
+               help='The version of the collector.')
+]
 
 CONF = cfg.CONF
 
-CONF.register_opts(api_opts, group='api')
-CONF.register_opts(engine_opts, group='engine')
 CONF.register_opts(pecan_opts, group='pecan')
-CONF.register_opts(executor_opts, group='executor')
-CONF.register_opt(wf_trace_log_name_opt)
+CONF.register_opts(api_opts, group='api')
+CONF.register_opts(global_manager_opts, group='global_manager')
+CONF.register_opts(local_manager_opts, group='local_manager')
+CONF.register_opts(collector_opts, group='collector')
 
 CONF.register_cli_opt(use_debugger)
 CONF.register_cli_opt(launch_opt)
 
-CONF.import_opt('verbose', 'mistral.openstack.common.log')
-CONF.set_default('verbose', True)
-CONF.import_opt('debug', 'mistral.openstack.common.log')
-CONF.import_opt('log_dir', 'mistral.openstack.common.log')
-CONF.import_opt('log_file', 'mistral.openstack.common.log')
-CONF.import_opt('log_config_append', 'mistral.openstack.common.log')
-CONF.import_opt('log_format', 'mistral.openstack.common.log')
-CONF.import_opt('log_date_format', 'mistral.openstack.common.log')
-CONF.import_opt('use_syslog', 'mistral.openstack.common.log')
-CONF.import_opt('syslog_log_facility', 'mistral.openstack.common.log')
-
-# Extend oslo default_log_levels to include some that are useful for mistral
-# some are in oslo logging already, this is just making sure it stays this
-# way.
-default_log_levels = cfg.CONF.default_log_levels
-
-logs_to_quieten = [
-    'sqlalchemy=WARN',
-    'oslo.messaging=INFO',
-    'iso8601=WARN',
-    'eventlet.wsgi.server=WARN',
-    'stevedore=INFO',
-    'mistral.openstack.common.loopingcall=INFO',
-    'mistral.openstack.common.periodic_task=INFO',
-    'mistral.services.periodic=INFO'
-]
-
-for chatty in logs_to_quieten:
-    if chatty not in default_log_levels:
-        default_log_levels.append(chatty)
-
-cfg.set_defaults(
-    log.log_opts,
-    default_log_levels=default_log_levels
-)
-
 
 def parse_args(args=None, usage=None, default_config_files=None):
     CONF(
diff --git a/terracotta/db.py b/terracotta/db.py
index 193ecbc..5faefb6 100644
--- a/terracotta/db.py
+++ b/terracotta/db.py
@@ -1,10 +1,11 @@
 # Copyright 2012 Anton Beloglazov
+# Copyright 2015 Huawei Technologies Co. Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,29 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from contracts import contract
-from neat.contracts_primitive import *
-
 import datetime
 from sqlalchemy import *
 from sqlalchemy.engine.base import Connection
 
-import logging
-log = logging.getLogger(__name__)
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
 
 
 class Database(object):
     """ A class representing the database, where fields are tables.
     """
 
-    @contract(connection=Connection,
-              hosts=Table,
-              host_resource_usage=Table,
-              vms=Table,
-              vm_resource_usage=Table,
-              vm_migrations=Table,
-              host_states=Table,
-              host_overload=Table)
     def __init__(self, connection, hosts, host_resource_usage, vms,
                  vm_resource_usage, vm_migrations, host_states, host_overload):
         """ Initialize the database.
@@ -56,36 +48,28 @@ class Database(object):
         self.vm_migrations = vm_migrations
         self.host_states = host_states
         self.host_overload = host_overload
-        log.debug('Instantiated a Database object')
+        LOG.debug('Instantiated a Database object')
 
-    @contract
     def select_cpu_mhz_for_vm(self, uuid, n):
         """ Select n last values of CPU MHz for a VM UUID.
 
         :param uuid: The UUID of a VM.
-         :type uuid: str[36]
-
         :param n: The number of last values to select.
-         :type n: int,>0
-
         :return: The list of n last CPU Mhz values.
-         :rtype: list(int)
         """
         sel = select([self.vm_resource_usage.c.cpu_mhz]). \
             where(and_(
-                self.vms.c.id == self.vm_resource_usage.c.vm_id,
-                self.vms.c.uuid == uuid)). \
+            self.vms.c.id == self.vm_resource_usage.c.vm_id,
+            self.vms.c.uuid == uuid)). \
             order_by(self.vm_resource_usage.c.id.desc()). \
             limit(n)
         res = self.connection.execute(sel).fetchall()
         return list(reversed([int(x[0]) for x in res]))
 
-    @contract
     def select_last_cpu_mhz_for_vms(self):
         """ Select the last value of CPU MHz for all the VMs.
 
         :return: A dict of VM UUIDs to the last CPU MHz values.
-         :rtype: dict(str: int)
         """
         vru1 = self.vm_resource_usage
         vru2 = self.vm_resource_usage.alias()
@@ -105,31 +89,25 @@ class Database(object):
                 vms_last_mhz[str(uuid)] = 0
         return vms_last_mhz
 
-    @contract
     def select_vm_id(self, uuid):
         """ Select the ID of a VM by the VM UUID, or insert a new record.
 
         :param uuid: The UUID of a VM.
-         :type uuid: str[36]
-
         :return: The ID of the VM.
-         :rtype: int
         """
         sel = select([self.vms.c.id]).where(self.vms.c.uuid == uuid)
         row = self.connection.execute(sel).fetchone()
         if row is None:
             id = self.vms.insert().execute(uuid=uuid).inserted_primary_key[0]
-            log.info('Created a new DB record for a VM %s, id=%d', uuid, id)
+            LOG.info('Created a new DB record for a VM %s, id=%d', uuid, id)
             return int(id)
         else:
             return int(row['id'])
 
-    @contract
     def insert_vm_cpu_mhz(self, data):
         """ Insert a set of CPU MHz values for a set of VMs.
 
         :param data: A dictionary of VM UUIDs and CPU MHz values.
-         :type data: dict(str : int)
         """
         if data:
             query = []
@@ -139,24 +117,14 @@ class Database(object):
                               'cpu_mhz': cpu_mhz})
             self.vm_resource_usage.insert().execute(query)
 
-    @contract
     def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
         """ Insert new or update the corresponding host record.
 
         :param hostname: A host name.
-         :type hostname: str
-
         :param cpu_mhz: The total CPU frequency of the host in MHz.
-         :type cpu_mhz: int,>0
-
         :param cpu_cores: The number of physical CPU cores.
-         :type cpu_cores: int,>0
-
         :param ram: The total amount of RAM of the host in MB.
-         :type ram: long,>0
-
         :return: The ID of the host.
-         :rtype: int
         """
         sel = select([self.hosts.c.id]). \
             where(self.hosts.c.hostname == hostname)
@@ -167,7 +135,7 @@ class Database(object):
                 cpu_mhz=cpu_mhz,
                 cpu_cores=cpu_cores,
                 ram=ram).inserted_primary_key[0]
-            log.info('Created a new DB record for a host %s, id=%d',
+            LOG.info('Created a new DB record for a host %s, id=%d',
                      hostname, id)
             return int(id)
         else:
@@ -178,48 +146,36 @@ class Database(object):
                                            ram=ram))
             return int(row['id'])
 
-    @contract
     def insert_host_cpu_mhz(self, hostname, cpu_mhz):
         """ Insert a CPU MHz value for a host.
 
         :param hostname: A host name.
-         :type hostname: str
-
         :param cpu_mhz: The CPU usage of the host in MHz.
-         :type cpu_mhz: int
         """
         self.host_resource_usage.insert().execute(
             host_id=self.select_host_id(hostname),
             cpu_mhz=cpu_mhz)
 
-    @contract
     def select_cpu_mhz_for_host(self, hostname, n):
         """ Select n last values of CPU MHz for a host.
 
         :param hostname: A host name.
-         :type hostname: str
-
         :param n: The number of last values to select.
-         :type n: int,>0
-
         :return: The list of n last CPU Mhz values.
-         :rtype: list(int)
         """
         sel = select([self.host_resource_usage.c.cpu_mhz]). \
             where(and_(
-                self.hosts.c.id == self.host_resource_usage.c.host_id,
-                self.hosts.c.hostname == hostname)). \
+            self.hosts.c.id == self.host_resource_usage.c.host_id,
+            self.hosts.c.hostname == hostname)). \
             order_by(self.host_resource_usage.c.id.desc()). \
             limit(n)
         res = self.connection.execute(sel).fetchall()
         return list(reversed([int(x[0]) for x in res]))
 
-    @contract
     def select_last_cpu_mhz_for_hosts(self):
         """ Select the last value of CPU MHz for all the hosts.
 
         :return: A dict of host names to the last CPU MHz values.
-         :rtype: dict(str: int)
         """
         hru1 = self.host_resource_usage
         hru2 = self.host_resource_usage.alias()
@@ -227,7 +183,7 @@ class Database(object):
             hru1.outerjoin(hru2, and_(
                 hru1.c.host_id == hru2.c.host_id,
                 hru1.c.id < hru2.c.id))]). \
-             where(hru2.c.id == None)
+            where(hru2.c.id == None)
         hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall())
 
         sel = select([self.hosts.c.id, self.hosts.c.hostname])
@@ -241,12 +197,10 @@ class Database(object):
                 hosts_last_mhz[str(hostname)] = 0
         return hosts_last_mhz
 
-    @contract
     def select_host_characteristics(self):
         """ Select the characteristics of all the hosts.
 
         :return: Three dicts of hostnames to CPU MHz, cores, and RAM.
-         :rtype: tuple(dict(str: int), dict(str: int), dict(str: int))
         """
         hosts_cpu_mhz = {}
         hosts_cpu_cores = {}
@@ -258,15 +212,11 @@ class Database(object):
             hosts_ram[hostname] = int(x[4])
         return hosts_cpu_mhz, hosts_cpu_cores, hosts_ram
 
-    @contract
     def select_host_id(self, hostname):
         """ Select the ID of a host.
 
         :param hostname: A host name.
-         :type hostname: str
-
         :return: The ID of the host.
-         :rtype: int
         """
         sel = select([self.hosts.c.id]). \
             where(self.hosts.c.hostname == hostname)
@@ -275,44 +225,36 @@ class Database(object):
             raise LookupError('No host found for hostname: %s', hostname)
         return int(row['id'])
 
-    @contract
     def select_host_ids(self):
         """ Select the IDs of all the hosts.
 
         :return: A dict of host names to IDs.
-         :rtype: dict(str: int)
         """
         return dict((str(x[1]), int(x[0]))
                     for x in self.hosts.select().execute().fetchall())
 
-    @contract(datetime_threshold=datetime.datetime)
     def cleanup_vm_resource_usage(self, datetime_threshold):
         """ Delete VM resource usage data older than the threshold.
 
         :param datetime_threshold: A datetime threshold.
-         :type datetime_threshold: datetime.datetime
         """
         self.connection.execute(
             self.vm_resource_usage.delete().where(
                 self.vm_resource_usage.c.timestamp < datetime_threshold))
 
-    @contract(datetime_threshold=datetime.datetime)
     def cleanup_host_resource_usage(self, datetime_threshold):
         """ Delete host resource usage data older than the threshold.
 
         :param datetime_threshold: A datetime threshold.
-         :type datetime_threshold: datetime.datetime
         """
         self.connection.execute(
             self.host_resource_usage.delete().where(
                 self.host_resource_usage.c.timestamp < datetime_threshold))
 
-    @contract
     def insert_host_states(self, hosts):
         """ Insert host states for a set of hosts.
 
         :param hosts: A dict of hostnames to states (0, 1).
-         :type hosts: dict(str: int)
         """
         host_ids = self.select_host_ids()
         to_insert = [{'host_id': host_ids[k],
@@ -321,12 +263,10 @@ class Database(object):
         self.connection.execute(
             self.host_states.insert(), to_insert)
 
-    @contract
     def select_host_states(self):
         """ Select the current states of all the hosts.
 
         :return: A dict of host names to states.
-         :rtype: dict(str: int)
         """
         hs1 = self.host_states
         hs2 = self.host_states.alias()
@@ -334,7 +274,7 @@ class Database(object):
             hs1.outerjoin(hs2, and_(
                 hs1.c.host_id == hs2.c.host_id,
                 hs1.c.id < hs2.c.id))]). \
-             where(hs2.c.id == None)
+            where(hs2.c.id == None)
         data = dict(self.connection.execute(sel).fetchall())
         host_ids = self.select_host_ids()
         host_states = {}
@@ -345,51 +285,39 @@ class Database(object):
                 host_states[str(host)] = 1
         return host_states
 
-    @contract
     def select_active_hosts(self):
         """ Select the currently active hosts.
 
         :return: A list of host names.
-         :rtype: list(str)
         """
         return [host
                 for host, state in self.select_host_states().items()
                 if state == 1]
 
-    @contract
     def select_inactive_hosts(self):
         """ Select the currently inactive hosts.
 
         :return: A list of host names.
-         :rtype: list(str)
         """
         return [host
                 for host, state in self.select_host_states().items()
                 if state == 0]
 
-    @contract
     def insert_host_overload(self, hostname, overload):
         """ Insert whether a host is overloaded.
 
         :param hostname: A host name.
-         :type hostname: str
-
         :param overload: Whether the host is overloaded.
-         :type overload: bool
         """
         self.host_overload.insert().execute(
             host_id=self.select_host_id(hostname),
             overload=int(overload))
 
-    @contract
     def insert_vm_migration(self, vm, hostname):
         """ Insert a VM migration.
 
         :param hostname: A VM UUID.
-         :type hostname: str[36]
-
         :param hostname: A host name.
-         :type hostname: str
         """
         self.vm_migrations.insert().execute(
             vm_id=self.select_vm_id(vm),
diff --git a/terracotta/exceptions.py b/terracotta/exceptions.py
new file mode 100644
index 0000000..e69de29
diff --git a/terracotta/globals/manager.py b/terracotta/globals/manager.py
index ad78b95..3486e9c 100644
--- a/terracotta/globals/manager.py
+++ b/terracotta/globals/manager.py
@@ -4,7 +4,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -67,550 +67,53 @@ IP addresses of the hosts and their MAC addresses is initialized in
 the beginning of the global manager's execution.
 """
 
-from contracts import contract
-from neat.contracts_primitive import *
-from neat.contracts_extra import *
-
-import bottle
 from hashlib import sha1
-import novaclient
-from novaclient import client
-import time
-import subprocess
-
-import neat.common as common
-from neat.config import *
-from neat.db_utils import *
-
-import logging
-log = logging.getLogger(__name__)
-
 import platform
+
 dist = platform.linux_distribution(full_distribution_name=0)[0]
 if dist in ['redhat', 'centos']:
     etherwake = 'ether-wake'
 else:
     etherwake = 'etherwake'
+import subprocess
+import time
+
+from contracts import contract
+import novaclient
+from novaclient.v2 import client
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from terracotta import common
+from terracotta.contracts_primitive import *
+from terracotta.contracts_extra import *
+from terracotta.utils import db_utils
 
 
-ERRORS = {
-    400: 'Bad input parameter: incorrect or missing parameters',
-    401: 'Unauthorized: user credentials are missing',
-    403: 'Forbidden: user credentials do not much the ones ' +
-         'specified in the configuration file',
-    405: 'Method not allowed: the request is made with ' +
-         'a method other than the only supported PUT',
-    412: 'Precondition failed: the request has been sent more ' +
-         'than 5 seconds ago, the states of the hosts/VMs may ' +
-         'have changed - retry'}
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
 
 
 @contract
-def raise_error(status_code):
-    """ Raise an HTTPResponse exception with the specified status code.
+def host_mac(host):
+    """ Get the MAC address of a host.
 
-    :param status_code: An HTTP status code of the error.
-     :type status_code: int
-    """
-    if status_code in ERRORS:
-        if status_code == 412 and log.isEnabledFor(logging.INFO):
-            log.info('REST service: %s', ERRORS[status_code])
-        else:
-            log.error('REST service: %s', ERRORS[status_code])
-        raise bottle.HTTPResponse(ERRORS[status_code], status_code)
-    log.error('REST service: Unknown error')
-    raise bottle.HTTPResponse('Unknown error', 500)
+    :param host: A host name.
+     :type host: str
 
-
-@contract
-def validate_params(user, password, params):
-    """ Validate the input request parameters.
-
-    :param user: A sha1-hashed user name to compare to.
-     :type user: str
-
-    :param password: A sha1-hashed password to compare to.
-     :type password: str
-
-    :param params: A dictionary of input parameters.
-     :type params: dict(str: *)
-
-    :return: Whether the parameters are valid.
-     :rtype: bool
-    """
-    if 'username' not in params or 'password' not in params:
-        raise_error(401)
-        return False
-    if params['username'] != user or \
-       params['password'] != password:
-        raise_error(403)
-        return False
-    if 'reason' not in params or \
-       'time' not in params or \
-       'host' not in params or \
-       params['reason'] not in [0, 1] or \
-       params['reason'] == 1 and 'vm_uuids' not in params:
-        raise_error(400)
-        return False
-    if params['time'] + 5 < time.time():
-        raise_error(412)
-        return False
-    if log.isEnabledFor(logging.DEBUG):
-        log.debug('Request parameters validated')
-    return True
-
-
-def start():
-    """ Start the global manager web service.
-    """
-    config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH],
-                                      REQUIRED_FIELDS)
-
-    common.init_logging(
-        config['log_directory'],
-        'global-manager.log',
-        int(config['log_level']))
-
-    state = init_state(config)
-    switch_hosts_on(state['db'],
-                    config['ether_wake_interface'],
-                    state['host_macs'],
-                    state['compute_hosts'])
-
-    bottle.debug(True)
-    bottle.app().state = {
-        'config': config,
-        'state': state}
-
-    host = config['global_manager_host']
-    port = config['global_manager_port']
-    log.info('Starting the global manager listening to %s:%s', host, port)
-    bottle.run(host=host, port=port)
-
-
-@contract
-def get_params(request):
-    """ Return the request data as a dictionary.
-
-    :param request: A Bottle request object.
-     :type request: *
-
-    :return: The request data dictionary.
-     :rtype: dict(str: *)
-    """
-    params = dict(request.forms)
-    if 'time' in params:
-        params['time'] = float(params['time'])
-    if 'reason' in params:
-        params['reason'] = int(params['reason'])
-    if 'vm_uuids' in params:
-        params['vm_uuids'] = params['vm_uuids'].split(',')
-    return params
-
-
-@contract
-def get_remote_addr(request):
-    """ Return the IP address of the client.
-
-    :param request: A Bottle request object.
-     :type request: *
-
-    :return: The IP address of the remote client.
+    :return: The MAC address of the host.
      :rtype: str
     """
-    return bottle.request.remote_addr
-
-
-@bottle.put('/')
-def service():
-    params = get_params(bottle.request)
-    state = bottle.app().state
-    validate_params(state['state']['hashed_username'],
-                    state['state']['hashed_password'],
-                    params)
-    log.info('Received a request from %s: %s',
-             get_remote_addr(bottle.request),
-             str(params))
-    try:
-        if params['reason'] == 0:
-            log.info('Processing an underload of a host %s', params['host'])
-            execute_underload(
-                state['config'],
-                state['state'],
-                params['host'])
-        else:
-            log.info('Processing an overload, VMs: %s', str(params['vm_uuids']))
-            execute_overload(
-                state['config'],
-                state['state'],
-                params['host'],
-                params['vm_uuids'])
-    except:
-        log.exception('Exception during request processing:')
-        raise
-
-
-@bottle.route('/', method='ANY')
-def error():
-    message = 'Method not allowed: the request has been made' + \
-              'with a method other than the only supported PUT'
-    log.error('REST service: %s', message)
-    raise bottle.HTTPResponse(message, 405)
-
-
-@contract
-def init_state(config):
-    """ Initialize a dict for storing the state of the global manager.
-
-    :param config: A config dictionary.
-     :type config: dict(str: *)
-
-    :return: A dict containing the initial state of the global managerr.
-     :rtype: dict
-    """
-    return {'previous_time': 0,
-            'db': init_db(config['sql_connection']),
-            'nova': client.Client(2,
-                                  config['os_admin_user'],
-                                  config['os_admin_password'],
-                                  config['os_admin_tenant_name'],
-                                  config['os_auth_url'],
-                                  service_type="compute"),
-            'hashed_username': sha1(config['os_admin_user']).hexdigest(),
-            'hashed_password': sha1(config['os_admin_password']).hexdigest(),
-            'compute_hosts': common.parse_compute_hosts(
-                                        config['compute_hosts']),
-            'host_macs': {}}
-
-
-@contract
-def execute_underload(config, state, host):
-    """ Process an underloaded host: migrate all VMs from the host.
-
-1. Prepare the data about the current states of the hosts and VMs.
-
-2. Call the function specified in the `algorithm_vm_placement_factory`
-   configuration option and pass the data on the states of the hosts and VMs.
-
-3. Call the Nova API to migrate the VMs according to the placement
-   determined by the `algorithm_vm_placement_factory` algorithm.
-
-4. Switch off the host at the end of the VM migration.
-
-    :param config: A config dictionary.
-     :type config: dict(str: *)
-
-    :param state: A state dictionary.
-     :type state: dict(str: *)
-
-    :param host: A host name.
-     :type host: str
-
-    :return: The updated state dictionary.
-     :rtype: dict(str: *)
-    """
-    log.info('Started processing an underload request')
-    underloaded_host = host
-    hosts_cpu_total, _, hosts_ram_total = state['db'].select_host_characteristics()
-
-    hosts_to_vms = vms_by_hosts(state['nova'], state['compute_hosts'])
-    vms_last_cpu = state['db'].select_last_cpu_mhz_for_vms()
-    hosts_last_cpu = state['db'].select_last_cpu_mhz_for_hosts()
-
-    # Remove VMs from hosts_to_vms that are not in vms_last_cpu
-    # These VMs are new and no data have been collected from them
-    for host, vms in hosts_to_vms.items():
-        for i, vm in enumerate(vms):
-            if not vm in vms_last_cpu:
-                del hosts_to_vms[host][i]
-
-    if log.isEnabledFor(logging.DEBUG):
-        log.debug('hosts_to_vms: %s', str(hosts_to_vms))
-
-    hosts_cpu_usage = {}
-    hosts_ram_usage = {}
-    hosts_to_keep_active = set()
-    for host, vms in hosts_to_vms.items():
-        if vms:
-            host_cpu_mhz = hosts_last_cpu[host]
-            for vm in vms:
-                if vm not in vms_last_cpu:
-                    log.info('No data yet for VM: %s - skipping host %s', vm, host)
-                    hosts_to_keep_active.add(host)
-                    hosts_cpu_total.pop(host, None)
-                    hosts_ram_total.pop(host, None)
-                    hosts_cpu_usage.pop(host, None)
-                    hosts_ram_usage.pop(host, None)
-                    break
-                host_cpu_mhz += vms_last_cpu[vm]
-            else:
-                hosts_cpu_usage[host] = host_cpu_mhz
-                hosts_ram_usage[host] = host_used_ram(state['nova'], host)
-        else:
-            # Exclude inactive hosts
-            hosts_cpu_total.pop(host, None)
-            hosts_ram_total.pop(host, None)
-
-    if log.isEnabledFor(logging.DEBUG):
-        log.debug('Host CPU usage: %s', str(hosts_last_cpu))
-        log.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
-
-    # Exclude the underloaded host
-    hosts_cpu_usage.pop(underloaded_host, None)
-    hosts_cpu_total.pop(underloaded_host, None)
-    hosts_ram_usage.pop(underloaded_host, None)
-    hosts_ram_total.pop(underloaded_host, None)
-
-    if log.isEnabledFor(logging.DEBUG):
-        log.debug('Excluded the underloaded host %s', underloaded_host)
-        log.debug('Host CPU usage: %s', str(hosts_last_cpu))
-        log.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
-
-    vms_to_migrate = vms_by_host(state['nova'], underloaded_host)
-    vms_cpu = {}
-    for vm in vms_to_migrate:
-        if vm not in vms_last_cpu:
-            log.info('No data yet for VM: %s - dropping the request', vm)
-            log.info('Skipped an underload request')
-            return state
-        vms_cpu[vm] = state['db'].select_cpu_mhz_for_vm(
-            vm,
-            int(config['data_collector_data_length']))
-    vms_ram = vms_ram_limit(state['nova'], vms_to_migrate)
-
-    # Remove VMs that are not in vms_ram
-    # These instances might have been deleted
-    for i, vm in enumerate(vms_to_migrate):
-        if not vm in vms_ram:
-            del vms_to_migrate[i]
-
-    if not vms_to_migrate:
-        log.info('No VMs to migrate - completed the underload request')
-        return state
-
-    for vm in vms_cpu.keys():
-        if not vm in vms_ram:
-            del vms_cpu[vm]
-
-    time_step = int(config['data_collector_interval'])
-    migration_time = common.calculate_migration_time(
-        vms_ram,
-        float(config['network_migration_bandwidth']))
-
-    if 'vm_placement' not in state:
-        vm_placement_params = common.parse_parameters(
-            config['algorithm_vm_placement_parameters'])
-        vm_placement_state = None
-        vm_placement = common.call_function_by_name(
-            config['algorithm_vm_placement_factory'],
-            [time_step,
-             migration_time,
-             vm_placement_params])
-        state['vm_placement'] = vm_placement
-        state['vm_placement_state'] = {}
-    else:
-        vm_placement = state['vm_placement']
-        vm_placement_state = state['vm_placement_state']
-
-    log.info('Started underload VM placement')
-    placement, vm_placement_state = vm_placement(
-        hosts_cpu_usage, hosts_cpu_total,
-        hosts_ram_usage, hosts_ram_total,
-        {}, {},
-        vms_cpu, vms_ram,
-        vm_placement_state)
-    log.info('Completed underload VM placement')
-    state['vm_placement_state'] = vm_placement_state
-
-    if log.isEnabledFor(logging.INFO):
-        log.info('Underload: obtained a new placement %s', str(placement))
-
-    active_hosts = hosts_cpu_total.keys()
-    inactive_hosts = set(state['compute_hosts']) - set(active_hosts)
-    prev_inactive_hosts = set(state['db'].select_inactive_hosts())
-    hosts_to_deactivate = list(inactive_hosts
-                               - prev_inactive_hosts
-                               - hosts_to_keep_active)
-
-    if not placement:
-        log.info('Nothing to migrate')
-        if underloaded_host in hosts_to_deactivate:
-            hosts_to_deactivate.remove(underloaded_host)
-    else:
-        log.info('Started underload VM migrations')
-        migrate_vms(state['db'],
-                    state['nova'],
-                    config['vm_instance_directory'],
-                    placement,
-                    bool(config['block_migration']))
-        log.info('Completed underload VM migrations')
-
-    if hosts_to_deactivate:
-        switch_hosts_off(state['db'],
-                         config['sleep_command'],
-                         hosts_to_deactivate)
-
-    log.info('Completed processing an underload request')
-    return state
-
-
-@contract
-def execute_overload(config, state, host, vm_uuids):
-    """ Process an overloaded host: migrate the selected VMs from it.
-
-1. Prepare the data about the current states of the hosts and VMs.
-
-2. Call the function specified in the `algorithm_vm_placement_factory`
-   configuration option and pass the data on the states of the hosts and VMs.
-
-3. Call the Nova API to migrate the VMs according to the placement
-   determined by the `algorithm_vm_placement_factory` algorithm.
-
-4. Switch on the inactive hosts required to accommodate the VMs.
-
-    :param config: A config dictionary.
-     :type config: dict(str: *)
-
-    :param state: A state dictionary.
-     :type state: dict(str: *)
-
-    :param host: A host name.
-     :type host: str
-
-    :param vm_uuids: A list of VM UUIDs to migrate from the host.
-     :type vm_uuids: list(str)
-
-    :return: The updated state dictionary.
-     :rtype: dict(str: *)
-    """
-    log.info('Started processing an overload request')
-    overloaded_host = host
-    hosts_cpu_total, _, hosts_ram_total = state['db'].select_host_characteristics()
-    hosts_to_vms = vms_by_hosts(state['nova'], state['compute_hosts'])
-    vms_last_cpu = state['db'].select_last_cpu_mhz_for_vms()
-    hosts_last_cpu = state['db'].select_last_cpu_mhz_for_hosts()
-
-    # Remove VMs from hosts_to_vms that are not in vms_last_cpu
-    # These VMs are new and no data have been collected from them
-    for host, vms in hosts_to_vms.items():
-        for i, vm in enumerate(vms):
-            if not vm in vms_last_cpu:
-                del hosts_to_vms[host][i]
-
-    hosts_cpu_usage = {}
-    hosts_ram_usage = {}
-    inactive_hosts_cpu = {}
-    inactive_hosts_ram = {}
-    for host, vms in hosts_to_vms.items():
-        if vms:
-            host_cpu_mhz = hosts_last_cpu[host]
-            for vm in vms:
-                if vm not in vms_last_cpu:
-                    log.info('No data yet for VM: %s - skipping host %s', vm, host)
-                    hosts_cpu_total.pop(host, None)
-                    hosts_ram_total.pop(host, None)
-                    hosts_cpu_usage.pop(host, None)
-                    hosts_ram_usage.pop(host, None)
-                    break
-                host_cpu_mhz += vms_last_cpu[vm]
-            else:
-                hosts_cpu_usage[host] = host_cpu_mhz
-                hosts_ram_usage[host] = host_used_ram(state['nova'], host)
-        else:
-            inactive_hosts_cpu[host] = hosts_cpu_total[host]
-            inactive_hosts_ram[host] = hosts_ram_total[host]
-            hosts_cpu_total.pop(host, None)
-            hosts_ram_total.pop(host, None)
-
-    # Exclude the overloaded host
-    hosts_cpu_usage.pop(overloaded_host, None)
-    hosts_cpu_total.pop(overloaded_host, None)
-    hosts_ram_usage.pop(overloaded_host, None)
-    hosts_ram_total.pop(overloaded_host, None)
-
-    if log.isEnabledFor(logging.DEBUG):
-        log.debug('Host CPU usage: %s', str(hosts_last_cpu))
-        log.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
-
-    vms_to_migrate = vm_uuids
-    vms_cpu = {}
-    for vm in vms_to_migrate:
-        if vm not in vms_last_cpu:
-            log.info('No data yet for VM: %s - dropping the request', vm)
-            log.info('Skipped an underload request')
-            return state
-        vms_cpu[vm] = state['db'].select_cpu_mhz_for_vm(
-            vm,
-            int(config['data_collector_data_length']))
-    vms_ram = vms_ram_limit(state['nova'], vms_to_migrate)
-
-    # Remove VMs that are not in vms_ram
-    # These instances might have been deleted
-    for i, vm in enumerate(vms_to_migrate):
-        if not vm in vms_ram:
-            del vms_to_migrate[i]
-
-    if not vms_to_migrate:
-        log.info('No VMs to migrate - completed the overload request')
-        return state
-
-    for vm in vms_cpu.keys():
-        if not vm in vms_ram:
-            del vms_cpu[vm]
-
-    time_step = int(config['data_collector_interval'])
-    migration_time = common.calculate_migration_time(
-        vms_ram,
-        float(config['network_migration_bandwidth']))
-
-    if 'vm_placement' not in state:
-        vm_placement_params = common.parse_parameters(
-            config['algorithm_vm_placement_parameters'])
-        vm_placement_state = None
-        vm_placement = common.call_function_by_name(
-            config['algorithm_vm_placement_factory'],
-            [time_step,
-             migration_time,
-             vm_placement_params])
-        state['vm_placement'] = vm_placement
-        state['vm_placement_state'] = {}
-    else:
-        vm_placement = state['vm_placement']
-        vm_placement_state = state['vm_placement_state']
-
-    log.info('Started overload VM placement')
-    placement, vm_placement_state = vm_placement(
-        hosts_cpu_usage, hosts_cpu_total,
-        hosts_ram_usage, hosts_ram_total,
-        inactive_hosts_cpu, inactive_hosts_ram,
-        vms_cpu, vms_ram,
-        vm_placement_state)
-    log.info('Completed overload VM placement')
-    state['vm_placement_state'] = vm_placement_state
-
-    if log.isEnabledFor(logging.INFO):
-        log.info('Overload: obtained a new placement %s', str(placement))
-
-    if not placement:
-        log.info('Nothing to migrate')
-    else:
-        hosts_to_activate = list(
-            set(inactive_hosts_cpu.keys()).intersection(
-                set(placement.values())))
-        if hosts_to_activate:
-            switch_hosts_on(state['db'],
-                            config['ether_wake_interface'],
-                            state['host_macs'],
-                            hosts_to_activate)
-        log.info('Started overload VM migrations')
-        migrate_vms(state['db'],
-                    state['nova'],
-                    config['vm_instance_directory'],
-                    placement,
-                    bool(config['block_migration']))
-        log.info('Completed overload VM migrations')
-    log.info('Completed processing an overload request')
-    return state
+    mac = subprocess.Popen(
+        ("ping -c 1 {0} > /dev/null;" +
+         "arp -a {0} | awk '{{print $4}}'").format(host),
+        stdout=subprocess.PIPE,
+        shell=True).communicate()[0].strip()
+    if len(mac) != 17:
+        LOG.warning('Received a wrong mac address for %s: %s',
+                    host, mac)
+        return ''
+    return mac
 
 
 @contract
@@ -669,45 +172,6 @@ def host_used_ram(nova, host):
     return data[1].memory_mb
 
 
-@contract
-def host_mac(host):
-    """ Get mac address of a host.
-
-    :param host: A host name.
-     :type host: str
-
-    :return: The mac address of the host.
-     :rtype: str
-    """
-    mac = subprocess.Popen(
-        ("ping -c 1 {0} > /dev/null;" +
-         "arp -a {0} | awk '{{print $4}}'").format(host),
-        stdout=subprocess.PIPE,
-        shell=True).communicate()[0].strip()
-    if len(mac) != 17:
-        log.warning('Received a wrong mac address for %s: %s',
-                    host, mac)
-        return ''
-    return mac
-
-
-@contract
-def vms_by_host(nova, host):
-    """ Get VMs from the specified host using the Nova API.
-
-    :param nova: A Nova client.
-     :type nova: *
-
-    :param host: A host name.
-     :type host: str
-
-    :return: A list of VM UUIDs from the specified host.
-     :rtype: list(str)
-    """
-    return [str(vm.id) for vm in nova.servers.list()
-            if (vm_hostname(vm) == host and str(getattr(vm, 'OS-EXT-STS:vm_state')) == 'active')]
-
-
 @contract
 def vms_by_hosts(nova, hosts):
     """ Get a map of host names to VMs using the Nova API.
@@ -727,6 +191,24 @@ def vms_by_hosts(nova, hosts):
     return result
 
 
+@contract
+def vms_by_host(nova, host):
+    """ Get VMs from the specified host using the Nova API.
+
+    :param nova: A Nova client.
+     :type nova: *
+
+    :param host: A host name.
+     :type host: str
+
+    :return: A list of VM UUIDs from the specified host.
+     :rtype: list(str)
+    """
+    return [str(vm.id) for vm in nova.servers.list()
+            if (vm_hostname(vm) == host and str(
+            getattr(vm, 'OS-EXT-STS:vm_state')) == 'active')]
+
+
 @contract
 def vm_hostname(vm):
     """ Get the name of the host where VM is running.
@@ -762,7 +244,7 @@ def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
     retry_placement = {}
     vms = placement.keys()
     # Migrate only 2 VMs at a time, as otherwise migrations may fail
-    #vm_pairs = [vms[x:x + 2] for x in xrange(0, len(vms), 2)]
+    # vm_pairs = [vms[x:x + 2] for x in xrange(0, len(vms), 2)]
     # Temporary migrates VMs one by one
     vm_pairs = [vms[x:x + 1] for x in xrange(0, len(vms), 1)]
     for vm_pair in vm_pairs:
@@ -776,26 +258,23 @@ def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
         while True:
             for vm_uuid in list(vm_pair):
                 vm = nova.servers.get(vm_uuid)
-                if log.isEnabledFor(logging.DEBUG):
-                    log.debug('VM %s: %s, %s',
-                              vm_uuid,
-                              vm_hostname(vm),
-                              vm.status)
+                LOG.debug('VM %s: %s, %s',
+                          vm_uuid,
+                          vm_hostname(vm),
+                          vm.status)
                 if vm_hostname(vm) == placement[vm_uuid] and \
-                    vm.status == u'ACTIVE':
+                                vm.status == u'ACTIVE':
                     vm_pair.remove(vm_uuid)
                     db.insert_vm_migration(vm_uuid, placement[vm_uuid])
-                    if log.isEnabledFor(logging.INFO):
-                        log.info('Completed migration of VM %s to %s',
-                                 vm_uuid, placement[vm_uuid])
+                    LOG.info('Completed migration of VM %s to %s',
+                             vm_uuid, placement[vm_uuid])
                 elif time.time() - start_time > 300 and \
-                    vm_hostname(vm) != placement[vm_uuid] and \
-                    vm.status == u'ACTIVE':
+                                vm_hostname(vm) != placement[vm_uuid] and \
+                                vm.status == u'ACTIVE':
                     vm_pair.remove(vm_uuid)
                     retry_placement[vm_uuid] = placement[vm_uuid]
-                    if log.isEnabledFor(logging.WARNING):
-                        log.warning('Time-out for migration of VM %s to %s, ' +
-                                    'will retry', vm_uuid, placement[vm_uuid])
+                    LOG.warning('Time-out for migration of VM %s to %s, ' +
+                                'will retry', vm_uuid, placement[vm_uuid])
                 else:
                     break
             else:
@@ -803,9 +282,8 @@ def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
             time.sleep(3)
 
     if retry_placement:
-        if log.isEnabledFor(logging.INFO):
-            log.info('Retrying the following migrations: %s',
-                     str(retry_placement))
+        LOG.info('Retrying the following migrations: %s',
+                 str(retry_placement))
         migrate_vms(db, nova, vm_instance_directory,
                     retry_placement, block_migration)
 
@@ -833,8 +311,7 @@ def migrate_vm(nova, vm_instance_directory, vm, host, block_migration):
     subprocess.call('chown -R nova:nova ' + vm_instance_directory,
                     shell=True)
     nova.servers.live_migrate(vm, host, block_migration, False)
-    if log.isEnabledFor(logging.INFO):
-        log.info('Started migration of VM %s to %s', vm, host)
+    LOG.info('Started migration of VM %s to %s', vm, host)
 
 
 @contract
@@ -852,42 +329,369 @@ def switch_hosts_off(db, sleep_command, hosts):
     """
     if sleep_command:
         for host in hosts:
-            command = 'ssh {0} "{1}"'. \
-                format(host, sleep_command)
-            if log.isEnabledFor(logging.DEBUG):
-                log.debug('Calling: %s', command)
+            command = 'ssh {0} "{1}"'.format(host, sleep_command)
+            LOG.debug('Calling: %s', command)
             subprocess.call(command, shell=True)
-    if log.isEnabledFor(logging.INFO):
-        log.info('Switched off hosts: %s', str(hosts))
+    LOG.info('Switched off hosts: %s', str(hosts))
     db.insert_host_states(dict((x, 0) for x in hosts))
 
 
-@contract
-def switch_hosts_on(db, ether_wake_interface, host_macs, hosts):
-    """ Switch hosts to the active mode.
+class GlobalManager(object):
+    def __init__(self, *args, **kwargs):
+        self.state = self.init_state()
+        self.switch_hosts_on(self.state['compute_hosts'])
 
-    :param db: The database object.
-     :type db: Database
+    def init_state(self):
+        """ Initialize a dict for storing the state of the global manager.
+        """
+        return {'previous_time': 0,
+                'db': db_utils.init_db(),
+                'nova': client.Client(2,
+                                      CONF.os_admin_user,
+                                      CONF.os_admin_password,
+                                      CONF.os_admin_tenant_name,
+                                      CONF.os_auth_url,
+                                      service_type="compute"),
+                'hashed_username': sha1(CONF.os_admin_user).hexdigest(),
+                'hashed_password': sha1(CONF.os_admin_password).hexdigest(),
+                'compute_hosts': common.parse_compute_hosts(
+                    CONF.compute_hosts),
+                'host_macs': {}}
 
-    :param ether_wake_interface: An interface to send a magic packet.
-     :type ether_wake_interface: str
+    def switch_hosts_on(self, hosts):
+        """ Switch hosts to the active mode.
+        """
+        for host in hosts:
+            if host not in self.state['host_macs']:
+                self.state['host_macs'][host] = host_mac(host)
 
-    :param host_macs: A dict of host names to mac addresses.
-     :type host_macs: dict(str: str)
+            command = '{0} -i {1} {2}'.format(
+                etherwake,
+                CONF.ether_wake_interface,
+                self.state['host_macs'][host])
 
-    :param hosts: A list of hosts to switch on.
-     :type hosts: list(str)
-    """
-    for host in hosts:
-        if host not in host_macs:
-            host_macs[host] = host_mac(host)
-        command = '{0} -i {1} {2}'.format(
-            etherwake,
-            ether_wake_interface,
-            host_macs[host])
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('Calling: %s', command)
-        subprocess.call(command, shell=True)
-    if log.isEnabledFor(logging.INFO):
-        log.info('Switched on hosts: %s', str(hosts))
-    db.insert_host_states(dict((x, 1) for x in hosts))
+            LOG.debug('Calling: %s', command)
+            subprocess.call(command, shell=True)
+
+        LOG.info('Switched on hosts: %s', str(hosts))
+        self.state['db'].insert_host_states(
+            dict((x, 1) for x in hosts))
+
+
+    @contract
+    def execute_underload(self, host):
+        """ Process an underloaded host: migrate all VMs from the host.
+
+        1. Prepare the data about the current states of the hosts and VMs.
+
+        2. Call the function specified in the `algorithm_vm_placement_factory`
+           configuration option and pass the data on the states of the hosts and VMs.
+
+        3. Call the Nova API to migrate the VMs according to the placement
+           determined by the `algorithm_vm_placement_factory` algorithm.
+
+        4. Switch off the host at the end of the VM migration.
+
+        :param host: A host name.
+         :type host: str
+
+        :return: The updated state dictionary.
+         :rtype: dict(str: *)
+        """
+        LOG.info('Started processing an underload request')
+        underloaded_host = host
+        hosts_cpu_total, _, hosts_ram_total = self.state[
+            'db'].select_host_characteristics()
+
+        hosts_to_vms = vms_by_hosts(self.state['nova'],
+                                    self.state['compute_hosts'])
+        vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms()
+        hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts()
+
+        # Remove VMs from hosts_to_vms that are not in vms_last_cpu
+        # These VMs are new and no data have been collected from them
+        for host, vms in hosts_to_vms.items():
+            for i, vm in enumerate(vms):
+                if not vm in vms_last_cpu:
+                    del hosts_to_vms[host][i]
+
+        LOG.debug('hosts_to_vms: %s', str(hosts_to_vms))
+
+        hosts_cpu_usage = {}
+        hosts_ram_usage = {}
+        hosts_to_keep_active = set()
+        for host, vms in hosts_to_vms.items():
+            if vms:
+                host_cpu_mhz = hosts_last_cpu[host]
+                for vm in vms:
+                    if vm not in vms_last_cpu:
+                        LOG.info('No data yet for VM: %s - skipping host %s',
+                                 vm,
+                                 host)
+                        hosts_to_keep_active.add(host)
+                        hosts_cpu_total.pop(host, None)
+                        hosts_ram_total.pop(host, None)
+                        hosts_cpu_usage.pop(host, None)
+                        hosts_ram_usage.pop(host, None)
+                        break
+                    host_cpu_mhz += vms_last_cpu[vm]
+                else:
+                    hosts_cpu_usage[host] = host_cpu_mhz
+                    hosts_ram_usage[host] = host_used_ram(self.state['nova'], host)
+            else:
+                # Exclude inactive hosts
+                hosts_cpu_total.pop(host, None)
+                hosts_ram_total.pop(host, None)
+
+        LOG.debug('Host CPU usage: %s', str(hosts_last_cpu))
+        LOG.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
+
+        # Exclude the underloaded host
+        hosts_cpu_usage.pop(underloaded_host, None)
+        hosts_cpu_total.pop(underloaded_host, None)
+        hosts_ram_usage.pop(underloaded_host, None)
+        hosts_ram_total.pop(underloaded_host, None)
+
+        LOG.debug('Excluded the underloaded host %s', underloaded_host)
+        LOG.debug('Host CPU usage: %s', str(hosts_last_cpu))
+        LOG.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
+
+        vms_to_migrate = vms_by_host(self.state['nova'], underloaded_host)
+        vms_cpu = {}
+        for vm in vms_to_migrate:
+            if vm not in vms_last_cpu:
+                LOG.info('No data yet for VM: %s - dropping the request', vm)
+                LOG.info('Skipped an underload request')
+                return self.state
+            vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
+                vm,
+                int(CONF.data_collector_data_length))
+        vms_ram = vms_ram_limit(self.state['nova'], vms_to_migrate)
+
+        # Remove VMs that are not in vms_ram
+        # These instances might have been deleted
+        for i, vm in enumerate(vms_to_migrate):
+            if not vm in vms_ram:
+                del vms_to_migrate[i]
+
+        if not vms_to_migrate:
+            LOG.info('No VMs to migrate - completed the underload request')
+            return self.state
+
+        for vm in vms_cpu.keys():
+            if not vm in vms_ram:
+                del vms_cpu[vm]
+
+        time_step = int(CONF.data_collector_interval)
+        migration_time = common.calculate_migration_time(
+            vms_ram,
+            float(CONF.network_migration_bandwidth))
+
+        if 'vm_placement' not in self.state:
+            vm_placement_params = common.parse_parameters(
+                CONF.algorithm_vm_placement_parameters)
+            vm_placement_state = None
+            vm_placement = common.call_function_by_name(
+                CONF.algorithm_vm_placement_factory,
+                [time_step,
+                 migration_time,
+                 vm_placement_params])
+            self.state['vm_placement'] = vm_placement
+            self.state['vm_placement_state'] = {}
+        else:
+            vm_placement = self.state['vm_placement']
+            vm_placement_state = self.state['vm_placement_state']
+
+        LOG.info('Started underload VM placement')
+        placement, vm_placement_state = vm_placement(
+            hosts_cpu_usage, hosts_cpu_total,
+            hosts_ram_usage, hosts_ram_total,
+            {}, {},
+            vms_cpu, vms_ram,
+            vm_placement_state)
+        LOG.info('Completed underload VM placement')
+        self.state['vm_placement_state'] = vm_placement_state
+
+        LOG.info('Underload: obtained a new placement %s', str(placement))
+
+        active_hosts = hosts_cpu_total.keys()
+        inactive_hosts = set(self.state['compute_hosts']) - set(active_hosts)
+        prev_inactive_hosts = set(self.state['db'].select_inactive_hosts())
+        hosts_to_deactivate = list(inactive_hosts
+                                   - prev_inactive_hosts
+                                   - hosts_to_keep_active)
+
+        if not placement:
+            LOG.info('Nothing to migrate')
+            if underloaded_host in hosts_to_deactivate:
+                hosts_to_deactivate.remove(underloaded_host)
+        else:
+            LOG.info('Started underload VM migrations')
+            migrate_vms(self.state['db'],
+                        self.state['nova'],
+                        CONF.vm_instance_directory,
+                        placement,
+                        bool(CONF.block_migration))
+            LOG.info('Completed underload VM migrations')
+
+        if hosts_to_deactivate:
+            switch_hosts_off(self.state['db'],
+                             CONF.sleep_command,
+                             hosts_to_deactivate)
+
+        LOG.info('Completed processing an underload request')
+        return self.state
+
+    def execute_overload(self, host, vm_uuids):
+        """ Process an overloaded host: migrate the selected VMs from it.
+
+        1. Prepare the data about the current states of the hosts and VMs.
+
+        2. Call the function specified in the `algorithm_vm_placement_factory`
+           configuration option and pass the data on the states of the hosts and VMs.
+
+        3. Call the Nova API to migrate the VMs according to the placement
+           determined by the `algorithm_vm_placement_factory` algorithm.
+
+        4. Switch on the inactive hosts required to accommodate the VMs.
+
+        """
+        LOG.info('Started processing an overload request')
+        overloaded_host = host
+        hosts_cpu_total, _, hosts_ram_total = self.state[
+            'db'].select_host_characteristics()
+        hosts_to_vms = vms_by_hosts(self.state['nova'], self.state['compute_hosts'])
+        vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms()
+        hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts()
+
+        # Remove VMs from hosts_to_vms that are not in vms_last_cpu
+        # These VMs are new and no data have been collected from them
+        for host, vms in hosts_to_vms.items():
+            for i, vm in enumerate(vms):
+                if not vm in vms_last_cpu:
+                    del hosts_to_vms[host][i]
+
+        hosts_cpu_usage = {}
+        hosts_ram_usage = {}
+        inactive_hosts_cpu = {}
+        inactive_hosts_ram = {}
+        for host, vms in hosts_to_vms.items():
+            if vms:
+                host_cpu_mhz = hosts_last_cpu[host]
+                for vm in vms:
+                    if vm not in vms_last_cpu:
+                        LOG.info('No data yet for VM: %s - skipping host %s',
+                                 vm,
+                                 host)
+                        hosts_cpu_total.pop(host, None)
+                        hosts_ram_total.pop(host, None)
+                        hosts_cpu_usage.pop(host, None)
+                        hosts_ram_usage.pop(host, None)
+                        break
+                    host_cpu_mhz += vms_last_cpu[vm]
+                else:
+                    hosts_cpu_usage[host] = host_cpu_mhz
+                    hosts_ram_usage[host] = host_used_ram(self.state['nova'],
+                                                          host)
+            else:
+                inactive_hosts_cpu[host] = hosts_cpu_total[host]
+                inactive_hosts_ram[host] = hosts_ram_total[host]
+                hosts_cpu_total.pop(host, None)
+                hosts_ram_total.pop(host, None)
+
+        # Exclude the overloaded host
+        hosts_cpu_usage.pop(overloaded_host, None)
+        hosts_cpu_total.pop(overloaded_host, None)
+        hosts_ram_usage.pop(overloaded_host, None)
+        hosts_ram_total.pop(overloaded_host, None)
+
+        LOG.debug('Host CPU usage: %s', str(hosts_last_cpu))
+        LOG.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
+
+        vms_to_migrate = vm_uuids
+        vms_cpu = {}
+        for vm in vms_to_migrate:
+            if vm not in vms_last_cpu:
+                LOG.info('No data yet for VM: %s - dropping the request', vm)
+                LOG.info('Skipped an overload request')
+                return self.state
+            vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
+                vm,
+                int(CONF.data_collector_data_length))
+        vms_ram = vms_ram_limit(self.state['nova'], vms_to_migrate)
+
+        # Remove VMs that are not in vms_ram
+        # These instances might have been deleted
+        for i, vm in enumerate(vms_to_migrate):
+            if not vm in vms_ram:
+                del vms_to_migrate[i]
+
+        if not vms_to_migrate:
+            LOG.info('No VMs to migrate - completed the overload request')
+            return self.state
+
+        for vm in vms_cpu.keys():
+            if not vm in vms_ram:
+                del vms_cpu[vm]
+
+        time_step = int(CONF.data_collector_interval)
+        migration_time = common.calculate_migration_time(
+            vms_ram,
+            float(CONF.network_migration_bandwidth))
+
+        if 'vm_placement' not in self.state:
+            vm_placement_params = common.parse_parameters(
+                CONF.algorithm_vm_placement_parameters)
+            vm_placement_state = None
+            vm_placement = common.call_function_by_name(
+                CONF.algorithm_vm_placement_factory,
+                [time_step,
+                 migration_time,
+                 vm_placement_params])
+            self.state['vm_placement'] = vm_placement
+            self.state['vm_placement_state'] = {}
+        else:
+            vm_placement = self.state['vm_placement']
+            vm_placement_state = self.state['vm_placement_state']
+
+        LOG.info('Started overload VM placement')
+        placement, vm_placement_state = vm_placement(
+            hosts_cpu_usage, hosts_cpu_total,
+            hosts_ram_usage, hosts_ram_total,
+            inactive_hosts_cpu, inactive_hosts_ram,
+            vms_cpu, vms_ram,
+            vm_placement_state)
+        LOG.info('Completed overload VM placement')
+        self.state['vm_placement_state'] = vm_placement_state
+
+        LOG.info('Overload: obtained a new placement %s', str(placement))
+
+        if not placement:
+            LOG.info('Nothing to migrate')
+        else:
+            hosts_to_activate = list(
+                set(inactive_hosts_cpu.keys()).intersection(
+                    set(placement.values())))
+            if hosts_to_activate:
+                self.switch_hosts_on(hosts_to_activate)
+            LOG.info('Started overload VM migrations')
+            migrate_vms(self.state['db'],
+                        self.state['nova'],
+                        CONF.vm_instance_directory,
+                        placement,
+                        bool(CONF.block_migration))
+            LOG.info('Completed overload VM migrations')
+        LOG.info('Completed processing an overload request')
+        return self.state
+
+    def service(self, reason, host, vm_uuids):
+        try:
+            if reason == 0:
+                LOG.info('Processing an underload of a host %s', host)
+                self.execute_underload(host)
+            else:
+                LOG.info('Processing an overload, VMs: %s', str(vm_uuids))
+                self.execute_overload(host, vm_uuids)
+        except Exception:
+            LOG.exception('Exception during request processing:')
+            raise
diff --git a/terracotta/locals/collector.py b/terracotta/locals/collector.py
index 45c8587..cd603bf 100644
--- a/terracotta/locals/collector.py
+++ b/terracotta/locals/collector.py
@@ -104,666 +104,648 @@ from neat.config import *
 from neat.db_utils import *
 
 import logging
+
+from terracotta.openstack.common import service
+
+
 log = logging.getLogger(__name__)
 
+class Collector(service.Service):
+    """ The local data collector service running on a compute host. """
+
+    def __init__(self):
+        # BUG FIX: super() must be passed this class; `Service` is not a
+        # bound name in this scope (only the `service` module is
+        # imported), so the original raised NameError.
+        super(Collector, self).__init__()
+        self.state = self.init_state()
+
+        # BUG FIX: the original passed the positional argument
+        # `self.state` after keyword arguments, which is a SyntaxError.
+        # add_dynamic_timer(callback, initial_delay=None,
+        # periodic_interval_max=None, *args, **kwargs) -- pass the values
+        # positionally so `self.state` lands in *args and is forwarded to
+        # self.execute().
+        # NOTE(review): `initial_delay` and `self.periodic_interval_max`
+        # are not defined anywhere in this patch -- confirm where they
+        # are supposed to come from (likely configuration options).
+        self.tg.add_dynamic_timer(
+            self.execute,
+            initial_delay,
+            self.periodic_interval_max,
+            self.state
+        )
+
+    @contract
+    def init_state(self):
+        """ Initialize a dict for storing the state of the data collector.
+
+        Opens a read-only libvirt connection, registers this host's
+        VM-usable CPU capacity and RAM in the central database, and
+        builds the initial state dict used by execute().
+
+        :return: A dict containing the initial state of the data collector.
+         :rtype: dict
+        """
+        # NOTE(review): `config` is referenced below but is not defined in
+        # this scope -- the superseded module-level init_state() received
+        # it as a parameter.  As written this raises NameError; it likely
+        # needs to read oslo.config options instead.  TODO confirm.
+        vir_connection = libvirt.openReadOnly(None)
+        if vir_connection is None:
+            message = 'Failed to open a connection to the hypervisor'
+            log.critical(message)
+            raise OSError(message)
+
+        hostname = vir_connection.getHostname()
+        host_cpu_mhz, host_ram = get_host_characteristics(vir_connection)
+        physical_cpus = common.physical_cpu_count(vir_connection)
+        host_cpu_usable_by_vms = float(config['host_cpu_usable_by_vms'])
+
+        # Advertise this host's capacity (CPU scaled by the usable-by-VMs
+        # fraction) in the central database.
+        db = init_db(config['sql_connection'])
+        db.update_host(hostname,
+                       int(host_cpu_mhz * host_cpu_usable_by_vms),
+                       physical_cpus,
+                       host_ram)
+
+        return {'previous_time': 0.,
+                'previous_cpu_time': dict(),
+                'previous_cpu_mhz': dict(),
+                'previous_host_cpu_time_total': 0.,
+                'previous_host_cpu_time_busy': 0.,
+                'previous_overload': -1,
+                'vir_connection': vir_connection,
+                'hostname': hostname,
+                'host_cpu_overload_threshold':
+                    float(config['host_cpu_overload_threshold']) * \
+                    host_cpu_usable_by_vms,
+                'physical_cpus': physical_cpus,
+                'physical_cpu_mhz': host_cpu_mhz,
+                'physical_core_mhz': host_cpu_mhz / physical_cpus,
+                'db': db}
+
+
+    def execute(self, state):
+        """ Execute a data collection iteration.
+
+        1. Read the names of the files from the <local_data_directory>/vm
+           directory to determine the list of VMs running on the host at
+           the last data collection.
+
+        2. Call the Nova API to obtain the list of VMs that are currently
+           active on the host.
+
+        3. Compare the old and new lists of VMs and determine the newly
+           added or removed VMs.
+
+        4. Delete the files from the <local_data_directory>/vm directory
+           corresponding to the VMs that have been removed from the host.
+
+        5. Fetch the latest data_collector_data_length data values from
+           the central database for each newly added VM using the
+           database connection information specified in the
+           sql_connection option and save the data in the
+           <local_data_directory>/vm directory.
+
+        6. Call the Libvirt API to obtain the CPU time for each VM active
+           on the host.  Transform the data obtained from the Libvirt API
+           into the average MHz according to the frequency of the host's
+           CPU and time interval from the previous data collection.
+
+        7. Store the converted data in the <local_data_directory>/vm
+           directory in separate files for each VM, and submit the data
+           to the central database.
+
+        :param state: A state dictionary.
+         :type state: dict(str: *)
+
+        :return: The updated state dictionary.
+         :rtype: dict(str: *)
+        """
+        log.info('Started an iteration')
+        # NOTE(review): `config` is not defined in this scope (the old
+        # module-level execute() took it as a parameter) -- this raises
+        # NameError and likely needs oslo.config options instead.
+        vm_path = common.build_local_vm_path(config['local_data_directory'])
+        host_path = common.build_local_host_path(config['local_data_directory'])
+        data_length = int(config['data_collector_data_length'])
+        vms_previous = self.get_previous_vms(vm_path)
+        vms_current = self.get_current_vms(state['vir_connection'])
+
+        vms_added = self.get_added_vms(vms_previous, vms_current.keys())
+        added_vm_data = dict()
+        if vms_added:
+            if log.isEnabledFor(logging.DEBUG):
+                log.debug('Added VMs: %s', str(vms_added))
+
+            # BUG FIX: the original deleted items from vms_added while
+            # enumerating it, which skips elements and uses stale indices;
+            # collect the VMs to drop first, then remove them.
+            migrating_in = [vm for vm in vms_added
+                            if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING]
+            for vm in migrating_in:
+                vms_added.remove(vm)
+                del vms_current[vm]
+                if log.isEnabledFor(logging.DEBUG):
+                    log.debug('Added VM %s skipped as migrating in', vm)
+
+            # BUG FIX: the helpers below are methods of this class in this
+            # patch (their module-level versions are removed), so every
+            # call must be qualified with self.
+            added_vm_data = self.fetch_remote_data(state['db'],
+                                                   data_length,
+                                                   vms_added)
+            if log.isEnabledFor(logging.DEBUG):
+                log.debug('Fetched remote data: %s', str(added_vm_data))
+            self.write_vm_data_locally(vm_path, added_vm_data, data_length)
+
+        vms_removed = self.get_removed_vms(vms_previous, vms_current.keys())
+        if vms_removed:
+            if log.isEnabledFor(logging.DEBUG):
+                log.debug('Removed VMs: %s', str(vms_removed))
+            self.cleanup_local_vm_data(vm_path, vms_removed)
+            for vm in vms_removed:
+                del state['previous_cpu_time'][vm]
+                del state['previous_cpu_mhz'][vm]
+
+        log.info('Started VM data collection')
+        current_time = time.time()
+        (cpu_time, cpu_mhz) = self.get_cpu_mhz(state['vir_connection'],
+                                               state['physical_core_mhz'],
+                                               state['previous_cpu_time'],
+                                               state['previous_time'],
+                                               current_time,
+                                               vms_current.keys(),
+                                               state['previous_cpu_mhz'],
+                                               added_vm_data)
+        log.info('Completed VM data collection')
+
+        log.info('Started host data collection')
+        # NOTE(review): get_host_cpu_mhz is not defined as a method in
+        # this patch -- confirm it is still available at module level.
+        (host_cpu_time_total,
+         host_cpu_time_busy,
+         host_cpu_mhz) = get_host_cpu_mhz(state['physical_cpu_mhz'],
+                                          state['previous_host_cpu_time_total'],
+                                          state['previous_host_cpu_time_busy'])
+        log.info('Completed host data collection')
+
+        # Data is only submitted from the second iteration onwards, once a
+        # previous timestamp exists to average against.
+        if state['previous_time'] > 0:
+            self.append_vm_data_locally(vm_path, cpu_mhz, data_length)
+            self.append_vm_data_remotely(state['db'], cpu_mhz)
+
+            total_vms_cpu_mhz = sum(cpu_mhz.values())
+            # The hypervisor's own usage is the host total minus the VMs'
+            # total, clamped at zero.
+            host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
+            if host_cpu_mhz_hypervisor < 0:
+                host_cpu_mhz_hypervisor = 0
+            total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor
+            self.append_host_data_locally(host_path, host_cpu_mhz_hypervisor,
+                                          data_length)
+            self.append_host_data_remotely(state['db'],
+                                           state['hostname'],
+                                           host_cpu_mhz_hypervisor)
+
+            if log.isEnabledFor(logging.DEBUG):
+                log.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
+                log.debug('Collected total VMs CPU MHz: %s',
+                          str(total_vms_cpu_mhz))
+                log.debug('Collected hypervisor CPU MHz: %s',
+                          str(host_cpu_mhz_hypervisor))
+                log.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
+                log.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))
+
+            # NOTE(review): log_host_overload is not defined as a method
+            # in this patch -- confirm it remains at module level.
+            state['previous_overload'] = log_host_overload(
+                state['db'],
+                state['host_cpu_overload_threshold'],
+                state['hostname'],
+                state['previous_overload'],
+                state['physical_cpu_mhz'],
+                total_cpu_mhz)
+
+        state['previous_time'] = current_time
+        state['previous_cpu_time'] = cpu_time
+        state['previous_cpu_mhz'] = cpu_mhz
+        state['previous_host_cpu_time_total'] = host_cpu_time_total
+        state['previous_host_cpu_time_busy'] = host_cpu_time_busy
+
+        log.info('Completed an iteration')
+        return state
+
+
+    @contract
+    def get_previous_vms(self, path):
+        """ Get a list of VM UUIDs from the path.
+
+        Each file in the local VM data directory is named after the UUID
+        of the VM it stores data for, so a plain directory listing yields
+        the set of VMs known at the previous collection (the order of the
+        returned entries is arbitrary).
+
+        :param path: A path to read VM UUIDs from.
+         :type path: str
+
+        :return: The list of VM UUIDs from the path.
+         :rtype: list(str)
+        """
+        return os.listdir(path)
+
+
+    @contract
+    def get_current_vms(self, vir_connection):
+        """ Get a dict of VM UUIDs to states from libvirt.
+
+        :param vir_connection: A libvirt connection object.
+         :type vir_connection: virConnect
+
+        :return: The dict of VM UUIDs to states from libvirt.
+         :rtype: dict(str: int)
+        """
+        vm_uuids = {}
+        for vm_id in vir_connection.listDomainsID():
+            try:
+                vm = vir_connection.lookupByID(vm_id)
+                vm_uuids[vm.UUIDString()] = vm.state(0)[0]
+            except libvirt.libvirtError:
+                # A domain can disappear between listDomainsID() and
+                # lookupByID(); such VMs are simply skipped.
+                pass
+        return vm_uuids
+
+
+    @contract
+    def get_added_vms(self, previous_vms, current_vms):
+        """ Get a list of newly added VM UUIDs.
+
+        :param previous_vms: A list of VMs at the previous time frame.
+         :type previous_vms: list(str)
+
+        :param current_vms: A list of VM at the current time frame.
+         :type current_vms: list(str)
+
+        :return: A list of VM UUIDs added since the last time frame.
+         :rtype: list(str)
+        """
+        # BUG FIX: substract_lists is a method of this class in this patch
+        # (its module-level version is removed), so the call must be
+        # qualified with self or it raises NameError.
+        return self.substract_lists(current_vms, previous_vms)
+
+
+    @contract
+    def get_removed_vms(self, previous_vms, current_vms):
+        """ Get a list of VM UUIDs removed since the last time frame.
 
-@contract
-def start():
-    """ Start the data collector loop.
-
-    :return: The final state.
-     :rtype: dict(str: *)
-    """
-    config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH],
-                                      REQUIRED_FIELDS)
-
-    common.init_logging(
-        config['log_directory'],
-        'data-collector.log',
-        int(config['log_level']))
-
-    vm_path = common.build_local_vm_path(config['local_data_directory'])
-    if not os.access(vm_path, os.F_OK):
-        os.makedirs(vm_path)
-        log.info('Created a local VM data directory: %s', vm_path)
-    else:
-        cleanup_all_local_data(config['local_data_directory'])
-        log.info('Creaned up the local data directory: %s',
-                 config['local_data_directory'])
-
-    interval = config['data_collector_interval']
-    log.info('Starting the data collector, ' +
-             'iterations every %s seconds', interval)
-    return common.start(
-        init_state,
-        execute,
-        config,
-        int(interval))
-
-
-@contract
-def init_state(config):
-    """ Initialize a dict for storing the state of the data collector.
-
-    :param config: A config dictionary.
-     :type config: dict(str: *)
-
-    :return: A dict containing the initial state of the data collector.
-     :rtype: dict
-    """
-    vir_connection = libvirt.openReadOnly(None)
-    if vir_connection is None:
-        message = 'Failed to open a connection to the hypervisor'
-        log.critical(message)
-        raise OSError(message)
-
-    hostname = vir_connection.getHostname()
-    host_cpu_mhz, host_ram = get_host_characteristics(vir_connection)
-    physical_cpus = common.physical_cpu_count(vir_connection)
-    host_cpu_usable_by_vms = float(config['host_cpu_usable_by_vms'])
-
-    db = init_db(config['sql_connection'])
-    db.update_host(hostname,
-                   int(host_cpu_mhz * host_cpu_usable_by_vms),
-                   physical_cpus,
-                   host_ram)
-
-    return {'previous_time': 0.,
-            'previous_cpu_time': dict(),
-            'previous_cpu_mhz': dict(),
-            'previous_host_cpu_time_total': 0.,
-            'previous_host_cpu_time_busy': 0.,
-            'previous_overload': -1,
-            'vir_connection': vir_connection,
-            'hostname': hostname,
-            'host_cpu_overload_threshold':
-                float(config['host_cpu_overload_threshold']) * \
-                host_cpu_usable_by_vms,
-            'physical_cpus': physical_cpus,
-            'physical_cpu_mhz': host_cpu_mhz,
-            'physical_core_mhz': host_cpu_mhz / physical_cpus,
-            'db': db}
-
-
-def execute(config, state):
-    """ Execute a data collection iteration.
-
-1. Read the names of the files from the <local_data_directory>/vm
-   directory to determine the list of VMs running on the host at the
-   last data collection.
-
-2. Call the Nova API to obtain the list of VMs that are currently
-   active on the host.
-
-3. Compare the old and new lists of VMs and determine the newly added
-   or removed VMs.
-
-4. Delete the files from the <local_data_directory>/vm directory
-   corresponding to the VMs that have been removed from the host.
-
-5. Fetch the latest data_collector_data_length data values from the
-   central database for each newly added VM using the database
-   connection information specified in the sql_connection option and
-   save the data in the <local_data_directory>/vm directory.
-
-6. Call the Libvirt API to obtain the CPU time for each VM active on
-   the host. Transform the data obtained from the Libvirt API into the
-   average MHz according to the frequency of the host's CPU and time
-   interval from the previous data collection.
-
-8. Store the converted data in the <local_data_directory>/vm
-   directory in separate files for each VM, and submit the data to the
-   central database.
-
-    :param config: A config dictionary.
-     :type config: dict(str: *)
-
-    :param state: A state dictionary.
-     :type state: dict(str: *)
-
-    :return: The updated state dictionary.
-     :rtype: dict(str: *)
-    """
-    log.info('Started an iteration')
-    vm_path = common.build_local_vm_path(config['local_data_directory'])
-    host_path = common.build_local_host_path(config['local_data_directory'])
-    data_length = int(config['data_collector_data_length'])
-    vms_previous = get_previous_vms(vm_path)
-    vms_current = get_current_vms(state['vir_connection'])
-
-    vms_added = get_added_vms(vms_previous, vms_current.keys())
-    added_vm_data = dict()
-    if vms_added:
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('Added VMs: %s', str(vms_added))
-
-        for i, vm in enumerate(vms_added):
-            if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING:
-                del vms_added[i]
-                del vms_current[vm]
-                if log.isEnabledFor(logging.DEBUG):
-                    log.debug('Added VM %s skipped as migrating in', vm)
-
-        added_vm_data = fetch_remote_data(state['db'],
-                                          data_length,
-                                          vms_added)
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('Fetched remote data: %s', str(added_vm_data))
-        write_vm_data_locally(vm_path, added_vm_data, data_length)
-
-    vms_removed = get_removed_vms(vms_previous, vms_current.keys())
-    if vms_removed:
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('Removed VMs: %s', str(vms_removed))
-        cleanup_local_vm_data(vm_path, vms_removed)
-        for vm in vms_removed:
-            del state['previous_cpu_time'][vm]
-            del state['previous_cpu_mhz'][vm]
-
-    log.info('Started VM data collection')
-    current_time = time.time()
-    (cpu_time, cpu_mhz) = get_cpu_mhz(state['vir_connection'],
-                                      state['physical_core_mhz'],
-                                      state['previous_cpu_time'],
-                                      state['previous_time'],
-                                      current_time,
-                                      vms_current.keys(),
-                                      state['previous_cpu_mhz'],
-                                      added_vm_data)
-    log.info('Completed VM data collection')
-
-    log.info('Started host data collection')
-    (host_cpu_time_total,
-     host_cpu_time_busy,
-     host_cpu_mhz) = get_host_cpu_mhz(state['physical_cpu_mhz'],
-                                      state['previous_host_cpu_time_total'],
-                                      state['previous_host_cpu_time_busy'])
-    log.info('Completed host data collection')
-
-    if state['previous_time'] > 0:
-        append_vm_data_locally(vm_path, cpu_mhz, data_length)
-        append_vm_data_remotely(state['db'], cpu_mhz)
-
-        total_vms_cpu_mhz = sum(cpu_mhz.values())
-        host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
-        if host_cpu_mhz_hypervisor < 0:
-            host_cpu_mhz_hypervisor = 0
-        total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor
-        append_host_data_locally(host_path, host_cpu_mhz_hypervisor, data_length)
-        append_host_data_remotely(state['db'],
-                                  state['hostname'],
-                                  host_cpu_mhz_hypervisor)
-
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
-            log.debug('Collected total VMs CPU MHz: %s', str(total_vms_cpu_mhz))
-            log.debug('Collected hypervisor CPU MHz: %s', str(host_cpu_mhz_hypervisor))
-            log.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
-            log.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))
-
-        state['previous_overload'] = log_host_overload(
-            state['db'],
-            state['host_cpu_overload_threshold'],
-            state['hostname'],
-            state['previous_overload'],
-            state['physical_cpu_mhz'],
-            total_cpu_mhz)
-
-    state['previous_time'] = current_time
-    state['previous_cpu_time'] = cpu_time
-    state['previous_cpu_mhz'] = cpu_mhz
-    state['previous_host_cpu_time_total'] = host_cpu_time_total
-    state['previous_host_cpu_time_busy'] = host_cpu_time_busy
-
-    log.info('Completed an iteration')
-    return state
-
-
-@contract
-def get_previous_vms(path):
-    """ Get a list of VM UUIDs from the path.
-
-    :param path: A path to read VM UUIDs from.
-     :type path: str
-
-    :return: The list of VM UUIDs from the path.
-     :rtype: list(str)
-    """
-    return os.listdir(path)
-
-
-@contract()
-def get_current_vms(vir_connection):
-    """ Get a dict of VM UUIDs to states from libvirt.
-
-    :param vir_connection: A libvirt connection object.
-     :type vir_connection: virConnect
-
-    :return: The dict of VM UUIDs to states from libvirt.
-     :rtype: dict(str: int)
-    """
-    vm_uuids = {}
-    for vm_id in vir_connection.listDomainsID():
-        try:
-            vm = vir_connection.lookupByID(vm_id)
-            vm_uuids[vm.UUIDString()] = vm.state(0)[0]
-        except libvirt.libvirtError:
-            pass
-    return vm_uuids
-
-
-@contract
-def get_added_vms(previous_vms, current_vms):
-    """ Get a list of newly added VM UUIDs.
-
-    :param previous_vms: A list of VMs at the previous time frame.
-     :type previous_vms: list(str)
-
-    :param current_vms: A list of VM at the current time frame.
-     :type current_vms: list(str)
-
-    :return: A list of VM UUIDs added since the last time frame.
-     :rtype: list(str)
-    """
-    return substract_lists(current_vms, previous_vms)
-
-
-@contract
-def get_removed_vms(previous_vms, current_vms):
-    """ Get a list of VM UUIDs removed since the last time frame.
+        :param previous_vms: A list of VMs at the previous time frame.
+         :type previous_vms: list(str)
 
-    :param previous_vms: A list of VMs at the previous time frame.
-     :type previous_vms: list(str)
+        :param current_vms: A list of VM at the current time frame.
+         :type current_vms: list(str)
 
-    :param current_vms: A list of VM at the current time frame.
-     :type current_vms: list(str)
+        :return: A list of VM UUIDs removed since the last time frame.
+         :rtype: list(str)
+        """
+        return substract_lists(previous_vms, current_vms)
 
-    :return: A list of VM UUIDs removed since the last time frame.
-     :rtype: list(str)
-    """
-    return substract_lists(previous_vms, current_vms)
 
+    @contract
+    def substract_lists(self, list1, list2):
+        """ Return the elements of list1 that are not in list2.
 
-@contract
-def substract_lists(list1, list2):
-    """ Return the elements of list1 that are not in list2.
+        :param list1: The first list.
+         :type list1: list
 
-    :param list1: The first list.
-     :type list1: list
+        :param list2: The second list.
+         :type list2: list
 
-    :param list2: The second list.
-     :type list2: list
+        :return: The list of element of list 1 that are not in list2.
+         :rtype: list
+        """
+        return list(set(list1).difference(list2))
 
-    :return: The list of element of list 1 that are not in list2.
-     :rtype: list
-    """
-    return list(set(list1).difference(list2))
 
+    @contract
+    def cleanup_local_vm_data(self, path, vms):
+        """ Delete the local data related to the removed VMs.
 
-@contract
-def cleanup_local_vm_data(path, vms):
-    """ Delete the local data related to the removed VMs.
+        :param path: A path to remove VM data from.
+         :type path: str
 
-    :param path: A path to remove VM data from.
-     :type path: str
+        :param vms: A list of removed VM UUIDs.
+         :type vms: list(str)
+        """
+        for vm in vms:
+            os.remove(os.path.join(path, vm))
 
-    :param vms: A list of removed VM UUIDs.
-     :type vms: list(str)
-    """
-    for vm in vms:
-        os.remove(os.path.join(path, vm))
 
+    @contract
+    def cleanup_all_local_data(self, path):
+        """ Delete all the local data about VMs.
 
-@contract
-def cleanup_all_local_data(path):
-    """ Delete all the local data about VMs.
+        :param path: A path to the local data directory.
+         :type path: str
+        """
+        vm_path = common.build_local_vm_path(path)
+        cleanup_local_vm_data(vm_path, os.listdir(vm_path))
+        host_path = common.build_local_host_path(path)
+        if os.access(host_path, os.F_OK):
+            os.remove(host_path)
 
-    :param path: A path to the local data directory.
-     :type path: str
-    """
-    vm_path = common.build_local_vm_path(path)
-    cleanup_local_vm_data(vm_path, os.listdir(vm_path))
-    host_path = common.build_local_host_path(path)
-    if os.access(host_path, os.F_OK):
-        os.remove(host_path)
 
+    @contract
+    def fetch_remote_data(self, db, data_length, uuids):
+        """ Fetch VM data from the central DB.
 
-@contract
-def fetch_remote_data(db, data_length, uuids):
-    """ Fetch VM data from the central DB.
+        :param db: The database object.
+         :type db: Database
 
-    :param db: The database object.
-     :type db: Database
+        :param data_length: The length of data to fetch.
+         :type data_length: int
 
-    :param data_length: The length of data to fetch.
-     :type data_length: int
+        :param uuids: A list of VM UUIDs to fetch data for.
+         :type uuids: list(str)
 
-    :param uuids: A list of VM UUIDs to fetch data for.
-     :type uuids: list(str)
+        :return: A dictionary of VM UUIDs and the corresponding data.
+         :rtype: dict(str : list(int))
+        """
+        result = dict()
+        for uuid in uuids:
+            result[uuid] = db.select_cpu_mhz_for_vm(uuid, data_length)
+        return result
 
-    :return: A dictionary of VM UUIDs and the corresponding data.
-     :rtype: dict(str : list(int))
-    """
-    result = dict()
-    for uuid in uuids:
-        result[uuid] = db.select_cpu_mhz_for_vm(uuid, data_length)
-    return result
 
+    @contract
+    def write_vm_data_locally(self, path, data, data_length):
+        """ Write a set of CPU MHz values for a set of VMs.
 
-@contract
-def write_vm_data_locally(path, data, data_length):
-    """ Write a set of CPU MHz values for a set of VMs.
+        :param path: A path to write the data to.
+         :type path: str
 
-    :param path: A path to write the data to.
-     :type path: str
+        :param data: A map of VM UUIDs onto the corresponing CPU MHz history.
+         :type data: dict(str : list(int))
 
-    :param data: A map of VM UUIDs onto the corresponing CPU MHz history.
-     :type data: dict(str : list(int))
+        :param data_length: The maximum allowed length of the data.
+         :type data_length: int
+        """
+        for uuid, values in data.items():
+            with open(os.path.join(path, uuid), 'w') as f:
+                if data_length > 0:
+                    f.write('\n'.join([str(x)
+                                       for x in values[-data_length:]]) + '\n')
 
-    :param data_length: The maximum allowed length of the data.
-     :type data_length: int
-    """
-    for uuid, values in data.items():
-        with open(os.path.join(path, uuid), 'w') as f:
-            if data_length > 0:
-                f.write('\n'.join([str(x)
-                                   for x in values[-data_length:]]) + '\n')
 
+    @contract
+    def append_vm_data_locally(self, path, data, data_length):
+        """ Write a CPU MHz value for each out of a set of VMs.
 
-@contract
-def append_vm_data_locally(path, data, data_length):
-    """ Write a CPU MHz value for each out of a set of VMs.
+        :param path: A path to write the data to.
+         :type path: str
 
-    :param path: A path to write the data to.
-     :type path: str
+        :param data: A map of VM UUIDs onto the corresponing CPU MHz values.
+         :type data: dict(str : int)
 
-    :param data: A map of VM UUIDs onto the corresponing CPU MHz values.
-     :type data: dict(str : int)
+        :param data_length: The maximum allowed length of the data.
+         :type data_length: int
+        """
+        for uuid, value in data.items():
+            vm_path = os.path.join(path, uuid)
+            if not os.access(vm_path, os.F_OK):
+                with open(vm_path, 'w') as f:
+                    f.write(str(value) + '\n')
+            else:
+                with open(vm_path, 'r+') as f:
+                    values = deque(f.read().strip().splitlines(), data_length)
+                    values.append(value)
+                    f.truncate(0)
+                    f.seek(0)
+                    f.write('\n'.join([str(x) for x in values]) + '\n')
 
-    :param data_length: The maximum allowed length of the data.
-     :type data_length: int
-    """
-    for uuid, value in data.items():
-        vm_path = os.path.join(path, uuid)
-        if not os.access(vm_path, os.F_OK):
-            with open(vm_path, 'w') as f:
-                f.write(str(value) + '\n')
+
+    @contract
+    def append_vm_data_remotely(self, db, data):
+        """ Submit CPU MHz values to the central database.
+
+        :param db: The database object.
+         :type db: Database
+
+        :param data: A map of VM UUIDs onto the corresponing CPU MHz values.
+         :type data: dict(str : int)
+        """
+        db.insert_vm_cpu_mhz(data)
+
+
+    @contract
+    def append_host_data_locally(self, path, cpu_mhz, data_length):
+        """ Write a CPU MHz value for the host.
+
+        :param path: A path to write the data to.
+         :type path: str
+
+        :param cpu_mhz: A CPU MHz value.
+         :type cpu_mhz: int,>=0
+
+        :param data_length: The maximum allowed length of the data.
+         :type data_length: int
+        """
+        if not os.access(path, os.F_OK):
+            with open(path, 'w') as f:
+                f.write(str(cpu_mhz) + '\n')
         else:
-            with open(vm_path, 'r+') as f:
+            with open(path, 'r+') as f:
                 values = deque(f.read().strip().splitlines(), data_length)
-                values.append(value)
+                values.append(cpu_mhz)
                 f.truncate(0)
                 f.seek(0)
                 f.write('\n'.join([str(x) for x in values]) + '\n')
 
 
-@contract
-def append_vm_data_remotely(db, data):
-    """ Submit CPU MHz values to the central database.
+    @contract
+    def append_host_data_remotely(self, db, hostname, host_cpu_mhz):
+        """ Submit a host CPU MHz value to the central database.
 
-    :param db: The database object.
-     :type db: Database
+        :param db: The database object.
+         :type db: Database
 
-    :param data: A map of VM UUIDs onto the corresponing CPU MHz values.
-     :type data: dict(str : int)
-    """
-    db.insert_vm_cpu_mhz(data)
+        :param hostname: The host name.
+         :type hostname: str
+
+        :param host_cpu_mhz: An average host CPU utilization in MHz.
+         :type host_cpu_mhz: int,>=0
+        """
+        db.insert_host_cpu_mhz(hostname, host_cpu_mhz)
 
 
-@contract
-def append_host_data_locally(path, cpu_mhz, data_length):
-    """ Write a CPU MHz value for the host.
+    @contract
+    def get_cpu_mhz(self, vir_connection, physical_core_mhz, previous_cpu_time,
+                    previous_time, current_time, current_vms,
+                    previous_cpu_mhz, added_vm_data):
+        """ Get the average CPU utilization in MHz for a set of VMs.
 
-    :param path: A path to write the data to.
-     :type path: str
+        :param vir_connection: A libvirt connection object.
+         :type vir_connection: virConnect
 
-    :param cpu_mhz: A CPU MHz value.
-     :type cpu_mhz: int,>=0
+        :param physical_core_mhz: The core frequency of the physical CPU in MHz.
+         :type physical_core_mhz: int
 
-    :param data_length: The maximum allowed length of the data.
-     :type data_length: int
-    """
-    if not os.access(path, os.F_OK):
-        with open(path, 'w') as f:
-            f.write(str(cpu_mhz) + '\n')
-    else:
-        with open(path, 'r+') as f:
-            values = deque(f.read().strip().splitlines(), data_length)
-            values.append(cpu_mhz)
-            f.truncate(0)
-            f.seek(0)
-            f.write('\n'.join([str(x) for x in values]) + '\n')
+        :param previous_cpu_time: A dict of previous CPU times for the VMs.
+         :type previous_cpu_time: dict(str : int)
 
+        :param previous_time: The previous timestamp.
+         :type previous_time: float
 
-@contract
-def append_host_data_remotely(db, hostname, host_cpu_mhz):
-    """ Submit a host CPU MHz value to the central database.
+        :param current_time: The previous timestamp.
+         :type current_time: float
 
-    :param db: The database object.
-     :type db: Database
+        :param current_vms: A list of VM UUIDs.
+         :type current_vms: list(str)
 
-    :param hostname: The host name.
-     :type hostname: str
+        :param previous_cpu_mhz: A dict of VM UUIDs and previous CPU MHz.
+         :type previous_cpu_mhz: dict(str : int)
 
-    :param host_cpu_mhz: An average host CPU utilization in MHz.
-     :type host_cpu_mhz: int,>=0
-    """
-    db.insert_host_cpu_mhz(hostname, host_cpu_mhz)
+        :param added_vm_data: A dict of VM UUIDs and the corresponding data.
+         :type added_vm_data: dict(str : list(int))
 
+        :return: The updated CPU times and average CPU utilization in MHz.
+         :rtype: tuple(dict(str : int), dict(str : int))
+        """
+        previous_vms = previous_cpu_time.keys()
+        added_vms = get_added_vms(previous_vms, current_vms)
+        removed_vms = get_removed_vms(previous_vms, current_vms)
+        cpu_mhz = {}
 
-@contract
-def get_cpu_mhz(vir_connection, physical_core_mhz, previous_cpu_time,
-                previous_time, current_time, current_vms,
-                previous_cpu_mhz, added_vm_data):
-    """ Get the average CPU utilization in MHz for a set of VMs.
+        for uuid in removed_vms:
+            del previous_cpu_time[uuid]
 
-    :param vir_connection: A libvirt connection object.
-     :type vir_connection: virConnect
-
-    :param physical_core_mhz: The core frequency of the physical CPU in MHz.
-     :type physical_core_mhz: int
-
-    :param previous_cpu_time: A dict of previous CPU times for the VMs.
-     :type previous_cpu_time: dict(str : int)
-
-    :param previous_time: The previous timestamp.
-     :type previous_time: float
-
-    :param current_time: The previous timestamp.
-     :type current_time: float
-
-    :param current_vms: A list of VM UUIDs.
-     :type current_vms: list(str)
-
-    :param previous_cpu_mhz: A dict of VM UUIDs and previous CPU MHz.
-     :type previous_cpu_mhz: dict(str : int)
-
-    :param added_vm_data: A dict of VM UUIDs and the corresponding data.
-     :type added_vm_data: dict(str : list(int))
-
-    :return: The updated CPU times and average CPU utilization in MHz.
-     :rtype: tuple(dict(str : int), dict(str : int))
-    """
-    previous_vms = previous_cpu_time.keys()
-    added_vms = get_added_vms(previous_vms, current_vms)
-    removed_vms = get_removed_vms(previous_vms, current_vms)
-    cpu_mhz = {}
-
-    for uuid in removed_vms:
-        del previous_cpu_time[uuid]
-
-    for uuid, cpu_time in previous_cpu_time.items():
-        current_cpu_time = get_cpu_time(vir_connection, uuid)
-        if current_cpu_time < cpu_time:
+        for uuid, cpu_time in previous_cpu_time.items():
+            current_cpu_time = get_cpu_time(vir_connection, uuid)
+            if current_cpu_time < cpu_time:
+                if log.isEnabledFor(logging.DEBUG):
+                    log.debug('VM %s: current_cpu_time < cpu_time: ' +
+                              'previous CPU time %d, ' +
+                              'current CPU time %d',
+                              uuid, cpu_time, current_cpu_time)
+                    log.debug('VM %s: using previous CPU MHz %d',
+                              uuid, previous_cpu_mhz[uuid])
+                cpu_mhz[uuid] = previous_cpu_mhz[uuid]
+            else:
+                if log.isEnabledFor(logging.DEBUG):
+                    log.debug('VM %s: previous CPU time %d, ' +
+                              'current CPU time %d, ' +
+                              'previous time %.10f, ' +
+                              'current time %.10f',
+                              uuid, cpu_time, current_cpu_time,
+                              previous_time, current_time)
+                cpu_mhz[uuid] = calculate_cpu_mhz(physical_core_mhz,
+                                                  previous_time,
+                                                  current_time,
+                                                  cpu_time,
+                                                  current_cpu_time)
+            previous_cpu_time[uuid] = current_cpu_time
             if log.isEnabledFor(logging.DEBUG):
-                log.debug('VM %s: current_cpu_time < cpu_time: ' +
-                          'previous CPU time %d, ' +
-                          'current CPU time %d',
-                          uuid, cpu_time, current_cpu_time)
-                log.debug('VM %s: using previous CPU MHz %d',
-                          uuid, previous_cpu_mhz[uuid])
-            cpu_mhz[uuid] = previous_cpu_mhz[uuid]
-        else:
+                log.debug('VM %s: CPU MHz %d', uuid, cpu_mhz[uuid])
+
+        for uuid in added_vms:
+            if added_vm_data[uuid]:
+                cpu_mhz[uuid] = added_vm_data[uuid][-1]
+            previous_cpu_time[uuid] = get_cpu_time(vir_connection, uuid)
+
+        return previous_cpu_time, cpu_mhz
+
+
+    @contract
+    def get_cpu_time(self, vir_connection, uuid):
+        """ Get the CPU time of a VM specified by the UUID using libvirt.
+
+        :param vir_connection: A libvirt connection object.
+         :type vir_connection: virConnect
+
+        :param uuid: The UUID of a VM.
+         :type uuid: str[36]
+
+        :return: The CPU time of the VM.
+         :rtype: int,>=0
+        """
+        try:
+            domain = vir_connection.lookupByUUIDString(uuid)
+            return int(domain.getCPUStats(True, 0)[0]['cpu_time'])
+        except libvirt.libvirtError:
+            return 0
+
+
+    @contract
+    def calculate_cpu_mhz(self, cpu_mhz, previous_time, current_time,
+                          previous_cpu_time, current_cpu_time):
+        """ Calculate the average CPU utilization in MHz for a period of time.
+
+        :param cpu_mhz: The frequency of a core of the physical CPU in MHz.
+         :type cpu_mhz: int
+
+        :param previous_time: The previous time.
+         :type previous_time: float
+
+        :param current_time: The current time.
+         :type current_time: float
+
+        :param previous_cpu_time: The previous CPU time of the domain.
+         :type previous_cpu_time: int
+
+        :param current_cpu_time: The current CPU time of the domain.
+         :type current_cpu_time: int
+
+        :return: The average CPU utilization in MHz.
+         :rtype: int,>=0
+        """
+        return int(cpu_mhz * float(current_cpu_time - previous_cpu_time) / \
+                   ((current_time - previous_time) * 1000000000))
+
+
+    @contract
+    def get_host_cpu_mhz(self, cpu_mhz, previous_cpu_time_total, previous_cpu_time_busy):
+        """ Get the average CPU utilization in MHz for a set of VMs.
+
+        :param cpu_mhz: The total frequency of the physical CPU in MHz.
+         :type cpu_mhz: int
+
+        :param previous_cpu_time_total: The previous total CPU time.
+         :type previous_cpu_time_total: float
+
+        :param previous_cpu_time_busy: The previous busy CPU time.
+         :type previous_cpu_time_busy: float
+
+        :return: The current total and busy CPU time, and CPU utilization in MHz.
+         :rtype: tuple(float, float, int)
+        """
+        cpu_time_total, cpu_time_busy = get_host_cpu_time()
+        cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) / \
+                        (cpu_time_total - previous_cpu_time_total))
+        if cpu_usage < 0:
+            raise ValueError('The host CPU usage in MHz must be >=0, but it is: ' + str(cpu_usage) +
+                             '; cpu_mhz=' + str(cpu_mhz) +
+                             '; previous_cpu_time_total=' + str(previous_cpu_time_total) +
+                             '; cpu_time_total=' + str(cpu_time_total) +
+                             '; previous_cpu_time_busy=' + str(previous_cpu_time_busy) +
+                             '; cpu_time_busy=' + str(cpu_time_busy))
+        return cpu_time_total, \
+               cpu_time_busy, \
+               cpu_usage
+
+
+    @contract()
+    def get_host_cpu_time(self):
+        """ Get the total and busy CPU time of the host.
+
+        :return: A tuple of the total and busy CPU time.
+         :rtype: tuple(float, float)
+        """
+        with open('/proc/stat', 'r') as f:
+            values = [float(x) for x in f.readline().split()[1:8]]
+            return sum(values), sum(values[0:3])
+
+
+    @contract()
+    def get_host_characteristics(self, vir_connection):
+        """ Get the total CPU MHz and RAM of the host.
+
+        :param vir_connection: A libvirt connection object.
+         :type vir_connection: virConnect
+
+        :return: A tuple of the total CPU MHz and RAM of the host.
+         :rtype: tuple(int, long)
+        """
+        info = vir_connection.getInfo()
+        return info[2] * info[3], info[1]
+
+
+    @contract()
+    def log_host_overload(self, db, overload_threshold, hostname, previous_overload,
+                          host_total_mhz, host_utilization_mhz):
+        """ Log to the DB whether the host is overloaded.
+
+        :param db: The database object.
+         :type db: Database
+
+        :param overload_threshold: The host overload threshold.
+         :type overload_threshold: float
+
+        :param hostname: The host name.
+         :type hostname: str
+
+        :param previous_overload: Whether the host has been overloaded.
+         :type previous_overload: int
+
+        :param host_total_mhz: The total frequency of the CPU in MHz.
+         :type host_total_mhz: int
+
+        :param host_utilization_mhz: The total CPU utilization in MHz.
+         :type host_utilization_mhz: int
+
+        :return: Whether the host is overloaded.
+         :rtype: int
+        """
+        overload = overload_threshold * host_total_mhz < host_utilization_mhz
+        overload_int = int(overload)
+        if previous_overload != -1 and previous_overload != overload_int \
+                or previous_overload == -1:
+            db.insert_host_overload(hostname, overload)
             if log.isEnabledFor(logging.DEBUG):
-                log.debug('VM %s: previous CPU time %d, ' +
-                          'current CPU time %d, ' +
-                          'previous time %.10f, ' +
-                          'current time %.10f',
-                          uuid, cpu_time, current_cpu_time,
-                          previous_time, current_time)
-            cpu_mhz[uuid] = calculate_cpu_mhz(physical_core_mhz,
-                                              previous_time,
-                                              current_time,
-                                              cpu_time,
-                                              current_cpu_time)
-        previous_cpu_time[uuid] = current_cpu_time
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('VM %s: CPU MHz %d', uuid, cpu_mhz[uuid])
+                log.debug('Overload state logged: %s', str(overload))
 
-    for uuid in added_vms:
-        if added_vm_data[uuid]:
-            cpu_mhz[uuid] = added_vm_data[uuid][-1]
-        previous_cpu_time[uuid] = get_cpu_time(vir_connection, uuid)
-
-    return previous_cpu_time, cpu_mhz
-
-
-@contract
-def get_cpu_time(vir_connection, uuid):
-    """ Get the CPU time of a VM specified by the UUID using libvirt.
-
-    :param vir_connection: A libvirt connection object.
-     :type vir_connection: virConnect
-
-    :param uuid: The UUID of a VM.
-     :type uuid: str[36]
-
-    :return: The CPU time of the VM.
-     :rtype: int,>=0
-    """
-    try:
-        domain = vir_connection.lookupByUUIDString(uuid)
-        return int(domain.getCPUStats(True, 0)[0]['cpu_time'])
-    except libvirt.libvirtError:
-        return 0
-
-
-@contract
-def calculate_cpu_mhz(cpu_mhz, previous_time, current_time,
-                      previous_cpu_time, current_cpu_time):
-    """ Calculate the average CPU utilization in MHz for a period of time.
-
-    :param cpu_mhz: The frequency of a core of the physical CPU in MHz.
-     :type cpu_mhz: int
-
-    :param previous_time: The previous time.
-     :type previous_time: float
-
-    :param current_time: The current time.
-     :type current_time: float
-
-    :param previous_cpu_time: The previous CPU time of the domain.
-     :type previous_cpu_time: int
-
-    :param current_cpu_time: The current CPU time of the domain.
-     :type current_cpu_time: int
-
-    :return: The average CPU utilization in MHz.
-     :rtype: int,>=0
-    """
-    return int(cpu_mhz * float(current_cpu_time - previous_cpu_time) / \
-               ((current_time - previous_time) * 1000000000))
-
-
-@contract
-def get_host_cpu_mhz(cpu_mhz, previous_cpu_time_total, previous_cpu_time_busy):
-    """ Get the average CPU utilization in MHz for a set of VMs.
-
-    :param cpu_mhz: The total frequency of the physical CPU in MHz.
-     :type cpu_mhz: int
-
-    :param previous_cpu_time_total: The previous total CPU time.
-     :type previous_cpu_time_total: float
-
-    :param previous_cpu_time_busy: The previous busy CPU time.
-     :type previous_cpu_time_busy: float
-
-    :return: The current total and busy CPU time, and CPU utilization in MHz.
-     :rtype: tuple(float, float, int)
-    """
-    cpu_time_total, cpu_time_busy = get_host_cpu_time()
-    cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) / \
-                    (cpu_time_total - previous_cpu_time_total))
-    if cpu_usage < 0:
-        raise ValueError('The host CPU usage in MHz must be >=0, but it is: ' + str(cpu_usage) +
-                         '; cpu_mhz=' + str(cpu_mhz) +
-                         '; previous_cpu_time_total=' + str(previous_cpu_time_total) +
-                         '; cpu_time_total=' + str(cpu_time_total) +
-                         '; previous_cpu_time_busy=' + str(previous_cpu_time_busy) +
-                         '; cpu_time_busy=' + str(cpu_time_busy))
-    return cpu_time_total, \
-           cpu_time_busy, \
-           cpu_usage
-
-
-@contract()
-def get_host_cpu_time():
-    """ Get the total and busy CPU time of the host.
-
-    :return: A tuple of the total and busy CPU time.
-     :rtype: tuple(float, float)
-    """
-    with open('/proc/stat', 'r') as f:
-        values = [float(x) for x in f.readline().split()[1:8]]
-        return sum(values), sum(values[0:3])
-
-
-@contract()
-def get_host_characteristics(vir_connection):
-    """ Get the total CPU MHz and RAM of the host.
-
-    :param vir_connection: A libvirt connection object.
-     :type vir_connection: virConnect
-
-    :return: A tuple of the total CPU MHz and RAM of the host.
-     :rtype: tuple(int, long)
-    """
-    info = vir_connection.getInfo()
-    return info[2] * info[3], info[1]
-
-
-@contract()
-def log_host_overload(db, overload_threshold, hostname, previous_overload,
-                      host_total_mhz, host_utilization_mhz):
-    """ Log to the DB whether the host is overloaded.
-
-    :param db: The database object.
-     :type db: Database
-
-    :param overload_threshold: The host overload threshold.
-     :type overload_threshold: float
-
-    :param hostname: The host name.
-     :type hostname: str
-
-    :param previous_overload: Whether the host has been overloaded.
-     :type previous_overload: int
-
-    :param host_total_mhz: The total frequency of the CPU in MHz.
-     :type host_total_mhz: int
-
-    :param host_utilization_mhz: The total CPU utilization in MHz.
-     :type host_utilization_mhz: int
-
-    :return: Whether the host is overloaded.
-     :rtype: int
-    """
-    overload = overload_threshold * host_total_mhz < host_utilization_mhz
-    overload_int = int(overload)
-    if previous_overload != -1 and previous_overload != overload_int \
-            or previous_overload == -1:
-        db.insert_host_overload(hostname, overload)
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug('Overload state logged: %s', str(overload))
-
-    return overload_int
+        return overload_int
diff --git a/terracotta/locals/manager.py b/terracotta/locals/manager.py
index 6e6d4a9..6357144 100644
--- a/terracotta/locals/manager.py
+++ b/terracotta/locals/manager.py
@@ -113,215 +113,177 @@ from neat.config import *
 from neat.db_utils import *
 
 import logging
+
+from terracotta.openstack.common import service
+
+
 log = logging.getLogger(__name__)
 
 
-@contract
-def start():
-    """ Start the local manager loop.
+class LocalManager(service.Service):
 
-    :return: The final state.
-     :rtype: dict(str: *)
-    """
-    config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH],
-                                      REQUIRED_FIELDS)
+    def __init__(self):
+        super(Service, self).__init__()
+        self.state = self.init_state()
 
-    common.init_logging(
-        config['log_directory'],
-        'local-manager.log',
-        int(config['log_level']))
+        self.tg.add_dynamic_timer(
+            self.execute,
+            initial_delay=initial_delay,
+            periodic_interval_max=self.periodic_interval_max,
+            self.state
+        )
 
-    interval = config['local_manager_interval']
-    if log.isEnabledFor(logging.INFO):
-        log.info('Starting the local manager, ' +
-                 'iterations every %s seconds', interval)
-    return common.start(
-        init_state,
-        execute,
-        config,
-        int(interval))
+    def init_state(self):
+        """ Initialize a dict for storing the state of the local manager.
 
+        :param config: A config dictionary.
+         :type config: dict(str: *)
 
-@contract
-def init_state(config):
-    """ Initialize a dict for storing the state of the local manager.
+        :return: A dictionary containing the initial state of the local manager.
+         :rtype: dict
+        """
+        vir_connection = libvirt.openReadOnly(None)
+        if vir_connection is None:
+            message = 'Failed to open a connection to the hypervisor'
+            log.critical(message)
+            raise OSError(message)
 
-    :param config: A config dictionary.
-     :type config: dict(str: *)
+        physical_cpu_mhz_total = int(
+            common.physical_cpu_mhz_total(vir_connection) *
+            float(config['host_cpu_usable_by_vms']))
+        return {'previous_time': 0.,
+                'vir_connection': vir_connection,
+                'db': init_db(config['sql_connection']),
+                'physical_cpu_mhz_total': physical_cpu_mhz_total,
+                'hostname': vir_connection.getHostname(),
+                'hashed_username': sha1(config['os_admin_user']).hexdigest(),
+                'hashed_password': sha1(config['os_admin_password']).hexdigest()}
 
-    :return: A dictionary containing the initial state of the local manager.
-     :rtype: dict
-    """
-    vir_connection = libvirt.openReadOnly(None)
-    if vir_connection is None:
-        message = 'Failed to open a connection to the hypervisor'
-        log.critical(message)
-        raise OSError(message)
+    def execute(self, state):
+        """ Execute an iteration of the local manager.
 
-    physical_cpu_mhz_total = int(
-        common.physical_cpu_mhz_total(vir_connection) *
-        float(config['host_cpu_usable_by_vms']))
-    return {'previous_time': 0.,
-            'vir_connection': vir_connection,
-            'db': init_db(config['sql_connection']),
-            'physical_cpu_mhz_total': physical_cpu_mhz_total,
-            'hostname': vir_connection.getHostname(),
-            'hashed_username': sha1(config['os_admin_user']).hexdigest(),
-            'hashed_password': sha1(config['os_admin_password']).hexdigest()}
+    1. Read the data on resource usage by the VMs running on the host from
+       the <local_data_directory>/vm directory.
 
+    2. Call the function specified in the algorithm_underload_detection
+       configuration option and pass the data on the resource usage by the
+       VMs, as well as the frequency of the CPU as arguments.
 
-@contract
-def execute(config, state):
-    """ Execute an iteration of the local manager.
+    3. If the host is underloaded, send a request to the REST API of the
+       global manager and pass a list of the UUIDs of all the VMs
+       currently running on the host in the vm_uuids parameter, as well as
+       the reason for migration as being 0.
 
-1. Read the data on resource usage by the VMs running on the host from
-   the <local_data_directory>/vm directory.
+    4. If the host is not underloaded, call the function specified in the
+       algorithm_overload_detection configuration option and pass the data
+       on the resource usage by the VMs, as well as the frequency of the
+       host's CPU as arguments.
 
-2. Call the function specified in the algorithm_underload_detection
-   configuration option and pass the data on the resource usage by the
-   VMs, as well as the frequency of the CPU as arguments.
+    5. If the host is overloaded, call the function specified in the
+       algorithm_vm_selection configuration option and pass the data on
+       the resource usage by the VMs, as well as the frequency of the
+       host's CPU as arguments
 
-3. If the host is underloaded, send a request to the REST API of the
-   global manager and pass a list of the UUIDs of all the VMs
-   currently running on the host in the vm_uuids parameter, as well as
-   the reason for migration as being 0.
+    6. If the host is overloaded, send a request to the REST API of the
+       global manager and pass a list of the UUIDs of the VMs selected by
+       the VM selection algorithm in the vm_uuids parameter, as well as
+       the reason for migration as being 1.
 
-4. If the host is not underloaded, call the function specified in the
-   algorithm_overload_detection configuration option and pass the data
-   on the resource usage by the VMs, as well as the frequency of the
-   host's CPU as arguments.
+        :param config: A config dictionary.
+         :type config: dict(str: *)
 
-5. If the host is overloaded, call the function specified in the
-   algorithm_vm_selection configuration option and pass the data on
-   the resource usage by the VMs, as well as the frequency of the
-   host's CPU as arguments
+        :param state: A state dictionary.
+         :type state: dict(str: *)
 
-6. If the host is overloaded, send a request to the REST API of the
-   global manager and pass a list of the UUIDs of the VMs selected by
-   the VM selection algorithm in the vm_uuids parameter, as well as
-   the reason for migration as being 1.
+        :return: The updated state dictionary.
+         :rtype: dict(str: *)
+        """
+        log.info('Started an iteration')
+        vm_path = common.build_local_vm_path(config['local_data_directory'])
+        vm_cpu_mhz = get_local_vm_data(vm_path)
+        vm_ram = get_ram(state['vir_connection'], vm_cpu_mhz.keys())
+        vm_cpu_mhz = cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())
 
-    :param config: A config dictionary.
-     :type config: dict(str: *)
+        if not vm_cpu_mhz:
+            if log.isEnabledFor(logging.INFO):
+                log.info('The host is idle')
+            log.info('Skipped an iteration')
+            return state
 
-    :param state: A state dictionary.
-     :type state: dict(str: *)
+        host_path = common.build_local_host_path(config['local_data_directory'])
+        host_cpu_mhz = get_local_host_data(host_path)
 
-    :return: The updated state dictionary.
-     :rtype: dict(str: *)
-    """
-    log.info('Started an iteration')
-    vm_path = common.build_local_vm_path(config['local_data_directory'])
-    vm_cpu_mhz = get_local_vm_data(vm_path)
-    vm_ram = get_ram(state['vir_connection'], vm_cpu_mhz.keys())
-    vm_cpu_mhz = cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())
+        host_cpu_utilization = vm_mhz_to_percentage(
+            vm_cpu_mhz.values(),
+            host_cpu_mhz,
+            state['physical_cpu_mhz_total'])
+        if log.isEnabledFor(logging.DEBUG):
+            log.debug('The total physical CPU Mhz: %s', str(state['physical_cpu_mhz_total']))
+            log.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
+            log.debug('Host CPU MHz: %s', str(host_cpu_mhz))
+            log.debug('CPU utilization: %s', str(host_cpu_utilization))
+
+        if not host_cpu_utilization:
+            log.info('Not enough data yet - skipping to the next iteration')
+            log.info('Skipped an iteration')
+            return state
+
+        time_step = int(config['data_collector_interval'])
+        migration_time = common.calculate_migration_time(
+            vm_ram, float(config['network_migration_bandwidth']))
+
+        if 'underload_detection' not in state:
+            underload_detection_params = common.parse_parameters(
+                config['algorithm_underload_detection_parameters'])
+            underload_detection = common.call_function_by_name(
+                config['algorithm_underload_detection_factory'],
+                [time_step,
+                 migration_time,
+                 underload_detection_params])
+            state['underload_detection'] = underload_detection
+            state['underload_detection_state'] = {}
+
+            overload_detection_params = common.parse_parameters(
+                config['algorithm_overload_detection_parameters'])
+            overload_detection = common.call_function_by_name(
+                config['algorithm_overload_detection_factory'],
+                [time_step,
+                 migration_time,
+                 overload_detection_params])
+            state['overload_detection'] = overload_detection
+            state['overload_detection_state'] = {}
+
+            vm_selection_params = common.parse_parameters(
+                config['algorithm_vm_selection_parameters'])
+            vm_selection = common.call_function_by_name(
+                config['algorithm_vm_selection_factory'],
+                [time_step,
+                 migration_time,
+                 vm_selection_params])
+            state['vm_selection'] = vm_selection
+            state['vm_selection_state'] = {}
+        else:
+            underload_detection = state['underload_detection']
+            overload_detection = state['overload_detection']
+            vm_selection = state['vm_selection']
 
-    if not vm_cpu_mhz:
         if log.isEnabledFor(logging.INFO):
-            log.info('The host is idle')
-        log.info('Skipped an iteration')
-        return state
-
-    host_path = common.build_local_host_path(config['local_data_directory'])
-    host_cpu_mhz = get_local_host_data(host_path)
-
-    host_cpu_utilization = vm_mhz_to_percentage(
-        vm_cpu_mhz.values(),
-        host_cpu_mhz,
-        state['physical_cpu_mhz_total'])
-    if log.isEnabledFor(logging.DEBUG):
-        log.debug('The total physical CPU Mhz: %s', str(state['physical_cpu_mhz_total']))
-        log.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
-        log.debug('Host CPU MHz: %s', str(host_cpu_mhz))
-        log.debug('CPU utilization: %s', str(host_cpu_utilization))
-
-    if not host_cpu_utilization:
-        log.info('Not enough data yet - skipping to the next iteration')
-        log.info('Skipped an iteration')
-        return state
-
-    time_step = int(config['data_collector_interval'])
-    migration_time = common.calculate_migration_time(
-        vm_ram, float(config['network_migration_bandwidth']))
-
-    if 'underload_detection' not in state:
-        underload_detection_params = common.parse_parameters(
-            config['algorithm_underload_detection_parameters'])
-        underload_detection = common.call_function_by_name(
-            config['algorithm_underload_detection_factory'],
-            [time_step,
-             migration_time,
-             underload_detection_params])
-        state['underload_detection'] = underload_detection
-        state['underload_detection_state'] = {}
-
-        overload_detection_params = common.parse_parameters(
-            config['algorithm_overload_detection_parameters'])
-        overload_detection = common.call_function_by_name(
-            config['algorithm_overload_detection_factory'],
-            [time_step,
-             migration_time,
-             overload_detection_params])
-        state['overload_detection'] = overload_detection
-        state['overload_detection_state'] = {}
-
-        vm_selection_params = common.parse_parameters(
-            config['algorithm_vm_selection_parameters'])
-        vm_selection = common.call_function_by_name(
-            config['algorithm_vm_selection_factory'],
-            [time_step,
-             migration_time,
-             vm_selection_params])
-        state['vm_selection'] = vm_selection
-        state['vm_selection_state'] = {}
-    else:
-        underload_detection = state['underload_detection']
-        overload_detection = state['overload_detection']
-        vm_selection = state['vm_selection']
-
-    if log.isEnabledFor(logging.INFO):
-        log.info('Started underload detection')
-    underload, state['underload_detection_state'] = underload_detection(
-        host_cpu_utilization, state['underload_detection_state'])
-    if log.isEnabledFor(logging.INFO):
-        log.info('Completed underload detection')
-
-    if log.isEnabledFor(logging.INFO):
-        log.info('Started overload detection')
-    overload, state['overload_detection_state'] = overload_detection(
-        host_cpu_utilization, state['overload_detection_state'])
-    if log.isEnabledFor(logging.INFO):
-        log.info('Completed overload detection')
-
-    if underload:
+            log.info('Started underload detection')
+        underload, state['underload_detection_state'] = underload_detection(
+            host_cpu_utilization, state['underload_detection_state'])
         if log.isEnabledFor(logging.INFO):
-            log.info('Underload detected')
-        try:
-            r = requests.put('http://' + config['global_manager_host'] +
-                             ':' + config['global_manager_port'],
-                             {'username': state['hashed_username'],
-                              'password': state['hashed_password'],
-                              'time': time.time(),
-                              'host': state['hostname'],
-                              'reason': 0})
-            if log.isEnabledFor(logging.INFO):
-                log.info('Received response: [%s] %s',
-                         r.status_code, r.content)
-        except requests.exceptions.ConnectionError:
-            log.exception('Exception at underload request:')
+            log.info('Completed underload detection')
 
-    else:
-        if overload:
-            if log.isEnabledFor(logging.INFO):
-                log.info('Overload detected')
-
-            log.info('Started VM selection')
-            vm_uuids, state['vm_selection_state'] = vm_selection(
-                vm_cpu_mhz, vm_ram, state['vm_selection_state'])
-            log.info('Completed VM selection')
+        if log.isEnabledFor(logging.INFO):
+            log.info('Started overload detection')
+        overload, state['overload_detection_state'] = overload_detection(
+            host_cpu_utilization, state['overload_detection_state'])
+        if log.isEnabledFor(logging.INFO):
+            log.info('Completed overload detection')
 
+        if underload:
             if log.isEnabledFor(logging.INFO):
-                log.info('Selected VMs to migrate: %s', str(vm_uuids))
+                log.info('Underload detected')
             try:
                 r = requests.put('http://' + config['global_manager_host'] +
                                  ':' + config['global_manager_port'],
@@ -329,137 +291,163 @@ def execute(config, state):
                                   'password': state['hashed_password'],
                                   'time': time.time(),
                                   'host': state['hostname'],
-                                  'reason': 1,
-                                  'vm_uuids': ','.join(vm_uuids)})
+                                  'reason': 0})
                 if log.isEnabledFor(logging.INFO):
                     log.info('Received response: [%s] %s',
                              r.status_code, r.content)
             except requests.exceptions.ConnectionError:
-                log.exception('Exception at overload request:')
+                log.exception('Exception at underload request:')
+
         else:
-            if log.isEnabledFor(logging.INFO):
-                log.info('No underload or overload detected')
+            if overload:
+                if log.isEnabledFor(logging.INFO):
+                    log.info('Overload detected')
 
-    if log.isEnabledFor(logging.INFO):
-        log.info('Completed an iteration')
+                log.info('Started VM selection')
+                vm_uuids, state['vm_selection_state'] = vm_selection(
+                    vm_cpu_mhz, vm_ram, state['vm_selection_state'])
+                log.info('Completed VM selection')
 
-    return state
+                if log.isEnabledFor(logging.INFO):
+                    log.info('Selected VMs to migrate: %s', str(vm_uuids))
+                try:
+                    r = requests.put('http://' + config['global_manager_host'] +
+                                     ':' + config['global_manager_port'],
+                                     {'username': state['hashed_username'],
+                                      'password': state['hashed_password'],
+                                      'time': time.time(),
+                                      'host': state['hostname'],
+                                      'reason': 1,
+                                      'vm_uuids': ','.join(vm_uuids)})
+                    if log.isEnabledFor(logging.INFO):
+                        log.info('Received response: [%s] %s',
+                                 r.status_code, r.content)
+                except requests.exceptions.ConnectionError:
+                    log.exception('Exception at overload request:')
+            else:
+                if log.isEnabledFor(logging.INFO):
+                    log.info('No underload or overload detected')
+
+        if log.isEnabledFor(logging.INFO):
+            log.info('Completed an iteration')
+
+        return state
 
 
-@contract
-def get_local_vm_data(path):
-    """ Read the data about VMs from the local storage.
+    @contract
+    def get_local_vm_data(self, path):
+        """ Read the data about VMs from the local storage.
 
-    :param path: A path to read VM UUIDs from.
-     :type path: str
+        :param path: A path to read VM UUIDs from.
+         :type path: str
 
-    :return: A map of VM UUIDs onto the corresponing CPU MHz values.
-     :rtype: dict(str : list(int))
-    """
-    result = {}
-    for uuid in os.listdir(path):
-        with open(os.path.join(path, uuid), 'r') as f:
-            result[uuid] = [int(x) for x in f.read().strip().splitlines()]
-    return result
+        :return: A map of VM UUIDs onto the corresponding CPU MHz values.
+         :rtype: dict(str : list(int))
+        """
+        result = {}
+        for uuid in os.listdir(path):
+            with open(os.path.join(path, uuid), 'r') as f:
+                result[uuid] = [int(x) for x in f.read().strip().splitlines()]
+        return result
 
 
-@contract
-def get_local_host_data(path):
-    """ Read the data about the host from the local storage.
+    @contract
+    def get_local_host_data(self, path):
+        """ Read the data about the host from the local storage.
 
-    :param path: A path to read the host data from.
-     :type path: str
+        :param path: A path to read the host data from.
+         :type path: str
 
-    :return: A history of the host CPU usage in MHz.
-     :rtype: list(int)
-    """
-    if not os.access(path, os.F_OK):
-        return []
-    with open(path, 'r') as f:
-        result = [int(x) for x in f.read().strip().splitlines()]
-    return result
+        :return: A history of the host CPU usage in MHz.
+         :rtype: list(int)
+        """
+        if not os.access(path, os.F_OK):
+            return []
+        with open(path, 'r') as f:
+            result = [int(x) for x in f.read().strip().splitlines()]
+        return result
 
 
-@contract
-def cleanup_vm_data(vm_data, uuids):
-    """ Remove records for the VMs that are not in the list of UUIDs.
+    @contract
+    def cleanup_vm_data(self, vm_data, uuids):
+        """ Remove records for the VMs that are not in the list of UUIDs.
 
-    :param vm_data: A map of VM UUIDs to some data.
-     :type vm_data: dict(str: *)
+        :param vm_data: A map of VM UUIDs to some data.
+         :type vm_data: dict(str: *)
 
-    :param uuids: A list of VM UUIDs.
-     :type uuids: list(str)
+        :param uuids: A list of VM UUIDs.
+         :type uuids: list(str)
 
-    :return: The cleaned up map of VM UUIDs to data.
-     :rtype: dict(str: *)
-    """
-    for uuid, _ in vm_data.items():
-        if uuid not in uuids:
-            del vm_data[uuid]
-    return vm_data
+        :return: The cleaned up map of VM UUIDs to data.
+         :rtype: dict(str: *)
+        """
+        for uuid, _ in vm_data.items():
+            if uuid not in uuids:
+                del vm_data[uuid]
+        return vm_data
 
 
-@contract
-def get_ram(vir_connection, vms):
-    """ Get the maximum RAM for a set of VM UUIDs.
+    @contract
+    def get_ram(self, vir_connection, vms):
+        """ Get the maximum RAM for a set of VM UUIDs.
 
-    :param vir_connection: A libvirt connection object.
-     :type vir_connection: virConnect
+        :param vir_connection: A libvirt connection object.
+         :type vir_connection: virConnect
 
-    :param vms: A list of VM UUIDs.
-     :type vms: list(str)
+        :param vms: A list of VM UUIDs.
+         :type vms: list(str)
 
-    :return: The maximum RAM for the VM UUIDs.
-     :rtype: dict(str : long)
-    """
-    vms_ram = {}
-    for uuid in vms:
-        ram = get_max_ram(vir_connection, uuid)
-        if ram:
-            vms_ram[uuid] = ram
+        :return: The maximum RAM for the VM UUIDs.
+         :rtype: dict(str : long)
+        """
+        vms_ram = {}
+        for uuid in vms:
+            ram = get_max_ram(vir_connection, uuid)
+            if ram:
+                vms_ram[uuid] = ram
 
-    return vms_ram
+        return vms_ram
 
 
-@contract
-def get_max_ram(vir_connection, uuid):
-    """ Get the max RAM allocated to a VM UUID using libvirt.
+    @contract
+    def get_max_ram(self, vir_connection, uuid):
+        """ Get the max RAM allocated to a VM UUID using libvirt.
 
-    :param vir_connection: A libvirt connection object.
-     :type vir_connection: virConnect
+        :param vir_connection: A libvirt connection object.
+         :type vir_connection: virConnect
 
-    :param uuid: The UUID of a VM.
-     :type uuid: str[36]
+        :param uuid: The UUID of a VM.
+         :type uuid: str[36]
 
-    :return: The maximum RAM of the VM in MB.
-     :rtype: long|None
-    """
-    try:
-        domain = vir_connection.lookupByUUIDString(uuid)
-        return domain.maxMemory() / 1024
-    except libvirt.libvirtError:
-        return None
+        :return: The maximum RAM of the VM in MB.
+         :rtype: long|None
+        """
+        try:
+            domain = vir_connection.lookupByUUIDString(uuid)
+            return domain.maxMemory() / 1024
+        except libvirt.libvirtError:
+            return None
 
 
-@contract
-def vm_mhz_to_percentage(vm_mhz_history, host_mhz_history, physical_cpu_mhz):
-    """ Convert VM CPU utilization to the host's CPU utilization.
+    @contract
+    def vm_mhz_to_percentage(self, vm_mhz_history, host_mhz_history, physical_cpu_mhz):
+        """ Convert VM CPU utilization to the host's CPU utilization.
 
-    :param vm_mhz_history: A list of CPU utilization histories of VMs in MHz.
-     :type vm_mhz_history: list(list(int))
+        :param vm_mhz_history: A list of CPU utilization histories of VMs in MHz.
+         :type vm_mhz_history: list(list(int))
 
-    :param host_mhz_history: A history if the CPU usage by the host in MHz.
-     :type host_mhz_history: list(int)
+        :param host_mhz_history: A history of the CPU usage by the host in MHz.
+         :type host_mhz_history: list(int)
 
-    :param physical_cpu_mhz: The total frequency of the physical CPU in MHz.
-     :type physical_cpu_mhz: int,>0
+        :param physical_cpu_mhz: The total frequency of the physical CPU in MHz.
+         :type physical_cpu_mhz: int,>0
 
-    :return: The history of the host's CPU utilization in percentages.
-     :rtype: list(float)
-    """
-    max_len = max(len(x) for x in vm_mhz_history)
-    if len(host_mhz_history) > max_len:
-        host_mhz_history = host_mhz_history[-max_len:]
-    mhz_history = [[0] * (max_len - len(x)) + x
-                   for x in vm_mhz_history + [host_mhz_history]]
-    return [float(sum(x)) / physical_cpu_mhz for x in zip(*mhz_history)]
+        :return: The history of the host's CPU utilization in percentages.
+         :rtype: list(float)
+        """
+        max_len = max(len(x) for x in vm_mhz_history)
+        if len(host_mhz_history) > max_len:
+            host_mhz_history = host_mhz_history[-max_len:]
+        mhz_history = [[0] * (max_len - len(x)) + x
+                       for x in vm_mhz_history + [host_mhz_history]]
+        return [float(sum(x)) / physical_cpu_mhz for x in zip(*mhz_history)]
diff --git a/terracotta/rpc.py b/terracotta/rpc.py
index 6341859..7776f91 100644
--- a/terracotta/rpc.py
+++ b/terracotta/rpc.py
@@ -1,5 +1,4 @@
-# Copyright 2014 - Mirantis, Inc.
-# Copyright 2015 - StackStorm, Inc.
+# Copyright 2015 Huawei Technologies Co. Ltd
 #
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
@@ -71,116 +70,11 @@ def get_executor_client():
     return _EXECUTOR_CLIENT
 
 
-class EngineServer(object):
+class GlobalManagerServer(object):
     """RPC Engine server."""
 
-    def __init__(self, engine):
-        self._engine = engine
-
-    def start_workflow(self, rpc_ctx, workflow_name, workflow_input, params):
-        """Receives calls over RPC to start workflows on engine.
-
-        :param rpc_ctx: RPC request context.
-        :return: Workflow execution.
-        """
-
-        LOG.info(
-            "Received RPC request 'start_workflow'[rpc_ctx=%s,"
-            " workflow_name=%s, workflow_input=%s, params=%s]"
-            % (rpc_ctx, workflow_name, workflow_input, params)
-        )
-
-        return self._engine.start_workflow(
-            workflow_name,
-            workflow_input,
-            **params
-        )
-
-    def on_task_state_change(self, rpc_ctx, task_ex_id, state):
-        return self._engine.on_task_state_change(task_ex_id, state)
-
-    def on_action_complete(self, rpc_ctx, action_ex_id, result_data,
-                           result_error):
-        """Receives RPC calls to communicate action result to engine.
-
-        :param rpc_ctx: RPC request context.
-        :param action_ex_id: Action execution id.
-        :return: Action execution.
-        """
-
-        result = wf_utils.Result(result_data, result_error)
-
-        LOG.info(
-            "Received RPC request 'on_action_complete'[rpc_ctx=%s,"
-            " action_ex_id=%s, result=%s]" % (rpc_ctx, action_ex_id, result)
-        )
-
-        return self._engine.on_action_complete(action_ex_id, result)
-
-    def pause_workflow(self, rpc_ctx, execution_id):
-        """Receives calls over RPC to pause workflows on engine.
-
-        :param rpc_ctx: Request context.
-        :return: Workflow execution.
-        """
-
-        LOG.info(
-            "Received RPC request 'pause_workflow'[rpc_ctx=%s,"
-            " execution_id=%s]" % (rpc_ctx, execution_id)
-        )
-
-        return self._engine.pause_workflow(execution_id)
-
-    def resume_workflow(self, rpc_ctx, execution_id):
-        """Receives calls over RPC to resume workflows on engine.
-
-        :param rpc_ctx: RPC request context.
-        :return: Workflow execution.
-        """
-
-        LOG.info(
-            "Received RPC request 'resume_workflow'[rpc_ctx=%s,"
-            " execution_id=%s]" % (rpc_ctx, execution_id)
-        )
-
-        return self._engine.resume_workflow(execution_id)
-
-    def stop_workflow(self, rpc_ctx, execution_id, state, message=None):
-        """Receives calls over RPC to stop workflows on engine.
-
-        Sets execution state to SUCCESS or ERROR. No more tasks will be
-        scheduled. Running tasks won't be killed, but their results
-        will be ignored.
-
-        :param rpc_ctx: RPC request context.
-        :param execution_id: Workflow execution id.
-        :param state: State assigned to the workflow. Permitted states are
-            SUCCESS or ERROR.
-        :param message: Optional information string.
-
-        :return: Workflow execution.
-        """
-
-        LOG.info(
-            "Received RPC request 'stop_workflow'[rpc_ctx=%s, execution_id=%s,"
-            " state=%s, message=%s]" % (rpc_ctx, execution_id, state, message)
-        )
-
-        return self._engine.stop_workflow(execution_id, state, message)
-
-    def rollback_workflow(self, rpc_ctx, execution_id):
-        """Receives calls over RPC to rollback workflows on engine.
-
-        :param rpc_ctx: RPC request context.
-        :return: Workflow execution.
-        """
-
-        LOG.info(
-            "Received RPC request 'rollback_workflow'[rpc_ctx=%s,"
-            " execution_id=%s]" % (rpc_ctx, execution_id)
-        )
-
-        return self._engine.resume_workflow(execution_id)
+    def __init__(self, manager):
+        self._manager = manager
 
 
 def wrap_messaging_exception(method):
@@ -330,11 +224,11 @@ class EngineClient(base.Engine):
         )
 
 
-class ExecutorServer(object):
+class LocalManagerServer(object):
     """RPC Executor server."""
 
-    def __init__(self, executor):
-        self._executor = executor
+    def __init__(self, manager):
+        self._executor = manager
 
     def run_action(self, rpc_ctx, action_ex_id, action_class_str,
                    attributes, params):
diff --git a/terracotta/utils/__init__.py b/terracotta/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/terracotta/db_utils.py b/terracotta/utils/db_utils.py
similarity index 80%
rename from terracotta/db_utils.py
rename to terracotta/utils/db_utils.py
index b9761a6..c019648 100644
--- a/terracotta/db_utils.py
+++ b/terracotta/utils/db_utils.py
@@ -1,10 +1,11 @@
 # Copyright 2012 Anton Beloglazov
+# Copyright 2015 - Huawei Technologies Co. Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,30 +13,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from contracts import contract
-from neat.contracts_primitive import *
-from neat.contracts_extra import *
-
 from sqlalchemy import *
 from sqlalchemy.sql import func
 
-from neat.db import Database
+from oslo_config import cfg
+from oslo_log import log as logging
 
-import logging
-log = logging.getLogger(__name__)
+from terracotta import db as database
 
 
-@contract
-def init_db(sql_connection):
+LOG = logging.getLogger(__name__)
+
+
+def init_db():
     """ Initialize the database.
 
     :param sql_connection: A database connection URL.
-     :type sql_connection: str
-
     :return: The initialized database.
-     :rtype: Database
     """
-    engine = create_engine(sql_connection)  # 'sqlite:///:memory:'
+    engine = create_engine(CONF.sql_connection)
     metadata = MetaData()
     metadata.bind = engine
 
@@ -49,7 +45,8 @@ def init_db(sql_connection):
     host_resource_usage = \
         Table('host_resource_usage', metadata,
               Column('id', Integer, primary_key=True),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
+              Column('host_id', Integer, ForeignKey('hosts.id'),
+                     nullable=False),
               Column('timestamp', DateTime, default=func.now()),
               Column('cpu_mhz', Integer, nullable=False))
 
@@ -68,27 +65,31 @@ def init_db(sql_connection):
         Table('vm_migrations', metadata,
               Column('id', Integer, primary_key=True),
               Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
+              Column('host_id', Integer, ForeignKey('hosts.id'),
+                     nullable=False),
               Column('timestamp', DateTime, default=func.now()))
 
     host_states = \
         Table('host_states', metadata,
               Column('id', Integer, primary_key=True),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
+              Column('host_id', Integer, ForeignKey('hosts.id'),
+                     nullable=False),
               Column('timestamp', DateTime, default=func.now()),
               Column('state', Integer, nullable=False))
 
     host_overload = \
         Table('host_overload', metadata,
               Column('id', Integer, primary_key=True),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
+              Column('host_id', Integer, ForeignKey('hosts.id'),
+                     nullable=False),
               Column('timestamp', DateTime, default=func.now()),
               Column('overload', Integer, nullable=False))
 
     metadata.create_all()
     connection = engine.connect()
-    db = Database(connection, hosts, host_resource_usage, vms,
-                  vm_resource_usage, vm_migrations, host_states, host_overload)
+    db = database.Database(connection, hosts, host_resource_usage, vms,
+                           vm_resource_usage, vm_migrations, host_states,
+                           host_overload)
 
-    log.debug('Initialized a DB connection to %s', sql_connection)
+    LOG.debug('Initialized a DB connection to %s', CONF.sql_connection)
     return db
diff --git a/utils/idle-time-fraction.py b/terracotta/utils/idle-time-fraction.py
similarity index 100%
rename from utils/idle-time-fraction.py
rename to terracotta/utils/idle-time-fraction.py
diff --git a/utils/overload-time-fraction.py b/terracotta/utils/overload-time-fraction.py
similarity index 100%
rename from utils/overload-time-fraction.py
rename to terracotta/utils/overload-time-fraction.py
diff --git a/utils/vm-migrations.py b/terracotta/utils/vm-migrations.py
similarity index 100%
rename from utils/vm-migrations.py
rename to terracotta/utils/vm-migrations.py
diff --git a/terracotta/version.py b/terracotta/version.py
new file mode 100644
index 0000000..e75d519
--- /dev/null
+++ b/terracotta/version.py
@@ -0,0 +1,18 @@
+# Copyright 2015 - Huawei Technologies Co. Ltd
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from pbr import version
+
+version_info = version.VersionInfo('terracotta')
+version_string = version_info.version_string
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..4db8eab
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,16 @@
+hacking>=0.9.2,<0.10
+coverage>=3.6
+pyflakes==0.8.1
+pylint==0.25.2
+sphinx>=1.1.2,!=1.2.0,<1.3
+unittest2
+oslotest
+oslosphinx
+sphinxcontrib-pecanwsme>=0.8
+sphinxcontrib-httpdomain
+docutils==0.9.1
+fixtures>=0.3.14
+nose
+testrepository>=0.0.18
+testtools>=0.9.34
+lockfile>=0.8
diff --git a/all-clean-logs.sh b/tools/all-clean-logs.sh
similarity index 100%
rename from all-clean-logs.sh
rename to tools/all-clean-logs.sh
diff --git a/all-start.sh b/tools/all-start.sh
similarity index 100%
rename from all-start.sh
rename to tools/all-start.sh
diff --git a/all-stop.sh b/tools/all-stop.sh
similarity index 100%
rename from all-stop.sh
rename to tools/all-stop.sh
diff --git a/all-sync-time.sh b/tools/all-sync-time.sh
similarity index 100%
rename from all-sync-time.sh
rename to tools/all-sync-time.sh
diff --git a/all-update.sh b/tools/all-update.sh
similarity index 100%
rename from all-update.sh
rename to tools/all-update.sh
diff --git a/build-rpm.sh b/tools/build-rpm.sh
similarity index 100%
rename from build-rpm.sh
rename to tools/build-rpm.sh
diff --git a/compute-clean-logs.py b/tools/compute-clean-logs.py
similarity index 100%
rename from compute-clean-logs.py
rename to tools/compute-clean-logs.py
diff --git a/compute-clone-neat.py b/tools/compute-clone-neat.py
similarity index 100%
rename from compute-clone-neat.py
rename to tools/compute-clone-neat.py
diff --git a/compute-copy-conf.py b/tools/compute-copy-conf.py
similarity index 100%
rename from compute-copy-conf.py
rename to tools/compute-copy-conf.py
diff --git a/compute-data-collector-start.py b/tools/compute-data-collector-start.py
similarity index 100%
rename from compute-data-collector-start.py
rename to tools/compute-data-collector-start.py
diff --git a/compute-data-collector-status.py b/tools/compute-data-collector-status.py
similarity index 100%
rename from compute-data-collector-status.py
rename to tools/compute-data-collector-status.py
diff --git a/compute-data-collector-stop.py b/tools/compute-data-collector-stop.py
similarity index 100%
rename from compute-data-collector-stop.py
rename to tools/compute-data-collector-stop.py
diff --git a/compute-install-deps.py b/tools/compute-install-deps.py
similarity index 100%
rename from compute-install-deps.py
rename to tools/compute-install-deps.py
diff --git a/compute-install-neat.py b/tools/compute-install-neat.py
similarity index 100%
rename from compute-install-neat.py
rename to tools/compute-install-neat.py
diff --git a/compute-local-manager-start.py b/tools/compute-local-manager-start.py
similarity index 100%
rename from compute-local-manager-start.py
rename to tools/compute-local-manager-start.py
diff --git a/compute-local-manager-status.py b/tools/compute-local-manager-status.py
similarity index 100%
rename from compute-local-manager-status.py
rename to tools/compute-local-manager-status.py
diff --git a/compute-local-manager-stop.py b/tools/compute-local-manager-stop.py
similarity index 100%
rename from compute-local-manager-stop.py
rename to tools/compute-local-manager-stop.py
diff --git a/compute-sync-time.py b/tools/compute-sync-time.py
similarity index 100%
rename from compute-sync-time.py
rename to tools/compute-sync-time.py
diff --git a/compute-update.py b/tools/compute-update.py
similarity index 100%
rename from compute-update.py
rename to tools/compute-update.py
diff --git a/distribute_setup.py b/tools/distribute_setup.py
similarity index 100%
rename from distribute_setup.py
rename to tools/distribute_setup.py
diff --git a/init.d/openstack-neat-data-collector b/tools/init.d/openstack-neat-data-collector
similarity index 100%
rename from init.d/openstack-neat-data-collector
rename to tools/init.d/openstack-neat-data-collector
diff --git a/init.d/openstack-neat-db-cleaner b/tools/init.d/openstack-neat-db-cleaner
similarity index 100%
rename from init.d/openstack-neat-db-cleaner
rename to tools/init.d/openstack-neat-db-cleaner
diff --git a/init.d/openstack-neat-global-manager b/tools/init.d/openstack-neat-global-manager
similarity index 100%
rename from init.d/openstack-neat-global-manager
rename to tools/init.d/openstack-neat-global-manager
diff --git a/init.d/openstack-neat-local-manager b/tools/init.d/openstack-neat-local-manager
similarity index 100%
rename from init.d/openstack-neat-local-manager
rename to tools/init.d/openstack-neat-local-manager
diff --git a/install-rpm.sh b/tools/install-rpm.sh
similarity index 100%
rename from install-rpm.sh
rename to tools/install-rpm.sh
diff --git a/start-data-collector.py b/tools/start-data-collector.py
similarity index 100%
rename from start-data-collector.py
rename to tools/start-data-collector.py
diff --git a/start-global-manager.py b/tools/start-global-manager.py
similarity index 100%
rename from start-global-manager.py
rename to tools/start-global-manager.py
diff --git a/start-local-manager.py b/tools/start-local-manager.py
similarity index 100%
rename from start-local-manager.py
rename to tools/start-local-manager.py
diff --git a/stats.sh b/tools/stats.sh
similarity index 100%
rename from stats.sh
rename to tools/stats.sh
diff --git a/vm-placement.py b/tools/vm-placement.py
similarity index 100%
rename from vm-placement.py
rename to tools/vm-placement.py
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..b9352bc
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,46 @@
+[tox]
+envlist = py27,py33,py34,pep8
+minversion = 1.6
+skipsdist = True
+
+[testenv]
+sitepackages = True
+usedevelop = True
+install_command = pip install -U --force-reinstall {opts} {packages}
+setenv = VIRTUAL_ENV={envdir}
+         PYTHONDONTWRITEBYTECODE = 1
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands =
+  python -m terracotta.openstack.common.lockutils python setup.py test --slowest --testr-args='{posargs}'
+whitelist_externals = rm
+
+[testenv:pep8]
+commands = flake8 {posargs}
+
+[testenv:cover]
+# Also do not run test_coverage_ext tests while gathering coverage as those
+# tests conflict with coverage.
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  python setup.py testr --coverage \
+    --testr-args='^(?!.*test.*coverage).*$'
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:docs]
+commands =
+    rm -rf doc/html doc/build
+    rm -rf doc/source/apidoc doc/source/api
+    python setup.py build_sphinx
+
+[testenv:pylint]
+setenv = VIRTUAL_ENV={envdir}
+commands = bash tools/lintstack.sh
+
+[flake8]
+show-source = true
+ignore = H803,H305,H405
+builtins = _
+exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools,scripts
diff --git a/utils/db.py b/utils/db.py
deleted file mode 100644
index bdf1e18..0000000
--- a/utils/db.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright 2012 Anton Beloglazov
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from contracts import contract
-import datetime
-from sqlalchemy import *
-from sqlalchemy.engine.base import Connection
-
-
-class Database(object):
-    """ A class representing the database, where fields are tables.
-    """
-
-    @contract(connection=Connection,
-              hosts=Table,
-              host_resource_usage=Table,
-              vms=Table,
-              vm_resource_usage=Table,
-              vm_migrations=Table,
-              host_states=Table,
-              host_overload=Table)
-    def __init__(self, connection, hosts, host_resource_usage, vms, 
-                 vm_resource_usage, vm_migrations, host_states, host_overload):
-        """ Initialize the database.
-
-        :param connection: A database connection table.
-        :param hosts: The hosts table.
-        :param host_resource_usage: The host_resource_usage table.
-        :param vms: The vms table.
-        :param vm_resource_usage: The vm_resource_usage table.
-        :param vm_migrations: The vm_migrations table.
-        :param host_states: The host_states table.
-        :param host_overload: The host_overload table.
-        """
-        self.connection = connection
-        self.hosts = hosts
-        self.host_resource_usage = host_resource_usage
-        self.vms = vms
-        self.vm_resource_usage = vm_resource_usage
-        self.vm_migrations = vm_migrations
-        self.host_states = host_states
-        self.host_overload = host_overload
-
-    @contract
-    def select_host_ids(self):
-        """ Select the IDs of all the hosts.
-
-        :return: A dict of host names to IDs.
-         :rtype: dict(str: int)
-        """
-        return dict((str(x[1]), int(x[0])) 
-                    for x in self.hosts.select().execute().fetchall())
-
-    @contract
-    def select_host_states(self, host_id, start_time, end_time):
-        """ Select the states of a host.
-
-        :param start_time: The start time to select host states.
-         :type start_time: *
-
-        :param end_time: The end time to select host states.
-         :type end_time: *
-
-        :return: A list of timestamps and host states.
-         :rtype: list(tuple(*, int))
-        """
-        hs = self.host_states
-        sel = select([hs.c.timestamp, hs.c.state]). \
-            where(and_(hs.c.host_id == host_id,
-                       hs.c.timestamp >= start_time,
-                       hs.c.timestamp <= end_time)). \
-            order_by(hs.c.id.asc())
-        return [(x[0], int(x[1])) 
-                for x in self.connection.execute(sel).fetchall()]
-
-    @contract
-    def select_host_overload(self, host_id, start_time, end_time):
-        """ Select the overload of a host.
-
-        :param start_time: The start time to select host overload.
-         :type start_time: *
-
-        :param end_time: The end time to select host states.
-         :type end_time: *
-
-        :return: A list of timestamps and overloads.
-         :rtype: list(tuple(*, int))
-        """
-        ho = self.host_overload
-        sel = select([ho.c.timestamp, ho.c.overload]). \
-            where(and_(ho.c.host_id == host_id,
-                       ho.c.timestamp >= start_time,
-                       ho.c.timestamp <= end_time)). \
-            order_by(ho.c.id.asc())
-        return [(x[0], int(x[1])) 
-                for x in self.connection.execute(sel).fetchall()]
-
-    @contract
-    def select_vm_migrations(self, start_time, end_time):
-        """ Select VM migrations.
-
-        :param start_time: The start time to select data.
-         :type start_time: *
-
-        :param end_time: The end time to select data.
-         :type end_time: *
-
-        :return: A list of timestamps and VM IDs.
-         :rtype: list(tuple(*, int))
-        """
-        vm = self.vm_migrations
-        sel = select([vm.c.timestamp, vm.c.vm_id]). \
-            where(and_(vm.c.timestamp >= start_time,
-                       vm.c.timestamp <= end_time)). \
-            order_by(vm.c.id.asc())
-        return [(x[0], int(x[1])) 
-                for x in self.connection.execute(sel).fetchall()]
-
-
-@contract
-def init_db(sql_connection):
-    """ Initialize the database.
-
-    :param sql_connection: A database connection URL.
-     :type sql_connection: str
-
-    :return: The initialized database.
-     :rtype: *
-    """
-    engine = create_engine(sql_connection)  # 'sqlite:///:memory:'
-    metadata = MetaData()
-    metadata.bind = engine
-
-    hosts = Table('hosts', metadata,
-                  Column('id', Integer, primary_key=True),
-                  Column('hostname', String(255), nullable=False),
-                  Column('cpu_mhz', Integer, nullable=False),
-                  Column('cpu_cores', Integer, nullable=False),
-                  Column('ram', Integer, nullable=False))
-
-    host_resource_usage = \
-        Table('host_resource_usage', metadata,
-              Column('id', Integer, primary_key=True),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
-              Column('timestamp', DateTime, default=func.now()),
-              Column('cpu_mhz', Integer, nullable=False))
-
-    vms = Table('vms', metadata,
-                Column('id', Integer, primary_key=True),
-                Column('uuid', String(36), nullable=False))
-
-    vm_resource_usage = \
-        Table('vm_resource_usage', metadata,
-              Column('id', Integer, primary_key=True),
-              Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
-              Column('timestamp', DateTime, default=func.now()),
-              Column('cpu_mhz', Integer, nullable=False))
-
-    vm_migrations = \
-        Table('vm_migrations', metadata,
-              Column('id', Integer, primary_key=True),
-              Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
-              Column('timestamp', DateTime, default=func.now()))
-
-    host_states = \
-        Table('host_states', metadata,
-              Column('id', Integer, primary_key=True),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
-              Column('timestamp', DateTime, default=func.now()),
-              Column('state', Integer, nullable=False))
-
-    host_overload = \
-        Table('host_overload', metadata,
-              Column('id', Integer, primary_key=True),
-              Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
-              Column('timestamp', DateTime, default=func.now()),
-              Column('overload', Integer, nullable=False))
-
-    metadata.create_all()
-    connection = engine.connect()
-    db = Database(connection, hosts, host_resource_usage, vms, 
-                  vm_resource_usage, vm_migrations, host_states, host_overload)
-
-    return db