From 77d4e1e47fda01c0d8fc0d97d88844e7edf09510 Mon Sep 17 00:00:00 2001
From: Jake Carlson <jake.carlson@att.com>
Date: Tue, 13 Jun 2017 01:45:31 -0500
Subject: [PATCH] Moved logger references to file headers

Moved the logger references in health_checker.py and
listener_manager.py to the file headers to match convention,
and adjusted logger calls to reflect the new variable names.
Also applied the changes that remove the passed-in logger and
use oslo_log's log.getLogger() to handle logging.

Story: #2001031
Task: #4232

Change-Id: I66beb635a78e5f6f0caae0b5a63c55dfdb5d15fe
---
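For reference, a minimal sketch of the pattern this change applies
(class and call names are illustrative, and constructor signatures
are simplified relative to the real ones):

    # Before: a logger instance was threaded through constructors.
    class ComputeManager(object):
        def __init__(self, _config, _logger):
            self.logger = _logger          # passed-in logger
            self.logger.info("compute manager started")

    # After: each module obtains its own logger from oslo_log.
    from oslo_log import log

    LOG = log.getLogger(__name__)          # module-level logger

    class ComputeManager(object):
        def __init__(self, _config):
            LOG.info("compute manager started")
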
 bin/valet-engine                              |  18 +-
 valet/engine/listener/listener_manager.py     |  44 ++---
 .../optimizer/app_manager/app_handler.py      |  37 ++--
 .../optimizer/app_manager/app_topology.py     |   5 +-
 .../app_manager/app_topology_parser.py        |  16 +-
 .../optimizer/db_connect/music_handler.py     | 148 ++++++++--------
 .../optimizer/ostro/constraint_solver.py      |  44 ++---
 .../optimizer/ostro/openstack_filters.py      |  15 +-
 valet/engine/optimizer/ostro/optimizer.py     |  12 +-
 valet/engine/optimizer/ostro/ostro.py         | 158 +++++++++---------
 valet/engine/optimizer/ostro/search.py        |  57 ++++---
 valet/engine/optimizer/ostro_server/daemon.py |  24 +--
 .../optimizer/ostro_server/health_checker.py  |  16 +-
 valet/engine/resource_manager/compute.py      |  29 ++--
 .../resource_manager/compute_manager.py       | 118 ++++++-------
 valet/engine/resource_manager/resource.py     |  86 +++++-----
 valet/engine/resource_manager/topology.py     |  13 +-
 .../resource_manager/topology_manager.py      |  81 ++++-----
 valet/tests/unit/engine/test_search.py        |   9 +-
 valet/tests/unit/engine/test_topology.py      |   2 +-
 20 files changed, 475 insertions(+), 457 deletions(-)
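
For reference, oslo_log loggers (like stdlib logging) defer message
interpolation when the format arguments are passed to the logging
call itself, which several hunks below rely on. A small sketch with
illustrative values:

    from oslo_log import log

    LOG = log.getLogger(__name__)

    host, keyspace = "music1", "valet"  # illustrative values

    # Preferred: the string is built only if DEBUG is emitted.
    LOG.debug("Storing in music on %s, keyspace %s", host, keyspace)

    # Also works, but formats eagerly even when DEBUG is off.
    LOG.debug("Storing in music on %s, keyspace %s" % (host, keyspace))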

diff --git a/bin/valet-engine b/bin/valet-engine
index 41a4198..a5e37e6 100755
--- a/bin/valet-engine
+++ b/bin/valet-engine
@@ -13,31 +13,34 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-"""Daemon foe Valet Engine."""
-
 import os
 import sys
 import traceback
+
+from oslo_log import log
+
 from valet.common.conf import get_logger
 from valet.engine.optimizer.ostro.ostro import Ostro
 from valet.engine.optimizer.ostro_server.configuration import Config
 from valet.engine.optimizer.ostro_server.daemon import Daemon
 
+LOG = log.getLogger(__name__)
+
 
 class OstroDaemon(Daemon):
-    """Daemon foe Valet Engine."""
+    """Daemon for Valet Engine."""
 
     def run(self):
         """Run the daemon."""
-        self.logger.info("##### Valet Engine is launched #####")
+        LOG.info("Valet Engine is launched")
         try:
-            ostro = Ostro(config, self.logger)
+            ostro = Ostro(config)
         except Exception:
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
+            sys.exit(2)
 
         if ostro.bootstrap() is False:
-            self.logger.error("ostro bootstrap failed")
+            LOG.error("Valet Engine bootstrap failed")
             sys.exit(2)
 
         # Write pidfile
diff --git a/valet/engine/listener/listener_manager.py b/valet/engine/listener/listener_manager.py
index f500410..ec50d14 100644
--- a/valet/engine/listener/listener_manager.py
+++ b/valet/engine/listener/listener_manager.py
@@ -20,11 +20,14 @@ import json
 import pika
 import threading
 import traceback
-from valet.common.conf import get_logger
+
+from oslo_log import log
 from valet.common.music import Music
 from valet.engine.listener.oslo_messages import OsloMessage
 import yaml
 
+LOG = log.getLogger(__name__)
+
 
 class ListenerManager(threading.Thread):
     """Listener Manager Thread Class."""
@@ -35,7 +38,6 @@ class ListenerManager(threading.Thread):
         self.thread_id = _t_id
         self.thread_name = _t_name
         self.config = _config
-        self.listener_logger = get_logger("ostro_listener")
         self.MUSIC = None
 
     def run(self):
@@ -47,8 +49,8 @@ class ListenerManager(threading.Thread):
         credentials = pika.PlainCredentials("guest", "PASSWORD").
         """
         try:
-            self.listener_logger.info("ListenerManager: start " +
-                                      self.thread_name + " ......")
+            LOG.info("ListenerManager: start " +
+                     self.thread_name + " ......")
 
             if self.config.events_listener.store:
 
@@ -58,19 +60,21 @@ class ListenerManager(threading.Thread):
                     'replication_factor': self.config.music.replication_factor,
                     'music_server_retries':
                         self.config.music.music_server_retries,
-                    'logger': self.listener_logger,
+                    'logger': LOG,
                 }
                 engine = Music(**kwargs)
                 engine.create_keyspace(self.config.music.keyspace)
                 self.MUSIC = {'engine': engine,
                               'keyspace': self.config.music.keyspace}
-                self.listener_logger.debug(
-                    'Storing in music on %s, keyspace %s',
-                    self.config.music.hosts, self.config.music.keyspace)
 
-            self.listener_logger.debug('Connecting to %s, with %s',
-                                       self.config.messaging.host,
-                                       self.config.messaging.username)
+                LOG.debug(
+                    'Storing in music on %s, keyspace %s',
+                    self.config.music.hosts, self.config.music.keyspace)
+
+            LOG.debug('Connecting to %s, with %s',
+                      self.config.messaging.host,
+                      self.config.messaging.username)
+
             credentials = pika.PlainCredentials(self.config.messaging.username,
                                                 self.config.messaging.password)
             parameters = pika.ConnectionParameters(self.config.messaging.host,
@@ -103,15 +107,16 @@ class ListenerManager(threading.Thread):
             # Bind the queue to the selected exchange
             channel.queue_bind(exchange=exchange_name, queue=queue_name,
                                routing_key=binding_key)
-            self.listener_logger.info('Channel is bound,listening on %s '
-                                      'exchange %s',
-                                      self.config.messaging.host,
-                                      self.config.events_listener.exchange)
+
+            LOG.info('Channel is bound, listening on %s exchange %s',
+                     self.config.messaging.host,
+                     self.config.events_listener.exchange)
 
             # Start consuming messages
             channel.basic_consume(self.on_message, queue_name)
         except Exception:
-            self.listener_logger.error(traceback.format_exc())
+            LOG.error("Failed to start ListenerManager thread: %s",
+                      traceback.format_exc())
             return
 
         try:
@@ -136,13 +141,14 @@ class ListenerManager(threading.Thread):
             else:
                 return
 
-            self.listener_logger.debug(
+            LOG.debug(
                 "\nMessage No: %s\n", method_frame.delivery_tag)
-            self.listener_logger.debug(
+            LOG.debug(
                 json.dumps(message, sort_keys=True, indent=2))
             channel.basic_ack(delivery_tag=method_frame.delivery_tag)
         except Exception:
-            self.listener_logger.error(traceback.format_exc())
+            LOG.error("Could not specify action for message: %s",
+                      traceback.format_exc())
             return
 
     def is_message_wanted(self, message):
diff --git a/valet/engine/optimizer/app_manager/app_handler.py b/valet/engine/optimizer/app_manager/app_handler.py
index ab69e6a..3dbf0a4 100644
--- a/valet/engine/optimizer/app_manager/app_handler.py
+++ b/valet/engine/optimizer/app_manager/app_handler.py
@@ -18,10 +18,14 @@
 import operator
 import time
 
+from oslo_log import log
+
 from valet.engine.optimizer.app_manager.app_topology import AppTopology
 from valet.engine.optimizer.app_manager.app_topology_base import VM
 from valet.engine.optimizer.app_manager.application import App
 
+LOG = log.getLogger(__name__)
+
 
 class AppHistory(object):
 
@@ -40,12 +44,11 @@ class AppHandler(object):
     placement and updating topology info.
     """
 
-    def __init__(self, _resource, _db, _config, _logger):
+    def __init__(self, _resource, _db, _config):
         """Init App Handler Class."""
         self.resource = _resource
         self.db = _db
         self.config = _config
-        self.logger = _logger
 
         """ current app requested, a temporary copy """
         self.apps = {}
@@ -109,7 +112,7 @@ class AppHandler(object):
         """Add app and set or regenerate topology, return updated topology."""
         self.apps.clear()
 
-        app_topology = AppTopology(self.resource, self.logger)
+        app_topology = AppTopology(self.resource)
 
         stack_id = None
         if "stack_id" in _app.keys():
@@ -124,9 +127,7 @@ class AppHandler(object):
             application_name = "none"
 
         action = _app["action"]
-        if action == "ping":
-            self.logger.info("got ping")
-        elif action == "replan" or action == "migrate":
+        if action == "replan" or action == "migrate":
             re_app = self._regenerate_app_topology(stack_id, _app,
                                                    app_topology, action)
             if re_app is None:
@@ -136,14 +137,15 @@ class AppHandler(object):
                 return None
 
             if action == "replan":
-                self.logger.info("got replan: " + stack_id)
+                LOG.info("got replan: " + stack_id)
             elif action == "migrate":
-                self.logger.info("got migration: " + stack_id)
+                LOG.info("got migration: " + stack_id)
 
             app_id = app_topology.set_app_topology(re_app)
 
             if app_id is None:
-                self.logger.error(app_topology.status)
+                LOG.error("Could not set app topology for regenerated "
+                          "graph. " + app_topology.status)
                 self.status = app_topology.status
                 self.apps[stack_id] = None
                 return None
@@ -151,12 +153,13 @@ class AppHandler(object):
             app_id = app_topology.set_app_topology(_app)
 
             if len(app_topology.candidate_list_map) > 0:
-                self.logger.info("got ad-hoc placement: " + stack_id)
+                LOG.info("got ad-hoc placement: " + stack_id)
             else:
-                self.logger.info("got placement: " + stack_id)
+                LOG.info("got placement: " + stack_id)
 
             if app_id is None:
-                self.logger.error(app_topology.status)
+                LOG.error("Could not set app topology for app graph. " +
+                          app_topology.status)
                 self.status = app_topology.status
                 self.apps[stack_id] = None
                 return None
@@ -216,8 +219,8 @@ class AppHandler(object):
         if self.db is not None:
             for appk, _ in self.apps.iteritems():
                 if self.db.add_app(appk, None) is False:
-                    self.logger.error("AppHandler: error while adding app "
-                                      "info to MUSIC")
+                    LOG.error("AppHandler: error while adding app "
+                              "info to MUSIC")
 
     def get_vm_info(self, _s_uuid, _h_uuid, _host):
         """Return vm_info from database."""
@@ -241,12 +244,12 @@ class AppHandler(object):
 
         old_app = self.db.get_app_info(_stack_id)
         if old_app is None:
-            self.status = "error while getting old_app from MUSIC"
-            self.logger.error(self.status)
+            self.status = "error while getting old_app from MUSIC"
+            LOG.error(self.status)
             return None
         elif len(old_app) == 0:
-            self.status = "cannot find the old app in MUSIC"
-            self.logger.error(self.status)
+            self.status = "cannot find the old app in MUSIC"
+            LOG.error(self.status)
             return None
 
         re_app["action"] = "create"
diff --git a/valet/engine/optimizer/app_manager/app_topology.py b/valet/engine/optimizer/app_manager/app_topology.py
index 8e7c88d..01b0e9e 100644
--- a/valet/engine/optimizer/app_manager/app_topology.py
+++ b/valet/engine/optimizer/app_manager/app_topology.py
@@ -24,7 +24,7 @@ class AppTopology(object):
     calculating and setting optimization.
     """
 
-    def __init__(self, _resource, _logger):
+    def __init__(self, _resource):
         """Init App Topology Class."""
         self.vgroups = {}
         self.vms = {}
@@ -38,14 +38,13 @@ class AppTopology(object):
         self.exclusion_list_map = {}
 
         self.resource = _resource
-        self.logger = _logger
 
         # restriction of host naming convention
         high_level_allowed = True
         if "none" in self.resource.datacenter.region_code_list:
             high_level_allowed = False
 
-        self.parser = Parser(high_level_allowed, self.logger)
+        self.parser = Parser(high_level_allowed)
 
         self.total_CPU = 0
         self.total_mem = 0
diff --git a/valet/engine/optimizer/app_manager/app_topology_parser.py b/valet/engine/optimizer/app_manager/app_topology_parser.py
index e7be90a..efa37bc 100644
--- a/valet/engine/optimizer/app_manager/app_topology_parser.py
+++ b/valet/engine/optimizer/app_manager/app_topology_parser.py
@@ -24,13 +24,15 @@
     OS::Heat::ResourceGroup
     OS::Heat::ResourceGroup
 """
-
+from oslo_log import log
 import six
 
 from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
 from valet.engine.optimizer.app_manager.app_topology_base import VGroup
 from valet.engine.optimizer.app_manager.app_topology_base import VM
 
+LOG = log.getLogger(__name__)
+
 
 class Parser(object):
     """Parser Class.
@@ -41,10 +43,8 @@ class Parser(object):
     OS::Heat::Stack OS::Heat::ResourceGroup
     """
 
-    def __init__(self, _high_level_allowed, _logger):
+    def __init__(self, _high_level_allowed):
         """Init Parser Class."""
-        self.logger = _logger
-
         self.high_level_allowed = _high_level_allowed
 
         self.format_version = None
@@ -109,10 +109,10 @@ class Parser(object):
                     if len(r["locations"]) > 0:
                         self.candidate_list_map[rk] = r["locations"]
                 vms[vm.uuid] = vm
-                self.logger.info("vm = " + vm.uuid)
+                LOG.info("vm = " + vm.uuid)
             elif r["type"] == "OS::Cinder::Volume":
-                self.logger.warn("Parser: do nothing for volume at this "
-                                 "version")
+                LOG.warning("Parser: do nothing for volume at this "
+                            "version")
 
             elif r["type"] == "ATT::Valet::GroupAssignment":
                 vgroup = VGroup(self.stack_id, rk)
@@ -154,7 +154,7 @@ class Parser(object):
                     return {}, {}
                 vgroups[vgroup.uuid] = vgroup
                 msg = "group = %s, type = %s"
-                self.logger.info(msg % (vgroup.name, vgroup.vgroup_type))
+                LOG.info(msg % (vgroup.name, vgroup.vgroup_type))
 
         if self._merge_diversity_groups(_elements, vgroups, vms) is False:
             return {}, {}
diff --git a/valet/engine/optimizer/db_connect/music_handler.py b/valet/engine/optimizer/db_connect/music_handler.py
index e3f7115..3f58d2d 100644
--- a/valet/engine/optimizer/db_connect/music_handler.py
+++ b/valet/engine/optimizer/db_connect/music_handler.py
@@ -17,9 +17,14 @@
 
 import json
 import operator
+
+from oslo_log import log
+
 from valet.common.music import Music
 from valet.engine.optimizer.db_connect.event import Event
 
+LOG = log.getLogger(__name__)
+
 
 def ensurekey(d, k):
     return d.setdefault(k, {})
@@ -33,21 +38,19 @@ class MusicHandler(object):
     database for valet and returns/deletes/updates objects within it.
     """
 
-    def __init__(self, _config, _logger):
+    def __init__(self, _config):
         """Init Music Handler."""
         self.config = _config
-        self.logger = _logger
 
         self.music = Music(
             hosts=self.config.hosts, port=self.config.port,
             replication_factor=self.config.replication_factor,
-            music_server_retries=self.config.music_server_retries,
-            logger=self.logger)
+            music_server_retries=self.config.music_server_retries)
         if self.config.hosts is not None:
-            self.logger.info("DB: music host = %s", self.config.hosts)
+            LOG.info("DB: music host = %s", self.config.hosts)
         if self.config.replication_factor is not None:
-            self.logger.info("DB: music replication factor = %s ",
-                             str(self.config.replication_factor))
+            LOG.info("DB: music replication factor = %s ",
+                     str(self.config.replication_factor))
 
     # FIXME(GJ): this may not need
     def init_db(self):
@@ -57,12 +60,12 @@ class MusicHandler(object):
         necessary tables with the proper schemas in Music using API calls.
         Return True if no exceptions are caught.
         """
-        self.logger.info("MusicHandler.init_db: create table")
+        LOG.info("MusicHandler.init_db: create table")
 
         try:
             self.music.create_keyspace(self.config.db_keyspace)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create keyspace: " + str(e))
             return False
 
         schema = {
@@ -74,7 +77,7 @@ class MusicHandler(object):
             self.music.create_table(self.config.db_keyspace,
                                     self.config.db_request_table, schema)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create request table: " + str(e))
             return False
 
         schema = {
@@ -86,7 +89,7 @@ class MusicHandler(object):
             self.music.create_table(self.config.db_keyspace,
                                     self.config.db_response_table, schema)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create response table: " + str(e))
             return False
 
         schema = {
@@ -100,7 +103,7 @@ class MusicHandler(object):
             self.music.create_table(self.config.db_keyspace,
                                     self.config.db_event_table, schema)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create event table: " + str(e))
             return False
 
         schema = {
@@ -112,7 +115,7 @@ class MusicHandler(object):
             self.music.create_table(self.config.db_keyspace,
                                     self.config.db_resource_table, schema)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create resource table: " + str(e))
             return False
 
         schema = {
@@ -124,7 +127,7 @@ class MusicHandler(object):
             self.music.create_table(self.config.db_keyspace,
                                     self.config.db_app_table, schema)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create app table: " + str(e))
             return False
 
         schema = {
@@ -137,7 +140,7 @@ class MusicHandler(object):
             self.music.create_table(self.config.db_keyspace,
                                     self.config.db_uuid_table, schema)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create uuid table: " + str(e))
             return False
 
         return True
@@ -157,7 +160,7 @@ class MusicHandler(object):
             events = self.music.read_all_rows(self.config.db_keyspace,
                                               self.config.db_event_table)
         except Exception as e:
-            self.logger.error("DB:event: " + str(e))
+            LOG.error("DB:event: " + str(e))
             # FIXME(GJ): return None?
             return {}
 
@@ -168,13 +171,13 @@ class MusicHandler(object):
                 method = row['method']
                 args_data = row['args']
 
-                self.logger.debug("MusicHandler.get_events: event (" +
-                                  event_id + ") is entered")
+                LOG.debug("MusicHandler.get_events: event (" +
+                          event_id + ") is entered")
 
                 if exchange != "nova":
                     if self.delete_event(event_id) is False:
                         return None
-                    self.logger.debug(
+                    LOG.debug(
                         "MusicHandler.get_events: event exchange "
                         "(" + exchange + ") is not supported")
                     continue
@@ -183,23 +186,23 @@ class MusicHandler(object):
                                                            'instance':
                     if self.delete_event(event_id) is False:
                         return None
-                    self.logger.debug("MusicHandler.get_events: event method "
-                                      "(" + method + ") is not considered")
+                    LOG.debug("MusicHandler.get_events: event method "
+                              "(" + method + ") is not considered")
                     continue
 
                 if len(args_data) == 0:
                     if self.delete_event(event_id) is False:
                         return None
-                    self.logger.debug("MusicHandler.get_events: "
-                                      "event does not have args")
+                    LOG.debug("MusicHandler.get_events: "
+                              "event does not have args")
                     continue
 
                 try:
                     args = json.loads(args_data)
                 except (ValueError, KeyError, TypeError):
-                    self.logger.warn("MusicHandler.get_events: error while "
-                                     "decoding to JSON event = " + method +
-                                     ":" + event_id)
+                    LOG.warning("MusicHandler.get_events: error while "
+                                "decoding to JSON event = " + method +
+                                ":" + event_id)
                     continue
 
                 # TODO(lamt) this block of code can use refactoring
@@ -227,11 +230,11 @@ class MusicHandler(object):
                                             event_list.append(e)
                                         else:
                                             msg = "unknown vm_state = %s"
-                                            self.logger.warn(
+                                            LOG.warning(
                                                 msg % change_data["vm_state"])
                                             if 'uuid' in change_data.keys():
                                                 msg = "    uuid = %s"
-                                                self.logger.warn(
+                                                LOG.warning(
                                                     msg % change_data['uuid'])
                                             if not self.delete_event(event_id):
                                                 return None
@@ -296,20 +299,20 @@ class MusicHandler(object):
                        e.host is None or e.host == "none" or \
                        e.vcpus == -1 or e.mem == -1:
                         error_event_list.append(e)
-                        self.logger.warn("MusicHandler.get_events: data "
-                                         "missing in instance object event")
+                        LOG.warning("MusicHandler.get_events: data "
+                                    "missing in instance object event")
 
                 elif e.object_name == 'ComputeNode':
                     if e.host is None or e.host == "none":
                         error_event_list.append(e)
-                        self.logger.warn("MusicHandler.get_events: data "
-                                         "missing in compute object event")
+                        LOG.warning("MusicHandler.get_events: data "
+                                    "missing in compute object event")
 
             elif e.method == "build_and_run_instance":
                 if e.uuid is None or e.uuid == "none":
                     error_event_list.append(e)
-                    self.logger.warn("MusicHandler.get_events: data missing "
-                                     "in build event")
+                    LOG.warning("MusicHandler.get_events: data missing "
+                                "in build event")
 
         if len(error_event_list) > 0:
             event_list[:] = [
@@ -327,7 +330,7 @@ class MusicHandler(object):
                                              self.config.db_event_table,
                                              'timestamp', _event_id)
         except Exception as e:
-            self.logger.error("DB: while deleting event: " + str(e))
+            LOG.error("DB: while deleting event: " + str(e))
             return False
 
         return True
@@ -342,7 +345,7 @@ class MusicHandler(object):
             row = self.music.read_row(self.config.db_keyspace,
                                       self.config.db_uuid_table, 'uuid', _uuid)
         except Exception as e:
-            self.logger.error("DB: while reading uuid: " + str(e))
+            LOG.error("DB: while reading uuid: " + str(e))
             return None
 
         if len(row) > 0:
@@ -376,7 +379,7 @@ class MusicHandler(object):
             self.music.create_row(self.config.db_keyspace,
                                   self.config.db_uuid_table, data)
         except Exception as e:
-            self.logger.error("DB: while inserting uuid: " + str(e))
+            LOG.error("DB: while inserting uuid: " + str(e))
             return False
 
         return True
@@ -388,7 +391,7 @@ class MusicHandler(object):
                                              self.config.db_uuid_table, 'uuid',
                                              _k)
         except Exception as e:
-            self.logger.error("DB: while deleting uuid: " + str(e))
+            LOG.error("DB: while deleting uuid: " + str(e))
             return False
 
         return True
@@ -402,16 +405,15 @@ class MusicHandler(object):
             requests = self.music.read_all_rows(self.config.db_keyspace,
                                                 self.config.db_request_table)
         except Exception as e:
-            self.logger.error("DB: while reading requests: " + str(e))
+            LOG.error("DB: while reading requests: " + str(e))
             # FIXME(GJ): return None?
             return {}
 
         if len(requests) > 0:
-            self.logger.info("MusicHandler.get_requests: placement request "
-                             "arrived")
+            LOG.info("MusicHandler.get_requests: placement request arrived")
 
             for _, row in requests.iteritems():
-                self.logger.info("    request_id = " + row['stack_id'])
+                LOG.info("    request_id = " + row['stack_id'])
 
                 r_list = json.loads(row['request'])
                 for r in r_list:
@@ -431,8 +433,8 @@ class MusicHandler(object):
                 self.music.create_row(self.config.db_keyspace,
                                       self.config.db_response_table, data)
             except Exception as e:
-                self.logger.error("MUSIC error while putting placement "
-                                  "result: " + str(e))
+                LOG.error("MUSIC error while putting placement "
+                          "result: " + str(e))
                 return False
 
         for appk in _result.keys():
@@ -441,8 +443,8 @@ class MusicHandler(object):
                                                  self.config.db_request_table,
                                                  'stack_id', appk)
             except Exception as e:
-                self.logger.error("MUSIC error while deleting handled "
-                                  "request: " + str(e))
+                LOG.error("MUSIC error while deleting handled "
+                          "request: " + str(e))
                 return False
 
         return True
@@ -455,10 +457,10 @@ class MusicHandler(object):
         try:
             row = self.music.read_row(self.config.db_keyspace,
                                       self.config.db_resource_table,
-                                      'site_name', _k, self.logger)
+                                      'site_name', _k)
         except Exception as e:
-            self.logger.error("MUSIC error while reading resource status: " +
-                              str(e))
+            LOG.error("MUSIC error while reading resource status: " +
+                      str(e))
             return None
 
         if len(row) > 0:
@@ -475,8 +477,8 @@ class MusicHandler(object):
                                       self.config.db_resource_table,
                                       'site_name', _k)
         except Exception as e:
-            self.logger.error("MUSIC error while reading resource status: " +
-                              str(e))
+            LOG.error("MUSIC error while reading resource status: " +
+                      str(e))
             return False
 
         json_resource = {}
@@ -525,8 +527,8 @@ class MusicHandler(object):
                                                  self.config.db_resource_table,
                                                  'site_name', _k)
             except Exception as e:
-                self.logger.error("MUSIC error while deleting resource "
-                                  "status: " + str(e))
+                LOG.error("MUSIC error while deleting resource "
+                          "status: " + str(e))
                 return False
 
         else:
@@ -541,10 +543,10 @@ class MusicHandler(object):
             self.music.create_row(self.config.db_keyspace,
                                   self.config.db_resource_table, data)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not create row in resource table: " + str(e))
             return False
 
-        self.logger.info("DB: resource status updated")
+        LOG.info("DB: resource status updated")
 
         return True
 
@@ -555,7 +557,7 @@ class MusicHandler(object):
                 self.config.db_keyspace, self.config.db_app_table,
                 'stack_id', _k)
         except Exception as e:
-            self.logger.error("DB: while deleting app: " + str(e))
+            LOG.error("DB: while deleting app: " + str(e))
             return False
 
         if _app_data is not None:
@@ -568,7 +570,7 @@ class MusicHandler(object):
                 self.music.create_row(self.config.db_keyspace,
                                       self.config.db_app_table, data)
             except Exception as e:
-                self.logger.error("DB: while inserting app: " + str(e))
+                LOG.error("DB: while inserting app: " + str(e))
                 return False
 
         return True
@@ -583,7 +585,7 @@ class MusicHandler(object):
                                       self.config.db_app_table, 'stack_id',
                                       _s_uuid)
         except Exception as e:
-            self.logger.error("DB: while reading app info: " + str(e))
+            LOG.error("DB: while reading app info: " + str(e))
             return None
 
         if len(row) > 0:
@@ -606,7 +608,7 @@ class MusicHandler(object):
                                       self.config.db_app_table, 'stack_id',
                                       _s_uuid)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not read row in app table: " + str(e))
             return None
 
         if len(row) > 0:
@@ -620,25 +622,25 @@ class MusicHandler(object):
                         if vm["host"] != _host:
                             vm["planned_host"] = vm["host"]
                             vm["host"] = _host
-                            self.logger.warn("db: conflicted placement "
-                                             "decision from Ostro")
+                            LOG.warning("DB: conflicted placement "
+                                        "decision from Ostro")
                             # TODO(GY): affinity, diversity, exclusivity
                             # validation check
                             updated = True
                     else:
                         vm["status"] = "scheduled"
-                        self.logger.warn("DB: vm was deleted")
+                        LOG.warning("DB: vm was deleted")
                         updated = True
 
                     vm_info = vm
                     break
             else:
-                self.logger.error("MusicHandler.get_vm_info: vm is missing "
-                                  "from stack")
+                LOG.error("MusicHandler.get_vm_info: vm is missing "
+                          "from stack")
 
         else:
-            self.logger.warn("MusicHandler.get_vm_info: not found stack for "
-                             "update = " + _s_uuid)
+            LOG.warning("MusicHandler.get_vm_info: stack not found for "
+                        "update = " + _s_uuid)
 
         if updated is True:
             if self.add_app(_s_uuid, json_app) is False:
@@ -657,7 +659,7 @@ class MusicHandler(object):
                                       self.config.db_app_table, 'stack_id',
                                       _s_uuid)
         except Exception as e:
-            self.logger.error("DB: " + str(e))
+            LOG.error("DB could not read row in app table: " + str(e))
             return False
 
         if len(row) > 0:
@@ -669,18 +671,18 @@ class MusicHandler(object):
                 if vmk == _h_uuid:
                     if vm["status"] != "deleted":
                         vm["status"] = "deleted"
-                        self.logger.warn("DB: deleted marked")
+                        LOG.warning("DB: vm marked as deleted")
                         updated = True
                     else:
-                        self.logger.warn("DB: vm was already deleted")
+                        LOG.warning("DB: vm was already deleted")
 
                     break
             else:
-                self.logger.error("MusicHandler.update_vm_info: vm is missing "
-                                  "from stack")
+                LOG.error("MusicHandler.update_vm_info: vm is missing "
+                          "from stack")
         else:
-            self.logger.warn("MusicHandler.update_vm_info: not found "
-                             "stack for update = " + _s_uuid)
+            LOG.warning("MusicHandler.update_vm_info: stack not found "
+                        "for update = " + _s_uuid)
 
         if updated is True:
             if self.add_app(_s_uuid, json_app) is False:
diff --git a/valet/engine/optimizer/ostro/constraint_solver.py b/valet/engine/optimizer/ostro/constraint_solver.py
index 8742af4..00cb241 100644
--- a/valet/engine/optimizer/ostro/constraint_solver.py
+++ b/valet/engine/optimizer/ostro/constraint_solver.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from oslo_log import log
 
 from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
 from valet.engine.optimizer.app_manager.app_topology_base import VGroup
@@ -24,20 +25,21 @@ from valet.engine.optimizer.ostro.openstack_filters import CoreFilter
 from valet.engine.optimizer.ostro.openstack_filters import DiskFilter
 from valet.engine.optimizer.ostro.openstack_filters import RamFilter
 
+LOG = log.getLogger(__name__)
+
 
 class ConstraintSolver(object):
     """ConstraintSolver."""
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
         """Instantiate filters to help enforce constraints."""
-        self.logger = _logger
 
-        self.openstack_AZ = AvailabilityZoneFilter(self.logger)
-        self.openstack_AIES = AggregateInstanceExtraSpecsFilter(self.logger)
-        self.openstack_R = RamFilter(self.logger)
-        self.openstack_C = CoreFilter(self.logger)
-        self.openstack_D = DiskFilter(self.logger)
+        self.openstack_AZ = AvailabilityZoneFilter()
+        self.openstack_AIES = AggregateInstanceExtraSpecsFilter()
+        self.openstack_R = RamFilter()
+        self.openstack_C = CoreFilter()
+        self.openstack_D = DiskFilter()
 
         self.status = "success"
 
@@ -57,11 +59,11 @@ class ConstraintSolver(object):
                 candidate_list.append(r)
         if len(candidate_list) == 0:
             self.status = "no candidate for node = " + _n.node.name
-            self.logger.warn(self.status)
+            LOG.warning(self.status)
             return candidate_list
         else:
-            self.logger.debug("ConstraintSolver: num of candidates = " +
-                              str(len(candidate_list)))
+            LOG.debug("ConstraintSolver: num of candidates = " +
+                      str(len(candidate_list)))
 
         """Availability zone constraint."""
         if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
@@ -72,7 +74,7 @@ class ConstraintSolver(object):
                 if len(candidate_list) == 0:
                     self.status = "violate availability zone constraint for " \
                                   "node = " + _n.node.name
-                    self.logger.error("ConstraintSolver: " + self.status)
+                    LOG.error("ConstraintSolver: " + self.status)
                     return candidate_list
 
         """Host aggregate constraint."""
@@ -82,7 +84,7 @@ class ConstraintSolver(object):
                 if len(candidate_list) == 0:
                     self.status = "violate host aggregate constraint for " \
                                   "node = " + _n.node.name
-                    self.logger.error("ConstraintSolver: " + self.status)
+                    LOG.error("ConstraintSolver: " + self.status)
                     return candidate_list
 
         """CPU capacity constraint."""
@@ -91,7 +93,7 @@ class ConstraintSolver(object):
             if len(candidate_list) == 0:
                 self.status = "violate cpu capacity constraint for " \
                               "node = " + _n.node.name
-                self.logger.error("ConstraintSolver: " + self.status)
+                LOG.error("ConstraintSolver: " + self.status)
                 return candidate_list
 
         """Memory capacity constraint."""
@@ -100,7 +102,7 @@ class ConstraintSolver(object):
             if len(candidate_list) == 0:
                 self.status = "violate memory capacity constraint for " \
                               "node = " + _n.node.name
-                self.logger.error("ConstraintSolver: " + self.status)
+                LOG.error("ConstraintSolver: " + self.status)
                 return candidate_list
 
         """Local disk capacity constraint."""
@@ -109,7 +111,7 @@ class ConstraintSolver(object):
             if len(candidate_list) == 0:
                 self.status = "violate local disk capacity constraint for " \
                               "node = " + _n.node.name
-                self.logger.error("ConstraintSolver: " + self.status)
+                LOG.error("ConstraintSolver: " + self.status)
                 return candidate_list
 
         """ diversity constraint """
@@ -125,7 +127,7 @@ class ConstraintSolver(object):
             if len(candidate_list) == 0:
                 self.status = "violate diversity constraint for " \
                               "node = " + _n.node.name
-                self.logger.error("ConstraintSolver: " + self.status)
+                LOG.error("ConstraintSolver: " + self.status)
                 return candidate_list
             else:
                 self._constrain_diversity(_level, _n, _node_placements,
@@ -133,7 +135,7 @@ class ConstraintSolver(object):
                 if len(candidate_list) == 0:
                     self.status = "violate diversity constraint for " \
                                   "node = " + _n.node.name
-                    self.logger.error("ConstraintSolver: " + self.status)
+                    LOG.error("ConstraintSolver: " + self.status)
                     return candidate_list
 
         """Exclusivity constraint."""
@@ -142,7 +144,7 @@ class ConstraintSolver(object):
         if len(exclusivities) > 1:
             self.status = "violate exclusivity constraint (more than one " \
                           "exclusivity) for node = " + _n.node.name
-            self.logger.error("ConstraintSolver: " + self.status)
+            LOG.error("ConstraintSolver: " + self.status)
             return []
         else:
             if len(exclusivities) == 1:
@@ -153,14 +155,14 @@ class ConstraintSolver(object):
                     if len(candidate_list) == 0:
                         self.status = "violate exclusivity constraint for " \
                                       "node = " + _n.node.name
-                        self.logger.error("ConstraintSolver: " + self.status)
+                        LOG.error("ConstraintSolver: " + self.status)
                         return candidate_list
             else:
                 self._constrain_non_exclusivity(_level, candidate_list)
                 if len(candidate_list) == 0:
                     self.status = "violate non-exclusivity constraint for " \
                                   "node = " + _n.node.name
-                    self.logger.error("ConstraintSolver: " + self.status)
+                    LOG.error("ConstraintSolver: " + self.status)
                     return candidate_list
 
         """Affinity constraint."""
@@ -173,7 +175,7 @@ class ConstraintSolver(object):
                     if len(candidate_list) == 0:
                         self.status = "violate affinity constraint for " \
                                       "node = " + _n.node.name
-                        self.logger.error("ConstraintSolver: " + self.status)
+                        LOG.error("ConstraintSolver: " + self.status)
                         return candidate_list
 
         return candidate_list
diff --git a/valet/engine/optimizer/ostro/openstack_filters.py b/valet/engine/optimizer/ostro/openstack_filters.py
index 88f1867..faeddd2 100644
--- a/valet/engine/optimizer/ostro/openstack_filters.py
+++ b/valet/engine/optimizer/ostro/openstack_filters.py
@@ -27,9 +27,8 @@ class AggregateInstanceExtraSpecsFilter(object):
     # Aggregate data and instance type does not change within a request
     run_filter_once_per_request = True
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
-        self.logger = _logger
 
     def host_passes(self, _level, _host, _v):
         """Return a list of hosts that can create instance_type."""
@@ -109,9 +108,8 @@ class AvailabilityZoneFilter(object):
     # Availability zones do not change within a request
     run_filter_once_per_request = True
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
-        self.logger = _logger
 
     def host_passes(self, _level, _host, _v):
         """Return True if all availalibility zones in _v exist in the host."""
@@ -138,9 +136,8 @@ class AvailabilityZoneFilter(object):
 class RamFilter(object):
     """RamFilter."""
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
-        self.logger = _logger
 
     def host_passes(self, _level, _host, _v):
         """Return True if host has sufficient available RAM."""
@@ -161,9 +158,8 @@ class RamFilter(object):
 class CoreFilter(object):
     """CoreFilter."""
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
-        self.logger = _logger
 
     def host_passes(self, _level, _host, _v):
         """Return True if host has sufficient CPU cores."""
@@ -185,9 +181,8 @@ class CoreFilter(object):
 class DiskFilter(object):
     """DiskFilter."""
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
-        self.logger = _logger
 
     def host_passes(self, _level, _host, _v):
         """Filter based on disk usage."""
diff --git a/valet/engine/optimizer/ostro/optimizer.py b/valet/engine/optimizer/ostro/optimizer.py
index 67ffd3c..37602c4 100644
--- a/valet/engine/optimizer/ostro/optimizer.py
+++ b/valet/engine/optimizer/ostro/optimizer.py
@@ -12,22 +12,25 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from oslo_log import log
+
 from valet.engine.optimizer.app_manager.app_topology_base import VGroup
 from valet.engine.optimizer.app_manager.app_topology_base import VM
 from valet.engine.optimizer.ostro.search import Search
 
+LOG = log.getLogger(__name__)
+
 
 # FIXME(GJ): make search algorithm pluggable
 # NOTE(GJ): do not deal with Volume placements at this version
 class Optimizer(object):
     """Optimizer."""
 
-    def __init__(self, _resource, _logger):
+    def __init__(self, _resource):
         """Initialization."""
         self.resource = _resource
-        self.logger = _logger
 
-        self.search = Search(self.logger)
+        self.search = Search()
 
         self.status = "success"
 
@@ -80,8 +83,7 @@ class Optimizer(object):
                     elif v.level == "cluster":
                         placement_map[v] = node_placement.cluster_name
 
-                self.logger.debug("    " + v.name + " placed in " +
-                                  placement_map[v])
+                LOG.debug(v.name + " placed in " + placement_map[v])
 
             self._update_resource_status(uuid_map)
 
diff --git a/valet/engine/optimizer/ostro/ostro.py b/valet/engine/optimizer/ostro/ostro.py
index fa8f6cf..051c35c 100644
--- a/valet/engine/optimizer/ostro/ostro.py
+++ b/valet/engine/optimizer/ostro/ostro.py
@@ -12,13 +12,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-"""Valet Engine."""
-
-from oslo_config import cfg
 import threading
 import time
 import traceback
+
+from oslo_config import cfg
+from oslo_log import log
+
 from valet.engine.listener.listener_manager import ListenerManager
 from valet.engine.optimizer.app_manager.app_handler import AppHandler
 from valet.engine.optimizer.app_manager.app_topology_base import VM
@@ -29,37 +29,34 @@ from valet.engine.resource_manager.resource import Resource
 from valet.engine.resource_manager.topology_manager import TopologyManager
 
 CONF = cfg.CONF
+LOG = log.getLogger(__name__)
 
 
 class Ostro(object):
     """Valet Engine."""
 
-    def __init__(self, _config, _logger):
+    def __init__(self, _config):
         """Initialization."""
         self.config = _config
-        self.logger = _logger
 
-        self.db = MusicHandler(self.config, self.logger)
+        self.db = MusicHandler(self.config)
         if self.db.init_db() is False:
-            self.logger.error("error while initializing MUSIC database")
+            LOG.error("error while initializing MUSIC database")
 
-        self.resource = Resource(self.db, self.config, self.logger)
-
-        self.app_handler = AppHandler(self.resource, self.db, self.config,
-                                      self.logger)
-
-        self.optimizer = Optimizer(self.resource, self.logger)
+        self.resource = Resource(self.db, self.config)
+        self.app_handler = AppHandler(self.resource, self.db, self.config)
+        self.optimizer = Optimizer(self.resource)
 
         self.data_lock = threading.Lock()
         self.thread_list = []
 
         self.topology = TopologyManager(
             1, "Topology", self.resource,
-            self.data_lock, self.config, self.logger)
+            self.data_lock, self.config)
 
         self.compute = ComputeManager(
             2, "Compute", self.resource,
-            self.data_lock, self.config, self.logger)
+            self.data_lock, self.config)
 
         self.listener = ListenerManager(3, "Listener", CONF)
 
@@ -69,7 +66,7 @@ class Ostro(object):
         self.batch_store_trigger = 10  # sec
 
     def run_ostro(self):
-        self.logger.info("start Ostro ......")
+        LOG.info("start Ostro ......")
 
         self.topology.start()
         self.compute.start()
@@ -114,7 +111,7 @@ class Ostro(object):
         for t in self.thread_list:
             t.join()
 
-        self.logger.info("exit Ostro")
+        LOG.info("exit Ostro")
 
     def stop_ostro(self):
         """Stop main engine process."""
@@ -131,22 +128,22 @@ class Ostro(object):
 
     def bootstrap(self):
         """Start bootstrap and update the engine's resource topology."""
-        self.logger.info("Ostro.bootstrap: start bootstrap")
+        LOG.info("Ostro.bootstrap: start bootstrap")
 
         try:
             resource_status = self.db.get_resource_status(
                 self.resource.datacenter.name)
             if resource_status is None:
-                self.logger.error("failed to read from table: %s" %
-                                  self.config.db_resource_table)
+                LOG.error("failed to read from table: %s" %
+                          self.config.db_resource_table)
                 return False
 
             if len(resource_status) > 0:
-                self.logger.info("bootstrap from DB")
+                LOG.info("bootstrap from DB")
                 if not self.resource.bootstrap_from_db(resource_status):
-                    self.logger.error("failed to parse bootstrap data!")
+                    LOG.error("failed to parse bootstrap data!")
 
-            self.logger.info("bootstrap from OpenStack")
+            LOG.info("bootstrap from OpenStack")
             if not self._set_hosts():
                 return False
 
@@ -159,42 +156,42 @@ class Ostro(object):
             self.resource.update_topology()
 
         except Exception:
-            self.logger.critical("Ostro.bootstrap failed: %s" %
-                                 traceback.format_exc())
+            LOG.critical("Ostro.bootstrap failed: %s",
+                         traceback.format_exc())
 
-        self.logger.info("done bootstrap")
+        LOG.info("done bootstrap")
 
         return True
 
     def _set_topology(self):
         if not self.topology.set_topology():
-            self.logger.error("failed to read datacenter topology")
+            LOG.error("failed to read datacenter topology")
             return False
 
-        self.logger.info("done topology bootstrap")
+        LOG.info("done topology bootstrap")
         return True
 
     def _set_hosts(self):
         if not self.compute.set_hosts():
-            self.logger.error("failed to read hosts from OpenStack (Nova)")
+            LOG.error("failed to read hosts from OpenStack (Nova)")
             return False
 
-        self.logger.info("done hosts & groups bootstrap")
+        LOG.info("done hosts & groups bootstrap")
         return True
 
     def _set_flavors(self):
         if not self.compute.set_flavors():
-            self.logger.error("failed to read flavors from OpenStack (Nova)")
+            LOG.error("failed to read flavors from OpenStack (Nova)")
             return False
 
-        self.logger.info("done flavors bootstrap")
+        LOG.info("done flavors bootstrap")
         return True
 
     # TODO(GJ): evaluate delay
     def place_app(self, _app_data):
         for req in _app_data:
             if req["action"] == "query":
-                self.logger.info("start query")
+                LOG.info("start query")
 
                 query_result = self._query(req)
                 result = self._get_json_results("query", "ok",
@@ -203,9 +200,9 @@ class Ostro(object):
                 if not self.db.put_result(result):
                     return False
 
-                self.logger.info("done query")
+                LOG.info("done query")
             else:
-                self.logger.info("start app placement")
+                LOG.info("start app placement")
 
                 result = None
                 (decision_key, old_decision) = self.app_handler.check_history(
@@ -221,14 +218,13 @@ class Ostro(object):
                     if decision_key is not None:
                         self.app_handler.put_history(decision_key, result)
                 else:
-                    self.logger.warn("decision(%s) already made" %
-                                     decision_key)
+                    LOG.info("decision(%s) already made" % decision_key)
                     result = old_decision
 
                 if not self.db.put_result(result):
                     return False
 
-                self.logger.info("done app placement")
+                LOG.info("done app placement")
 
         return True
 
@@ -247,11 +243,11 @@ class Ostro(object):
                         query_result[_q["stack_id"]] = vm_list
                     else:
                         self.status = "unknown paramenter in query"
-                        self.logger.warn("unknown paramenter in query")
+                        LOG.warning("unknown parameter in query")
                         query_result[_q["stack_id"]] = None
                 else:
                     self.status = "no paramenter in query"
-                    self.logger.warn("no parameters in query")
+                    LOG.warning("no parameters in query")
                     query_result[_q["stack_id"]] = None
             elif _q["type"] == "all_groups":
                 self.data_lock.acquire()
@@ -259,11 +255,11 @@ class Ostro(object):
                 self.data_lock.release()
             else:
                 self.status = "unknown query type"
-                self.logger.warn("unknown query type")
+                LOG.warning("unknown query type")
                 query_result[_q["stack_id"]] = None
         else:
             self.status = "unknown type in query"
-            self.logger.warn("no type in query")
+            LOG.warning("no type in query")
             query_result[_q["stack_id"]] = None
 
         return query_result
@@ -284,7 +280,7 @@ class Ostro(object):
             if vm_id[2] != "none":   # if physical_uuid != 'none'
                 vm_list.append(vm_id[2])
             else:
-                self.logger.warn("found pending vms in this group while query")
+                LOG.warning("found pending vms in this group while query")
 
         return vm_list
 
@@ -301,20 +297,20 @@ class Ostro(object):
         app_topology = self.app_handler.add_app(_app)
         if app_topology is None:
             self.status = self.app_handler.status
-            self.logger.error("Ostro._place_app: error while register"
-                              "requested apps: " + self.app_handler.status)
+            LOG.error("Ostro._place_app: error while registering "
+                      "requested apps: " + self.app_handler.status)
             return None
 
         """Check and set vm flavor information."""
         for _, vm in app_topology.vms.iteritems():
             if self._set_vm_flavor_information(vm) is False:
                 self.status = "fail to set flavor information"
-                self.logger.error(self.status)
+                LOG.error(self.status)
                 return None
         for _, vg in app_topology.vgroups.iteritems():
             if self._set_vm_flavor_information(vg) is False:
                 self.status = "fail to set flavor information in a group"
-                self.logger.error(self.status)
+                LOG.error(self.status)
                 return None
 
         self.data_lock.acquire()
@@ -359,8 +355,8 @@ class Ostro(object):
         flavor = self.resource.get_flavor(_vm.flavor)
 
         if flavor is None:
-            self.logger.warn("Ostro._set_vm_flavor_properties: does not exist "
-                             "flavor (" + _vm.flavor + ") and try to refetch")
+            LOG.warning("Ostro._set_vm_flavor_properties: does not exist "
+                        "flavor (" + _vm.flavor + ") and try to refetch")
 
             # Reset flavor resource and try again
             if self._set_flavors() is False:
@@ -395,14 +391,14 @@ class Ostro(object):
         for e in _event_list:
             if e.host is not None and e.host != "none":
                 if self._check_host(e.host) is False:
-                    self.logger.warn("Ostro.handle_events: host (" + e.host +
-                                     ") related to this event not exists")
+                    LOG.warning("Ostro.handle_events: host (" + e.host +
+                                ") related to this event not exists")
                     continue
 
             if e.method == "build_and_run_instance":
                 # VM is created (from stack)
-                self.logger.info("Ostro.handle_events: got build_and_run "
-                                 "event for %s" % e.uuid)
+                LOG.info("Ostro.handle_events: got build_and_run "
+                         "event for %s" % e.uuid)
                 if self.db.put_uuid(e) is False:
                     self.data_lock.release()
                     return False
@@ -417,22 +413,22 @@ class Ostro(object):
                         return False
 
                     if e.vm_state == "active":
-                        self.logger.info("Ostro.handle_events: got instance_"
-                                         "active event for " + e.uuid)
+                        LOG.info("Ostro.handle_events: got instance_"
+                                 "active event for " + e.uuid)
                         vm_info = self.app_handler.get_vm_info(
                             orch_id[1], orch_id[0], e.host)
                         if vm_info is None:
-                            self.logger.error("Ostro.handle_events: error "
-                                              "while getting app info "
-                                              "from MUSIC")
+                            LOG.error("Ostro.handle_events: error "
+                                      "while getting app info "
+                                      "from MUSIC")
                             self.data_lock.release()
                             return False
 
                         if len(vm_info) == 0:
                             # Stack not found because vm is created by the
                             # other stack
-                            self.logger.warn("EVENT: no vm_info found in app "
-                                             "placement record")
+                            LOG.warning("EVENT: no vm_info found in app "
+                                        "placement record")
                             self._add_vm_to_host(
                                 e.uuid, orch_id[0], e.host, e.vcpus,
                                 e.mem, e.local_disk)
@@ -440,8 +436,8 @@ class Ostro(object):
                             if ("planned_host" in vm_info.keys() and
                                     vm_info["planned_host"] != e.host):
                                 # VM is activated in the different host
-                                self.logger.warn("EVENT: vm activated in the "
-                                                 "different host")
+                                LOG.warning("EVENT: vm activated in the "
+                                            "different host")
                                 self._add_vm_to_host(
                                     e.uuid, orch_id[0], e.host, e.vcpus,
                                     e.mem, e.local_disk)
@@ -461,8 +457,8 @@ class Ostro(object):
                                 # Possibly the vm deleted in the host while
                                 # batch cleanup
                                 if not self._check_h_uuid(orch_id[0], e.host):
-                                    self.logger.warn("EVENT: planned vm was "
-                                                     "deleted")
+                                    LOG.warning("EVENT: planned vm was "
+                                                "deleted")
                                     if self._check_uuid(e.uuid, e.host):
                                         self._update_h_uuid_in_host(orch_id[0],
                                                                     e.uuid,
@@ -470,7 +466,7 @@ class Ostro(object):
                                         self._update_h_uuid_in_logical_groups(
                                             orch_id[0], e.uuid, e.host)
                                 else:
-                                    self.logger.info(
+                                    LOG.info(
                                         "EVENT: vm activated as planned")
                                     self._update_uuid_in_host(
                                         orch_id[0], e.uuid, e.host)
@@ -480,8 +476,8 @@ class Ostro(object):
                         resource_updated = True
 
                     elif e.vm_state == "deleted":
-                        self.logger.info("EVENT: got instance_delete for %s" %
-                                         e.uuid)
+                        LOG.info("EVENT: got instance_delete for %s" %
+                                 e.uuid)
 
                         self._remove_vm_from_host(
                             e.uuid, orch_id[0], e.host, e.vcpus,
@@ -491,24 +487,24 @@ class Ostro(object):
 
                         if not self.app_handler.update_vm_info(
                                 orch_id[1], orch_id[0]):
-                            self.logger.error("EVENT: error while updating "
-                                              "app in MUSIC")
+                            LOG.error("EVENT: error while updating "
+                                      "app in MUSIC")
                             self.data_lock.release()
                             return False
 
                         resource_updated = True
 
                     else:
-                        self.logger.warn("Ostro.handle_events: unknown vm_"
-                                         "state = " + e.vm_state)
+                        LOG.warning("Ostro.handle_events: unknown vm_"
+                                    "state = " + e.vm_state)
 
                 elif e.object_name == 'ComputeNode':
                     # Host resource is updated
-                    self.logger.debug("Ostro.handle_events: got compute event")
+                    LOG.debug("Ostro.handle_events: got compute event")
 
                 elif e.object_name == 'ComputeNode':
                     # Host resource is updated
-                    self.logger.info("EVENT: got compute for " + e.host)
+                    LOG.info("EVENT: got compute for " + e.host)
                     # NOTE: what if host is disabled?
                     if self.resource.update_host_resources(
                             e.host, e.status, e.vcpus, e.vcpus_used, e.mem,
@@ -519,11 +515,11 @@ class Ostro(object):
                         resource_updated = True
 
                 else:
-                    self.logger.warn("Ostro.handle_events: unknown object_"
-                                     "name = " + e.object_name)
+                    LOG.warning("Ostro.handle_events: unknown object_"
+                                "name = " + e.object_name)
             else:
-                self.logger.warn("Ostro.handle_events: unknown event "
-                                 "method = " + e.method)
+                LOG.warning("Ostro.handle_events: unknown event "
+                            "method = " + e.method)
 
         if resource_updated is True:
             self.resource.update_topology(store=False)
@@ -577,7 +573,7 @@ class Ostro(object):
                                                           _local_disk)
                 self.resource.update_host_time(_host_name)
             else:
-                self.logger.warn("vm (%s) is missing while removing" % _uuid)
+                LOG.warning("vm (%s) is missing while removing" % _uuid)
 
     def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name):
         host = self.resource.hosts[_host_name]
@@ -618,8 +614,8 @@ class Ostro(object):
         if host.update_uuid(_h_uuid, _uuid) is True:
             self.resource.update_host_time(_host_name)
         else:
-            self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid "
-                             "in host = %s" % host.name)
+            LOG.warning("Ostro._update_uuid_in_host: fail to update uuid "
+                        "in host = %s" % host.name)
 
     def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name):
         host = self.resource.hosts[_host_name]
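
A note on the hunks above: handle_events() keeps the pre-existing acquire()/release() discipline, so every early "return False" must release data_lock by hand. A minimal sketch of the same guard written with a context manager (illustrative only; this patch does not refactor Ostro this way):

    import threading

    from oslo_log import log

    LOG = log.getLogger(__name__)
    data_lock = threading.Lock()

    def handle_event(event):
        # "with" releases the lock on every return path, so early
        # exits cannot leak it the way a missed release() would.
        with data_lock:
            if event.host is None:
                LOG.warning("event (%s) has no host", event)
                return False
            return True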
diff --git a/valet/engine/optimizer/ostro/search.py b/valet/engine/optimizer/ostro/search.py
index 1a2fca7..7d5f6de 100644
--- a/valet/engine/optimizer/ostro/search.py
+++ b/valet/engine/optimizer/ostro/search.py
@@ -15,6 +15,8 @@
 import copy
 import operator
 
+from oslo_log import log
+
 from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
 from valet.engine.optimizer.app_manager.app_topology_base import VGroup
 from valet.engine.optimizer.app_manager.app_topology_base import VM
@@ -24,13 +26,14 @@ from valet.engine.optimizer.ostro.search_base import Node
 from valet.engine.optimizer.ostro.search_base import Resource
 from valet.engine.resource_manager.resource_base import Datacenter
 
+LOG = log.getLogger(__name__)
+
 
 class Search(object):
     '''A bin-packing with maximal consolidation approach '''
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Initialization."""
-        self.logger = _logger
 
         # search inputs
         self.resource = None
@@ -92,9 +95,9 @@ class Search(object):
 
         self.resource = _resource
 
-        self.constraint_solver = ConstraintSolver(self.logger)
+        self.constraint_solver = ConstraintSolver()
 
-        self.logger.info("start search")
+        LOG.info("start search")
 
         self._create_avail_logical_groups()
         self._create_avail_hosts()
@@ -118,9 +121,9 @@ class Search(object):
         self.app_topology = _app_topology
         self.resource = _resource
 
-        self.constraint_solver = ConstraintSolver(self.logger)
+        self.constraint_solver = ConstraintSolver()
 
-        self.logger.info("start search for replan")
+        LOG.info("start search for replan")
 
         self._create_avail_logical_groups()
         self._create_avail_hosts()
@@ -130,7 +133,7 @@ class Search(object):
 
         self._compute_resource_weights()
 
-        self.logger.info("first, place already-planned nodes")
+        LOG.info("first, place already-planned nodes")
 
         # reconsider all vms to be migrated together
         if len(_app_topology.exclusion_list_map) > 0:
@@ -138,17 +141,17 @@ class Search(object):
 
         if self._place_planned_nodes() is False:
             self.status = "cannot replan VMs that was planned"
-            self.logger.error(self.status)
+            LOG.error(self.status)
             return False
 
-        self.logger.info("second, re-place not-planned nodes")
+        LOG.info("second, re-place not-planned nodes")
 
         init_level = LEVELS[len(LEVELS) - 1]
         (open_node_list, level) = self._open_list(self.app_topology.vms,
                                                   self.app_topology.vgroups,
                                                   init_level)
         if open_node_list is None:
-            self.logger.error("fail to replan")
+            LOG.error("fail to replan")
             return False
 
         for v, ah in self.planned_placements.iteritems():
@@ -169,8 +172,8 @@ class Search(object):
                     if vk in self.app_topology.planned_vm_map.keys():
                         del self.app_topology.planned_vm_map[vk]
             else:
-                self.logger.error("Search: migrated " + migrated_vm_id +
-                                  " is missing while replan")
+                LOG.error("Search: migrated " + migrated_vm_id +
+                          " is missing while replan")
 
     def _get_child_vms(self, _g, _vm_list, _e_vmk):
         for sgk, sg in _g.subvgroups.iteritems():
@@ -212,8 +215,8 @@ class Search(object):
                         vgroup.host = []
                     host_name = self._get_host_of_vgroup(hk, vgroup.level)
                     if host_name is None:
-                        self.logger.error("Search: host does not exist while "
-                                          "replan with vgroup")
+                        LOG.warning("Search: host does not exist while "
+                                    "replan with vgroup")
                     else:
                         if host_name not in vgroup.host:
                             vgroup.host.append(host_name)
@@ -301,7 +304,7 @@ class Search(object):
                 self._deduct_reservation(_level, best_resource, n)
                 self._close_planned_placement(_level, best_resource, n.node)
             else:
-                self.logger.error("fail to place already-planned VMs")
+                LOG.error("fail to place already-planned VMs")
                 return False
 
         return True
@@ -332,7 +335,7 @@ class Search(object):
 
             host_name = self._get_host_of_level(_n, _level)
             if host_name is None:
-                self.logger.warn("cannot find host while replanning")
+                LOG.warning("cannot find host while replanning")
                 return None
 
             avail_hosts = {}
@@ -385,8 +388,8 @@ class Search(object):
         for hk, host in self.resource.hosts.iteritems():
 
             if host.check_availability() is False:
-                self.logger.debug("Search: host (" + host.name +
-                                  ") not available at this time")
+                LOG.debug("Search: host (" + host.name +
+                          ") not available at this time")
                 continue
 
             r = Resource()
@@ -460,7 +463,7 @@ class Search(object):
         for lgk, lg in self.resource.logical_groups.iteritems():
 
             if lg.status != "enabled":
-                self.logger.warn("group (" + lg.name + ") disabled")
+                LOG.warning("group (" + lg.name + ") disabled")
                 continue
 
             lgr = LogicalGroupResource()
@@ -715,8 +718,8 @@ class Search(object):
                             if host_name not in host_list:
                                 host_list.append(host_name)
                         else:
-                            self.logger.warn("Search: cannot find candidate "
-                                             "host while replanning")
+                            LOG.warning("Search: cannot find candidate "
+                                        "host while replanning")
                     _n.node.host = host_list
 
         candidate_list = []
@@ -779,7 +782,7 @@ class Search(object):
                 else:
                     debug_candidate_name = cr.get_resource_name(_level)
                     msg = "rollback of candidate resource = {0}"
-                    self.logger.warn(msg.format(debug_candidate_name))
+                    LOG.warning(msg.format(debug_candidate_name))
 
                     if planned_host is None:
                         # recursively rollback deductions of all
@@ -792,7 +795,7 @@ class Search(object):
 
         if best_resource is None and len(candidate_list) == 0:
             self.status = "no available hosts"
-            self.logger.warn(self.status)
+            LOG.warning(self.status)
 
         return best_resource
 
@@ -858,7 +861,7 @@ class Search(object):
             lgr.group_type = "EX"
             self.avail_logical_groups[lgr.name] = lgr
 
-            self.logger.info(
+            LOG.info(
                 "Search: add new exclusivity (%s)" % _exclusivity_id)
 
         else:
@@ -913,7 +916,7 @@ class Search(object):
             lgr.group_type = "AFF"
             self.avail_logical_groups[lgr.name] = lgr
 
-            self.logger.info("add new affinity (" + _affinity_id + ")")
+            LOG.info("add new affinity (" + _affinity_id + ")")
         else:
             lgr = self.avail_logical_groups[_affinity_id]
 
@@ -963,8 +966,8 @@ class Search(object):
             lgr.group_type = "DIV"
             self.avail_logical_groups[lgr.name] = lgr
 
-            self.logger.info(
-                "Search: add new diversity (%s)" % _diversity_id)
+            LOG.info(
+                "Search: add new diversity (%s)", _diversity_id)
         else:
             lgr = self.avail_logical_groups[_diversity_id]
 
diff --git a/valet/engine/optimizer/ostro_server/daemon.py b/valet/engine/optimizer/ostro_server/daemon.py
index c1b3522..916fd8b 100644
--- a/valet/engine/optimizer/ostro_server/daemon.py
+++ b/valet/engine/optimizer/ostro_server/daemon.py
@@ -17,12 +17,15 @@
 
 import atexit
 import os
-from oslo_config import cfg
-from signal import SIGTERM
+import signal
 import sys
 import time
 
+from oslo_config import cfg
+from oslo_log import log
+
 CONF = cfg.CONF
+LOG = log.getLogger(__name__)
 
 
 class Daemon(object):
@@ -31,7 +34,7 @@ class Daemon(object):
     """Usage: subclass the Daemon class and override the run() method
     """
 
-    def __init__(self, priority, pidfile, logger, stdin='/dev/null',
+    def __init__(self, priority, pidfile, stdin='/dev/null',
                  stdout='/dev/null', stderr='/dev/null'):
         """Initialization."""
         self.stdin = stdin
@@ -39,7 +42,6 @@ class Daemon(object):
         self.stderr = stderr
         self.pidfile = pidfile
         self.priority = priority
-        self.logger = logger
 
     def daemonize(self):
         """Do the UNIX double-fork magic."""
@@ -53,9 +55,9 @@ class Daemon(object):
                 # exit first parent
                 sys.exit(0)
         except OSError as e:
-            self.logger.error("Daemon error at step1: " + e.strerror)
-            sys.stderr.write("fork #1 failed: %d (%s)\n" %
-                             (e.errno, e.strerror))
+            LOG.error("Daemon error at step1: ", e.strerror)
+            LOG.error("fork #1 failed: %d (%s)\n",
+                      e.errno, e.strerror)
             sys.exit(1)
 
         # decouple from parent environment
@@ -70,9 +72,9 @@ class Daemon(object):
                 # exit from second parent
                 sys.exit(0)
         except OSError as e:
-            self.logger.error("Daemon error at step2: " + e.strerror)
-            sys.stderr.write("fork #2 failed: %d (%s)\n" %
-                             (e.errno, e.strerror))
+            LOG.error("Daemon error at step2: ", e.strerror)
+            LOG.error("fork #2 failed: %d (%s)\n",
+                      e.errno, e.strerror)
             sys.exit(1)
 
         # redirect standard file descriptors
@@ -140,7 +142,7 @@ class Daemon(object):
         # Try killing the daemon process
         try:
             while 1:
-                os.kill(pid, SIGTERM)
+                os.kill(pid, signal.SIGTERM)
                 time.sleep(0.1)
         except OSError as err:
             err = str(err)
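
For context, the loop above is the usual daemon stop idiom: keep delivering SIGTERM until os.kill() raises because the pid is gone. A condensed sketch of that shape, assuming the "No such process" check that follows the excerpt:

    import os
    import signal
    import time

    def terminate(pid):
        """Send SIGTERM until the process disappears."""
        try:
            while True:
                os.kill(pid, signal.SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            # ESRCH ("No such process") means the daemon exited.
            if "No such process" not in str(err):
                raise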
diff --git a/valet/engine/optimizer/ostro_server/health_checker.py b/valet/engine/optimizer/ostro_server/health_checker.py
index 9d6fa3d..6377d9f 100644
--- a/valet/engine/optimizer/ostro_server/health_checker.py
+++ b/valet/engine/optimizer/ostro_server/health_checker.py
@@ -26,6 +26,7 @@ from valet.common.music import REST
 from valet.engine.conf import init_engine
 
 CONF = cfg.CONF
+LOG = get_logger(__name__)
 
 
 class HealthCheck(object):
@@ -107,7 +108,7 @@ class HealthCheck(object):
                     engine_id = placement['resources']['id']
                     break
             except Exception as e:
-                logger.warn("HealthCheck exception in read response " + str(e))
+                LOG.warning("HealthCheck exception in read response, ", str(e))
 
         return engine_id
 
@@ -126,7 +127,7 @@ class HealthCheck(object):
             }
             self.rest.request(method='delete', path=path, data=data)
         except Exception as e:
-            logger.warn("HealthCheck exception in delete request - " + str(e))
+            LOG.warning("HealthCheck exception in delete request, ", str(e))
 
         try:
             path = base % {
@@ -136,7 +137,7 @@ class HealthCheck(object):
             }
             self.rest.request(method='delete', path=path, data=data)
         except Exception as e:
-            logger.warn("HealthCheck exception in delete response - " + str(e))
+            LOG.warning("HealthCheck exception in delete response, ", str(e))
 
 
 if __name__ == "__main__":
@@ -144,20 +145,19 @@ if __name__ == "__main__":
     respondent_id = None
     code = 0
     init_engine(default_config_files=['/etc/valet/valet.conf'])
-    logger = get_logger("ostro_daemon")
 
     if os.path.exists(CONF.engine.pid):
         respondent_id = HealthCheck().ping()
 
         if respondent_id == CONF.engine.priority:
             code = CONF.engine.priority
-            logger.info("HealthCheck - Alive, "
-                        "respondent instance id: {}".format(respondent_id))
+            LOG.info("HealthCheck - Alive, "
+                     "respondent instance id: {}".format(respondent_id))
         else:
-            logger.warn("HealthCheck - pid file exists, "
+            LOG.warning("HealthCheck - pid file exists, "
                         "engine {} did not respond in a timely manner "
                         "(respondent id {})".format(CONF.engine.priority,
                                                     respondent_id))
     else:
-        logger.info("HealthCheck - no pid file, engine is not running!")
+        LOG.info("HealthCheck - no pid file, engine is not running!")
     sys.exit(code)
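
The three HealthCheck fixes above share one rule: when a log call carries an exception, the format string needs a placeholder to consume it. A minimal sketch of the corrected pattern (request_safely and its arguments are hypothetical):

    from oslo_log import log

    LOG = log.getLogger(__name__)

    def request_safely(rest, path, data):
        # 'rest' is assumed to expose request() as in valet.common.music.
        try:
            return rest.request(method='delete', path=path, data=data)
        except Exception as e:
            # Without '%s', the extra argument would trigger a
            # formatting error inside the logging module instead of
            # producing a clean warning.
            LOG.warning("HealthCheck exception in delete request: %s", e)
            return None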
diff --git a/valet/engine/resource_manager/compute.py b/valet/engine/resource_manager/compute.py
index c139966..0f05878 100644
--- a/valet/engine/resource_manager/compute.py
+++ b/valet/engine/resource_manager/compute.py
@@ -16,6 +16,7 @@ import traceback
 
 from novaclient import client as nova_client
 from oslo_config import cfg
+from oslo_log import log
 
 from resource_base import Flavor
 from resource_base import Host
@@ -26,6 +27,7 @@ from resource_base import LogicalGroup
 VERSION = 2
 
 CONF = cfg.CONF
+LOG = log.getLogger(__name__)
 
 
 class Compute(object):
@@ -37,9 +39,8 @@ class Compute(object):
     Interacts with nova client to perform these actions.
     """
 
-    def __init__(self, _logger):
+    def __init__(self):
         """Compute init."""
-        self.logger = _logger
         self.nova = None
 
     def set_hosts(self, _hosts, _logical_groups):
@@ -48,22 +49,22 @@ class Compute(object):
 
         status = self._set_availability_zones(_hosts, _logical_groups)
         if status != "success":
-            self.logger.error('_set_availability_zones failed')
+            LOG.error('_set_availability_zones failed')
             return status
 
         status = self._set_aggregates(_hosts, _logical_groups)
         if status != "success":
-            self.logger.error('_set_aggregates failed')
+            LOG.error('_set_aggregates failed')
             return status
 
         status = self._set_placed_vms(_hosts, _logical_groups)
         if status != "success":
-            self.logger.error('_set_placed_vms failed')
+            LOG.error('_set_placed_vms failed')
             return status
 
         status = self._set_resources(_hosts)
         if status != "success":
-            self.logger.error('_set_resources failed')
+            LOG.error('_set_resources failed')
             return status
 
         return "success"
@@ -102,11 +103,11 @@ class Compute(object):
                         _hosts[host.name] = host
 
             except (ValueError, KeyError, TypeError):
-                self.logger.error(traceback.format_exc())
+                LOG.error(traceback.format_exc())
                 return "Error while setting host zones from Nova"
 
         except Exception:
-            self.logger.critical(traceback.format_exc())
+            LOG.critical(traceback.format_exc())
 
         return "success"
 
@@ -134,7 +135,7 @@ class Compute(object):
                     aggregate.vms_per_host[host.name] = []
 
         except (ValueError, KeyError, TypeError):
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
             return "Error while setting host aggregates from Nova"
 
         return "success"
@@ -182,7 +183,7 @@ class Compute(object):
                         _vm_list.append(s['uuid'])
 
         except (ValueError, KeyError, TypeError):
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
             return "Error while getting existing vms"
 
         return "success"
@@ -201,7 +202,7 @@ class Compute(object):
             _vm_detail.append(status)
 
         except (ValueError, KeyError, TypeError):
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
             return "Error while getting vm detail"
 
         return "success"
@@ -226,7 +227,7 @@ class Compute(object):
                     host.disk_available_least = float(hv.disk_available_least)
 
         except (ValueError, KeyError, TypeError):
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
             return "Error while setting host resources from Nova"
 
         return "success"
@@ -287,7 +288,7 @@ class Compute(object):
                 _flavors[flavor.name] = flavor
 
         except (ValueError, KeyError, TypeError):
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
             return "Error while getting flavors"
 
         return "success"
@@ -308,7 +309,7 @@ class Compute(object):
                 break
 
         except (ValueError, KeyError, TypeError):
-            self.logger.error(traceback.format_exc())
+            LOG.error(traceback.format_exc())
             return "Error while getting flavor extra spec"
 
         return "success"
diff --git a/valet/engine/resource_manager/compute_manager.py b/valet/engine/resource_manager/compute_manager.py
index 65dd9db..bd6aa08 100644
--- a/valet/engine/resource_manager/compute_manager.py
+++ b/valet/engine/resource_manager/compute_manager.py
@@ -15,13 +15,17 @@
 
 """Compute Manager."""
 
+from copy import deepcopy
 import threading
 import time
 
-from copy import deepcopy
+from oslo_log import log
+
 from valet.engine.resource_manager.compute import Compute
 from valet.engine.resource_manager.resource_base import Host
 
+LOG = log.getLogger(__name__)
+
 
 class ComputeManager(threading.Thread):
     """Compute Manager Class.
@@ -30,7 +34,7 @@ class ComputeManager(threading.Thread):
     flavors, etc. Calls many functions from Resource.
     """
 
-    def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger):
+    def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config):
         """Init Compute Manager."""
         threading.Thread.__init__(self)
 
@@ -43,8 +47,6 @@ class ComputeManager(threading.Thread):
 
         self.config = _config
 
-        self.logger = _logger
-
         self.admin_token = None
         self.project_token = None
 
@@ -52,8 +54,8 @@ class ComputeManager(threading.Thread):
 
     def run(self):
         """Start Compute Manager thread to run setup."""
-        self.logger.info("ComputeManager: start " + self.thread_name +
-                         " ......")
+        LOG.info("ComputeManager: start " + self.thread_name +
+                 " ......")
 
         if self.config.compute_trigger_freq > 0:
             period_end = time.time() + self.config.compute_trigger_freq
@@ -71,21 +73,21 @@ class ComputeManager(threading.Thread):
                                       self.config.compute_trigger_freq)
 
         # NOTE(GJ): do not timer based batch
-        self.logger.info("exit compute_manager " + self.thread_name)
+        LOG.info("exit compute_manager " + self.thread_name)
 
     def _run(self):
-        self.logger.info("ComputeManager: --- start compute_nodes "
-                         "status update ---")
+        LOG.info("ComputeManager: --- start compute_nodes "
+                 "status update ---")
 
         triggered_host_updates = self.set_hosts()
         if triggered_host_updates is not True:
-            self.logger.warn("fail to set hosts from nova")
+            LOG.warning("fail to set hosts from nova")
         triggered_flavor_updates = self.set_flavors()
         if triggered_flavor_updates is not True:
-            self.logger.warn("fail to set flavor from nova")
+            LOG.warning("fail to set flavor from nova")
 
-        self.logger.info("ComputeManager: --- done compute_nodes "
-                         "status update ---")
+        LOG.info("ComputeManager: --- done compute_nodes "
+                 "status update ---")
 
         return True
 
@@ -94,7 +96,7 @@ class ComputeManager(threading.Thread):
         hosts = {}
         logical_groups = {}
 
-        compute = Compute(self.logger)
+        compute = Compute()
 
         status = compute.set_hosts(hosts, logical_groups)
         if status != "success":
@@ -125,8 +127,8 @@ class ComputeManager(threading.Thread):
                     _logical_groups[lk])
 
                 self.resource.logical_groups[lk].last_update = time.time()
-                self.logger.warn("ComputeManager: new logical group (" +
-                                 lk + ") added")
+                LOG.warning("ComputeManager: new logical group (" +
+                            lk + ") added")
                 updated = True
 
         for rlk in self.resource.logical_groups.keys():
@@ -137,8 +139,8 @@ class ComputeManager(threading.Thread):
                     self.resource.logical_groups[rlk].status = "disabled"
 
                     self.resource.logical_groups[rlk].last_update = time.time()
-                    self.logger.warn("ComputeManager: logical group (" +
-                                     rlk + ") removed")
+                    LOG.warning("ComputeManager: logical group (" +
+                                rlk + ") removed")
                     updated = True
 
         for lk in _logical_groups.keys():
@@ -149,8 +151,8 @@ class ComputeManager(threading.Thread):
                 if self._check_logical_group_metadata_update(lg, rlg) is True:
 
                     rlg.last_update = time.time()
-                    self.logger.warn("ComputeManager: logical group (" +
-                                     lk + ") updated")
+                    LOG.warning("ComputeManager: logical group (" +
+                                lk + ") updated")
                     updated = True
 
         return updated
@@ -193,8 +195,8 @@ class ComputeManager(threading.Thread):
                 self.resource.hosts[new_host.name] = new_host
 
                 new_host.last_update = time.time()
-                self.logger.warn("ComputeManager: new host (" +
-                                 new_host.name + ") added")
+                LOG.warning("ComputeManager: new host (" +
+                            new_host.name + ") added")
                 updated = True
 
         for rhk, rhost in self.resource.hosts.iteritems():
@@ -203,8 +205,8 @@ class ComputeManager(threading.Thread):
                     rhost.tag.remove("nova")
 
                     rhost.last_update = time.time()
-                    self.logger.warn("ComputeManager: host (" +
-                                     rhost.name + ") disabled")
+                    LOG.warning("ComputeManager: host (" +
+                                rhost.name + ") disabled")
                     updated = True
 
         for hk in _hosts.keys():
@@ -217,8 +219,8 @@ class ComputeManager(threading.Thread):
         for hk, h in self.resource.hosts.iteritems():
             if h.clean_memberships() is True:
                 h.last_update = time.time()
-                self.logger.warn("ComputeManager: host (" + h.name +
-                                 ") updated (delete EX/AFF/DIV membership)")
+                LOG.warning("ComputeManager: host (" + h.name +
+                            ") updated (delete EX/AFF/DIV membership)")
                 updated = True
 
         for hk, host in self.resource.hosts.iteritems():
@@ -247,20 +249,20 @@ class ComputeManager(threading.Thread):
         if "nova" not in _rhost.tag:
             _rhost.tag.append("nova")
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (tag added)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (tag added)")
 
         if _host.status != _rhost.status:
             _rhost.status = _host.status
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (status changed)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (status changed)")
 
         if _host.state != _rhost.state:
             _rhost.state = _host.state
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (state changed)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (state changed)")
 
         return topology_updated
 
@@ -274,8 +276,8 @@ class ComputeManager(threading.Thread):
             _rhost.original_vCPUs = _host.original_vCPUs
             _rhost.avail_vCPUs = _host.avail_vCPUs
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (CPU updated)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (CPU updated)")
 
         if _host.mem_cap != _rhost.mem_cap or \
            _host.original_mem_cap != _rhost.original_mem_cap or \
@@ -284,8 +286,8 @@ class ComputeManager(threading.Thread):
             _rhost.original_mem_cap = _host.original_mem_cap
             _rhost.avail_mem_cap = _host.avail_mem_cap
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (mem updated)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (mem updated)")
 
         if _host.local_disk_cap != _rhost.local_disk_cap or \
            _host.original_local_disk_cap != _rhost.original_local_disk_cap or \
@@ -294,8 +296,8 @@ class ComputeManager(threading.Thread):
             _rhost.original_local_disk_cap = _host.original_local_disk_cap
             _rhost.avail_local_disk_cap = _host.avail_local_disk_cap
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (local disk space updated)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (local disk space updated)")
 
         if _host.vCPUs_used != _rhost.vCPUs_used or \
            _host.free_mem_mb != _rhost.free_mem_mb or \
@@ -306,8 +308,8 @@ class ComputeManager(threading.Thread):
             _rhost.free_disk_gb = _host.free_disk_gb
             _rhost.disk_available_least = _host.disk_available_least
             topology_updated = True
-            self.logger.warn("ComputeManager: host (" + _rhost.name +
-                             ") updated (other resource numbers)")
+            LOG.warning("ComputeManager: host (" + _rhost.name +
+                        ") updated (other resource numbers)")
 
         return topology_updated
 
@@ -318,8 +320,8 @@ class ComputeManager(threading.Thread):
             if mk not in _rhost.memberships.keys():
                 _rhost.memberships[mk] = self.resource.logical_groups[mk]
                 topology_updated = True
-                self.logger.warn("ComputeManager: host (" + _rhost.name +
-                                 ") updated (new membership)")
+                LOG.warning("ComputeManager: host (" + _rhost.name +
+                            ") updated (new membership)")
 
         for mk in _rhost.memberships.keys():
             m = _rhost.memberships[mk]
@@ -328,8 +330,8 @@ class ComputeManager(threading.Thread):
                 if mk not in _host.memberships.keys():
                     del _rhost.memberships[mk]
                     topology_updated = True
-                    self.logger.warn("ComputeManager: host (" + _rhost.name +
-                                     ") updated (delete membership)")
+                    LOG.warning("ComputeManager: host (" + _rhost.name +
+                                ") updated (delete membership)")
 
         return topology_updated
 
@@ -343,7 +345,7 @@ class ComputeManager(threading.Thread):
         if alen != blen:
             topology_updated = True
             msg = "host ({0}) {1} none vms removed"
-            self.logger.warn(msg.format(_rhost.name, str(blen - alen)))
+            LOG.warning(msg.format(_rhost.name, str(blen - alen)))
 
         self.resource.clean_none_vms_from_logical_groups(_rhost)
 
@@ -351,16 +353,16 @@ class ComputeManager(threading.Thread):
             if _rhost.exist_vm_by_uuid(vm_id[2]) is False:
                 _rhost.vm_list.append(vm_id)
                 topology_updated = True
-                self.logger.warn("ComputeManager: host (" + _rhost.name +
-                                 ") updated (new vm placed)")
+                LOG.warning("ComputeManager: host (" + _rhost.name +
+                            ") updated (new vm placed)")
 
         for rvm_id in _rhost.vm_list:
             if _host.exist_vm_by_uuid(rvm_id[2]) is False:
                 self.resource.remove_vm_by_uuid_from_logical_groups(
                     _rhost, rvm_id[2])
                 topology_updated = True
-                self.logger.warn("ComputeManager: host (" + _rhost.name +
-                                 ") updated (vm removed)")
+                LOG.warning("ComputeManager: host (" + _rhost.name +
+                            ") updated (vm removed)")
 
         blen = len(_rhost.vm_list)
         _rhost.vm_list = [
@@ -369,7 +371,7 @@ class ComputeManager(threading.Thread):
         if alen != blen:
             topology_updated = True
             msg = "host ({0}) {1} vms removed"
-            self.logger.warn(msg.format(_rhost.name, str(blen - alen)))
+            LOG.warning(msg.format(_rhost.name, str(blen - alen)))
 
         return topology_updated
 
@@ -377,11 +379,11 @@ class ComputeManager(threading.Thread):
         """Return True if compute set flavors returns success."""
         flavors = {}
 
-        compute = Compute(self.logger)
+        compute = Compute()
 
         status = compute.set_flavors(flavors)
         if status != "success":
-            self.logger.error(status)
+            LOG.error(status)
             return False
 
         self.data_lock.acquire()
@@ -399,8 +401,8 @@ class ComputeManager(threading.Thread):
                 self.resource.flavors[fk] = deepcopy(_flavors[fk])
 
                 self.resource.flavors[fk].last_update = time.time()
-                self.logger.warn("ComputeManager: new flavor (" +
-                                 fk + ":" + _flavors[fk].flavor_id + ") added")
+                LOG.warning("ComputeManager: new flavor (" +
+                            fk + ":" + _flavors[fk].flavor_id + ") added")
                 updated = True
 
         for rfk in self.resource.flavors.keys():
@@ -409,8 +411,8 @@ class ComputeManager(threading.Thread):
                 rf.status = "disabled"
 
                 rf.last_update = time.time()
-                self.logger.warn("ComputeManager: flavor (" + rfk + ":" +
-                                 rf.flavor_id + ") removed")
+                LOG.warning("ComputeManager: flavor (" + rfk + ":" +
+                            rf.flavor_id + ") removed")
                 updated = True
 
         for fk in _flavors.keys():
@@ -419,8 +421,8 @@ class ComputeManager(threading.Thread):
 
             if self._check_flavor_spec_update(f, rf) is True:
                 rf.last_update = time.time()
-                self.logger.warn("ComputeManager: flavor (" + fk + ":" +
-                                 rf.flavor_id + ") spec updated")
+                LOG.warning("ComputeManager: flavor (" + fk + ":" +
+                            rf.flavor_id + ") spec updated")
                 updated = True
 
         return updated
diff --git a/valet/engine/resource_manager/resource.py b/valet/engine/resource_manager/resource.py
index 4424883..6c32269 100644
--- a/valet/engine/resource_manager/resource.py
+++ b/valet/engine/resource_manager/resource.py
@@ -15,6 +15,8 @@
 import time
 import traceback
 
+from oslo_log import log
+
 from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
 from valet.engine.resource_manager.resource_base import Datacenter
 from valet.engine.resource_manager.resource_base import Flavor
@@ -22,6 +24,8 @@ from valet.engine.resource_manager.resource_base import Host
 from valet.engine.resource_manager.resource_base import HostGroup
 from valet.engine.resource_manager.resource_base import LogicalGroup
 
+LOG = log.getLogger(__name__)
+
 
 class Resource(object):
     """Resource Class.
@@ -32,12 +36,11 @@ class Resource(object):
     updates to base resource types.
     """
 
-    def __init__(self, _db, _config, _logger):
+    def __init__(self, _db, _config):
         """Init Resource Class."""
         self.db = _db
 
         self.config = _config
-        self.logger = _logger
 
         """ resource data """
         self.datacenter = Datacenter(self.config.datacenter_name)
@@ -64,7 +67,7 @@ class Resource(object):
     def bootstrap_from_db(self, _resource_status):
         """Return True if bootsrap resource from database successful."""
         try:
-            self.logger.info("Resource status from DB = %s", _resource_status)
+            LOG.info("Resource status from DB = %s", _resource_status)
             logical_groups = _resource_status.get("logical_groups")
             if logical_groups:
                 for lgk, lg in logical_groups.iteritems():
@@ -78,7 +81,7 @@ class Resource(object):
                     self.logical_groups[lgk] = logical_group
 
             if len(self.logical_groups) == 0:
-                self.logger.warn("no logical_groups")
+                LOG.warning("no logical_groups")
 
             flavors = _resource_status.get("flavors")
             if flavors:
@@ -94,7 +97,7 @@ class Resource(object):
                     self.flavors[fk] = flavor
 
             if len(self.flavors) == 0:
-                self.logger.error("fail loading flavors")
+                LOG.error("fail loading flavors")
 
             hosts = _resource_status.get("hosts")
             if hosts:
@@ -124,7 +127,7 @@ class Resource(object):
                     self.hosts[hk] = host
 
                 if len(self.hosts) == 0:
-                    self.logger.error("fail loading hosts")
+                    LOG.error("fail loading hosts")
 
             host_groups = _resource_status.get("host_groups")
             if host_groups:
@@ -151,7 +154,7 @@ class Resource(object):
                     self.host_groups[hgk] = host_group
 
                 if len(self.host_groups) == 0:
-                    self.logger.warn("fail loading host_groups")
+                    LOG.warning("fail loading host_groups")
 
             dc = _resource_status.get("datacenter")
             if dc:
@@ -181,7 +184,7 @@ class Resource(object):
                         self.datacenter.resources[ck] = self.hosts[ck]
 
                 if len(self.datacenter.resources) == 0:
-                    self.logger.error("fail loading datacenter")
+                    LOG.error("fail loading datacenter")
 
             hgs = _resource_status.get("host_groups")
             if hgs:
@@ -215,8 +218,8 @@ class Resource(object):
             self._update_compute_avail()
 
         except Exception:
-            self.logger.error("while bootstrap_from_db: ",
-                              traceback.format_exc())
+            LOG.error("while bootstrap_from_db: ",
+                      traceback.format_exc())
 
         return True
 
@@ -314,7 +317,7 @@ class Resource(object):
         host_group_updates = {}
         datacenter_update = None
 
-        self.logger.info("check and store resource status")
+        LOG.info("check and store resource status")
 
         for fk, flavor in self.flavors.iteritems():
             if flavor.last_update >= self.curr_db_timestamp:
@@ -366,66 +369,66 @@ class Resource(object):
         return True
 
     def show_current_logical_groups(self):
-        self.logger.debug("--- track logical groups info ---")
+        LOG.debug("--- track logical groups info ---")
         for lgk, lg in self.logical_groups.iteritems():
             if lg.status == "enabled":
-                self.logger.debug("lg name = " + lgk)
-                self.logger.debug("    type = " + lg.group_type)
+                LOG.debug("lg name = " + lgk)
+                LOG.debug("    type = " + lg.group_type)
                 if lg.group_type == "AGGR":
                     for k in lg.metadata.keys():
-                        self.logger.debug("        metadata key = " + k)
-                self.logger.debug("    vms")
+                        LOG.debug("        metadata key = " + k)
+                LOG.debug("    vms")
                 debug_msg = "        orch_id = %s uuid = %s"
                 for v in lg.vm_list:
-                    self.logger.debug(debug_msg, v[0], v[2])
-                self.logger.debug("    hosts")
+                    LOG.debug(debug_msg, v[0], v[2])
+                LOG.debug("    hosts")
                 for h, v in lg.vms_per_host.iteritems():
-                    self.logger.debug("        host = %s", h)
-                    self.logger.debug("        vms = %s",
-                                      str(len(lg.vms_per_host[h])))
+                    LOG.debug("        host = %s" % h)
+                    LOG.debug("        vms = %s" %
+                              str(len(lg.vms_per_host[h])))
                     host = None
                     if h in self.hosts.keys():
                         host = self.hosts[h]
                     elif h in self.host_groups.keys():
                         host = self.host_groups[h]
                     else:
-                        self.logger.error("TEST: lg member not exist")
+                        LOG.error("TEST: lg member not exist")
                     if host is not None:
-                        self.logger.debug("        status = " + host.status)
+                        LOG.debug("        status = " + host.status)
                         if lgk not in host.memberships.keys():
-                            self.logger.error("TEST: membership missing")
+                            LOG.error("TEST: membership missing")
 
     def show_current_host_status(self):
-        self.logger.debug("--- track host info ---")
+        LOG.debug("--- track host info ---")
         for hk, h in self.hosts.iteritems():
-            self.logger.debug("host name = " + hk)
-            self.logger.debug("    status = " + h.status + ", " + h.state)
-            self.logger.debug("    vms = " + str(len(h.vm_list)))
-            self.logger.debug("    resources (org, total, avail, used)")
+            LOG.debug("host name = " + hk)
+            LOG.debug("    status = " + h.status + ", " + h.state)
+            LOG.debug("    vms = " + str(len(h.vm_list)))
+            LOG.debug("    resources (org, total, avail, used)")
             cpu_org = str(h.original_vCPUs)
             cpu_tot = str(h.vCPUs)
             cpu_avail = str(h.avail_vCPUs)
             cpu_used = str(h.vCPUs_used)
             msg = "      {0} = {1}, {2}, {3}, {4}"
-            self.logger.debug(
+            LOG.debug(
                 msg.format('cpu', cpu_org, cpu_tot, cpu_avail, cpu_used))
             mem_org = str(h.original_mem_cap)
             mem_tot = str(h.mem_cap)
             mem_avail = str(h.avail_mem_cap)
             mem_used = str(h.free_mem_mb)
-            self.logger.debug(
+            LOG.debug(
                 msg.format('mem', mem_org, mem_tot, mem_avail, mem_used))
             dsk_org = str(h.original_local_disk_cap)
             dsk_tot = str(h.local_disk_cap)
             dsk_avail = str(h.avail_local_disk_cap)
             dsk_used = str(h.free_disk_gb)
-            self.logger.debug(
+            LOG.debug(
                 msg.format('disk', dsk_org, dsk_tot, dsk_avail, dsk_used))
-            self.logger.debug("    memberships")
+            LOG.debug("    memberships")
             for mk in h.memberships.keys():
-                self.logger.debug("        " + mk)
+                LOG.debug("        " + mk)
                 if mk not in self.logical_groups.keys():
-                    self.logger.error("TEST: lg missing")
+                    LOG.error("TEST: lg missing")
 
     def update_rack_resource(self, _host):
         """Update resources for rack (host), then update cluster."""
@@ -509,8 +512,9 @@ class Resource(object):
 
         if host.status != _st:
             host.status = _st
-            self.logger.info(
-                "Resource.update_host_resources: host(%s) status changed", _hn)
+            LOG.warning(
+                "Resource.update_host_resources: host(%s) status changed",
+                _hn)
             updated = True
 
         # FIXME(GJ): should check cpu, memm and disk here?
@@ -577,8 +581,8 @@ class Resource(object):
         """Remove vm by orchestration id from lgs. Update host and lgs."""
         for lgk in _host.memberships.keys():
             if lgk not in self.logical_groups.keys():
-                self.logger.warn("logical group (%s) missing while "
-                                 "removing %s", lgk, _h_uuid)
+                LOG.warning("logical group (%s) missing while "
+                            "removing %s" % (lgk, _h_uuid))
                 continue
             lg = self.logical_groups[lgk]
 
@@ -617,8 +621,8 @@ class Resource(object):
         """Remove vm by uuid from lgs and update proper host and lgs."""
         for lgk in _host.memberships.keys():
             if lgk not in self.logical_groups.keys():
-                self.logger.warn("logical group (%s) missing while "
-                                 "removing %s", lgk, _uuid)
+                LOG.warning("logical group (%s) missing while "
+                            "removing %s" % (lgk, _uuid))
                 continue
             lg = self.logical_groups[lgk]
 
diff --git a/valet/engine/resource_manager/topology.py b/valet/engine/resource_manager/topology.py
index 0c563ce..ec2d4c2 100644
--- a/valet/engine/resource_manager/topology.py
+++ b/valet/engine/resource_manager/topology.py
@@ -16,10 +16,14 @@
 """Topology class - performs actual setting up of Topology object."""
 
 import copy
+
+from oslo_log import log
 from sre_parse import isdigit
 
 from valet.engine.resource_manager.resource_base import HostGroup
 
+LOG = log.getLogger(__name__)
+
 
 class Topology(object):
     """Topology class.
@@ -27,10 +31,9 @@ class Topology(object):
     currently, using cannonical naming convention to find the topology
     """
 
-    def __init__(self, _config, _logger):
+    def __init__(self, _config):
         """Init config and logger."""
         self.config = _config
-        self.logger = _logger
 
     # Triggered by rhosts change
     def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
@@ -52,7 +55,7 @@ class Topology(object):
 
             (region_name, rack_name, _, status) = self._set_layout_by_name(rhk)
             if status != "success":
-                self.logger.warn(status + " in host_name (" + rhk + ")")
+                LOG.warning(status + " in host_name (" + rhk + ")")
 
             if region_name not in _datacenter.region_code_list:
                 _datacenter.region_code_list.append(region_name)
@@ -76,10 +79,10 @@ class Topology(object):
             _datacenter.resources[hgk] = hg
 
         if len(_datacenter.region_code_list) > 1:
-            self.logger.warn("more than one region code")
+            LOG.warning("more than one region code")
 
         if "none" in _host_groups.keys():
-            self.logger.warn("some hosts are into unknown rack")
+            LOG.warning("some hosts are into unknown rack")
 
         return "success"
 
diff --git a/valet/engine/resource_manager/topology_manager.py b/valet/engine/resource_manager/topology_manager.py
index 91c5731..806e3ff 100644
--- a/valet/engine/resource_manager/topology_manager.py
+++ b/valet/engine/resource_manager/topology_manager.py
@@ -15,17 +15,21 @@
 import threading
 import time
 
+from oslo_log import log
+
 from valet.engine.resource_manager.resource_base import Datacenter
 from valet.engine.resource_manager.resource_base import Host
 from valet.engine.resource_manager.resource_base import HostGroup
 from valet.engine.resource_manager.topology import Topology
 
+LOG = log.getLogger(__name__)
+
 
 class TopologyManager(threading.Thread):
     """Topology Manager Class."""
 
     def __init__(self, _t_id, _t_name, _resource,
-                 _data_lock, _config, _logger):
+                 _data_lock, _config):
         """Init Topology Manager."""
         threading.Thread.__init__(self)
 
@@ -37,14 +41,13 @@ class TopologyManager(threading.Thread):
         self.resource = _resource
 
         self.config = _config
-        self.logger = _logger
 
         self.update_batch_wait = self.config.update_batch_wait
 
     def run(self):
         """Function starts and tracks Topology Manager Thread."""
-        self.logger.info("TopologyManager: start " +
-                         self.thread_name + " ......")
+        LOG.info("TopologyManager: start " +
+                 self.thread_name + " ......")
 
         if self.config.topology_trigger_freq > 0:
             period_end = time.time() + self.config.topology_trigger_freq
@@ -61,17 +64,17 @@ class TopologyManager(threading.Thread):
                         period_end = (curr_ts +
                                       self.config.topology_trigger_freq)
         # NOTE(GJ): do not timer based batch
-        self.logger.info("exit topology_manager " + self.thread_name)
+        LOG.info("exit topology_manager " + self.thread_name)
 
     def _run(self):
 
-        self.logger.info("TopologyManager: --- start topology "
-                         "status update ---")
+        LOG.info("TopologyManager: --- start topology "
+                 "status update ---")
 
         if self.set_topology() is not True:
-            self.logger.warn("fail to set topology")
+            LOG.warning("fail to set topology")
 
-        self.logger.info("--- done topology status update ---")
+        LOG.info("--- done topology status update ---")
 
     def set_topology(self):
         host_groups = {}
@@ -80,7 +83,7 @@ class TopologyManager(threading.Thread):
         # NOTE(GJ): do not consider switch topology at this version
 
         datacenter = Datacenter(self.config.datacenter_name)
-        topology = Topology(self.config, self.logger)
+        topology = Topology(self.config)
 
         status = topology.set_topology(datacenter, host_groups, hosts,
                                        self.resource.hosts)
@@ -104,8 +107,8 @@ class TopologyManager(threading.Thread):
 
                 new_host.last_update = time.time()
 
-                self.logger.info("TopologyManager: new host (" +
-                                 new_host.name + ") added from configuration")
+                LOG.warning("TopologyManager: new host (" +
+                            new_host.name + ") added from configuration")
                 updated = True
 
         for rhk in self.resource.hosts.keys():
@@ -116,8 +119,8 @@ class TopologyManager(threading.Thread):
 
                 host.last_update = time.time()
 
-                self.logger.info("TopologyManager: host (" +
-                                 host.name + ") removed from configuration")
+                LOG.warning("TopologyManager: host (" +
+                            host.name + ") removed from configuration")
                 updated = True
 
         for hgk in _host_groups.keys():
@@ -127,8 +130,8 @@ class TopologyManager(threading.Thread):
 
                 new_host_group.last_update = time.time()
 
-                self.logger.info("TopologyManager: new host_group (" +
-                                 new_host_group.name + ") added")
+                LOG.warning("TopologyManager: new host_group (" +
+                            new_host_group.name + ") added")
                 updated = True
 
         for rhgk in self.resource.host_groups.keys():
@@ -138,8 +141,8 @@ class TopologyManager(threading.Thread):
 
                 host_group.last_update = time.time()
 
-                self.logger.info("TopologyManager: host_group (" +
-                                 host_group.name + ") disabled")
+                LOG.warning("TopologyManager: host_group (" +
+                            host_group.name + ") disabled")
                 updated = True
 
         for hk in _hosts.keys():
@@ -191,8 +194,8 @@ class TopologyManager(threading.Thread):
         if "infra" not in _rhost.tag:
             _rhost.tag.append("infra")
             updated = True
-            self.logger.info("TopologyManager: host (" + _rhost.name +
-                             ") updated (tag)")
+            LOG.warning("TopologyManager: host (" + _rhost.name +
+                        ") updated (tag)")
 
         if (_rhost.host_group is None or
                 _host.host_group.name != _rhost.host_group.name):
@@ -203,8 +206,8 @@ class TopologyManager(threading.Thread):
             else:
                 _rhost.host_group = self.resource.datacenter
             updated = True
-            self.logger.info("TopologyManager: host (" + _rhost.name +
-                             ") updated (host_group)")
+            LOG.warning("TopologyManager: host (" + _rhost.name +
+                        ") updated (host_group)")
 
         return updated
 
@@ -214,14 +217,14 @@ class TopologyManager(threading.Thread):
         if _hg.host_type != _rhg.host_type:
             _rhg.host_type = _hg.host_type
             updated = True
-            self.logger.info("TopologyManager: host_group (" + _rhg.name +
-                             ") updated (hosting type)")
+            LOG.warning("TopologyManager: host_group (" + _rhg.name +
+                        ") updated (hosting type)")
 
         if _rhg.status == "disabled":
             _rhg.status = "enabled"
             updated = True
-            self.logger.info("TopologyManager: host_group (" + _rhg.name +
-                             ") updated (enabled)")
+            LOG.warning("TopologyManager: host_group (" + _rhg.name +
+                        ") updated (enabled)")
 
         if _hg.parent_resource != _rhg.parent_resource:
             if _hg.parent_resource.name in self.resource.host_groups.keys():
@@ -230,8 +233,8 @@ class TopologyManager(threading.Thread):
             else:
                 _rhg.parent_resource = self.resource.datacenter
             updated = True
-            self.logger.info("TopologyManager: host_group (" + _rhg.name +
-                             ") updated (parent host_group)")
+            LOG.warning("TopologyManager: host_group (" + _rhg.name +
+                        ") updated (parent host_group)")
 
         for rk in _hg.child_resources.keys():
             exist = False
@@ -245,8 +248,8 @@ class TopologyManager(threading.Thread):
                 elif _rhg.host_type == "cluster":
                     _rhg.child_resources[rk] = self.resource.host_groups[rk]
                 updated = True
-                self.logger.info("TopologyManager: host_group (" + _rhg.name +
-                                 ") updated (new child host)")
+                LOG.warning("TopologyManager: host_group (" + _rhg.name +
+                            ") updated (new child host)")
 
         for rrk in _rhg.child_resources.keys():
             exist = False
@@ -257,8 +260,8 @@ class TopologyManager(threading.Thread):
             if exist is False:
                 del _rhg.child_resources[rrk]
                 updated = True
-                self.logger.info("TopologyManager: host_group (" + _rhg.name +
-                                 ") updated (child host removed)")
+                LOG.warning("TopologyManager: host_group (" + _rhg.name +
+                            ") updated (child host removed)")
 
         return updated
 
@@ -269,8 +272,8 @@ class TopologyManager(threading.Thread):
             if rc not in self.resource.datacenter.region_code_list:
                 self.resource.datacenter.region_code_list.append(rc)
                 updated = True
-                self.logger.info("TopologyManager: datacenter updated "
-                                 "(new region code, " + rc + ")")
+                LOG.warning("TopologyManager: datacenter updated "
+                            "(new region code, " + rc + ")")
 
         code_list = self.resource.datacenter.region_code_list
         blen = len(code_list)
@@ -279,7 +282,7 @@ class TopologyManager(threading.Thread):
         if alen != blen:
             updated = True
             self.resource.datacenter.region_code_list = code_list
-            self.logger.info("datacenter updated (region code removed)")
+            LOG.warning("datacenter updated (region code removed)")
 
         for rk in _datacenter.resources.keys():
             exist = False
@@ -296,8 +299,8 @@ class TopologyManager(threading.Thread):
                     self.resource.datacenter.resources[rk] = \
                         self.resource.hosts[rk]
                 updated = True
-                self.logger.info("TopologyManager: datacenter updated "
-                                 "(new resource)")
+                LOG.warning("TopologyManager: datacenter updated "
+                            "(new resource)")
 
         for rrk in self.resource.datacenter.resources.keys():
             exist = False
@@ -308,7 +311,7 @@ class TopologyManager(threading.Thread):
             if exist is False:
                 del self.resource.datacenter.resources[rrk]
                 updated = True
-                self.logger.info("TopologyManager: datacenter updated "
-                                 "(resource removed)")
+                LOG.warning("TopologyManager: datacenter updated "
+                            "(resource removed)")
 
         return updated
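
The topology_manager.py changes follow the same shape as the rest of this
patch: drop the _logger constructor parameter and log through a module-level
LOG bound once via oslo_log. A minimal sketch of the convention, with
illustrative names rather than the actual Valet classes:

    import threading

    from oslo_log import log

    LOG = log.getLogger(__name__)


    class ExampleManager(threading.Thread):

        def __init__(self, _config):  # no _logger parameter any more
            threading.Thread.__init__(self)
            self.config = _config

        def run(self):
            # The module-level LOG replaces the old self.logger attribute.
            LOG.info("ExampleManager: start")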
diff --git a/valet/tests/unit/engine/test_search.py b/valet/tests/unit/engine/test_search.py
index 198f95e..bfe92e6 100644
--- a/valet/tests/unit/engine/test_search.py
+++ b/valet/tests/unit/engine/test_search.py
@@ -12,16 +12,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-"""Test Search."""
-
-import logging
 import mock
+
 from valet.engine.optimizer.ostro.search import Search
 from valet.tests.base import Base
 
-LOG = logging.getLogger(__name__)
-
 
 class TestSearch(Base):
     """Unit tests for valet.engine.optimizer.ostro.search."""
@@ -30,7 +25,7 @@ class TestSearch(Base):
         """Setup Test Search Class."""
         super(TestSearch, self).setUp()
 
-        self.search = Search(LOG)
+        self.search = Search()
 
     def test_copy_resource_status(self):
         """Test Copy Resource Status."""
diff --git a/valet/tests/unit/engine/test_topology.py b/valet/tests/unit/engine/test_topology.py
index c5a9b59..45726b9 100644
--- a/valet/tests/unit/engine/test_topology.py
+++ b/valet/tests/unit/engine/test_topology.py
@@ -25,7 +25,7 @@ class TestTopology(Base):
     def setUp(self):
         """Setup TestTopology Test Class."""
         super(TestTopology, self).setUp()
-        self.topo = Topology(Config(), None)
+        self.topo = Topology(Config())
 
     def test_simple_topology(self):
         """Validate simple topology (region, rack, node_type and status)."""