Merge "Valet on Stable Newton Devstack"
commit 2109da9972
@@ -99,10 +99,9 @@ class ValetFilter(filters.BaseHostFilter):
         cfg.CONF.register_opts(opts, group=opt_group)
 
     # TODO(JD): Factor out common code between this and the cinder filter
-    def filter_all(self, filter_obj_list, filter_properties):
+    def filter_all(self, filter_obj_list, request_spec):
         '''Filter all hosts in one swell foop'''
 
-        hints_key = 'scheduler_hints'
         orch_id_key = 'heat_resource_uuid'
 
         ad_hoc = False
@@ -113,10 +112,9 @@ class ValetFilter(filters.BaseHostFilter):
         failure_mode = opt[self.opt_failure_mode_str]
 
         # Get the resource_id (physical id) and host candidates
-        request_spec = filter_properties.get('request_spec')
-        instance_properties = request_spec.get('instance_properties')
-        res_id = instance_properties.get('uuid')
+        res_id = request_spec.instance_uuid
         hosts = [obj.host for obj in filter_obj_list]
+        hints = request_spec.scheduler_hints
 
         # TODO(JD): If we can't reach Valet at all, we may opt to fail
         # TODO(JD): all hosts depending on a TBD config flag.
@@ -128,6 +126,7 @@ class ValetFilter(filters.BaseHostFilter):
             self._authorize()
         except Exception as ex:
             failed = ex
+
         if failed:
             msg = _LW("Failed to filter the hosts, failure mode is %s")
             LOG.warn(msg % failure_mode)
@@ -136,16 +135,14 @@ class ValetFilter(filters.BaseHostFilter):
                 yield_all = True
             else:
                 LOG.error(failed)
-        # if not filter_properties.get(hints_key, {}).has_key(orch_id_key):
-        elif orch_id_key not in filter_properties.get(hints_key, {}):
+        elif orch_id_key not in hints:
            msg = _LW("Valet: Heat Stack Lifecycle Scheduler Hints not found. "
                      "Performing ad-hoc placement.")
            LOG.info(msg)
            ad_hoc = True
 
            # We'll need the flavor.
-           instance_type = filter_properties.get('instance_type')
-           flavor = instance_type.get('name')
+           flavor = request_spec.flavor.flavorid
 
            # Because this wasn't orchestrated, there's no stack.
            # We're going to compose a resource as if there as one.
@@ -163,7 +160,7 @@ class ValetFilter(filters.BaseHostFilter):
 
            # Only add the AZ if it was expressly defined
            res_properties = resources[res_id]["properties"]
-           a_zone = instance_properties.get('availability_zone')
+           a_zone = request_spec.availability_zone
            if a_zone:
                res_properties["availability_zone"] = a_zone
 
@@ -216,7 +213,7 @@ class ValetFilter(filters.BaseHostFilter):
                else:
                    yield_all = False
        else:
-           orch_id = filter_properties[hints_key][orch_id_key]
+           orch_id = hints[orch_id_key]
 
            count = 0
            response = None
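Editor's note: the hunks above move the Nova filter from the legacy filter_properties dictionary to the Newton-style RequestSpec object passed directly into filter_all(). A minimal sketch of the accesses the new code relies on is below; the helper name is hypothetical and only restates the fields visible in the diff.

# Sketch only: RequestSpec fields the updated ValetFilter reads (per the diff above).
def summarize_request(request_spec):
    """Gather the values the Newton-style filter needs from a RequestSpec."""
    return {
        'resource_id': request_spec.instance_uuid,            # physical id of the instance
        'hints': request_spec.scheduler_hints,                # may carry heat_resource_uuid
        'flavor': request_spec.flavor.flavorid,               # replaces instance_type['name']
        'availability_zone': request_spec.availability_zone,  # None unless expressly set
    }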
@@ -26,7 +26,7 @@ def _messaging_notifier_from_config(config):
     transport = messaging.get_notification_transport(cfg.CONF, transport_url)
     notifier = messaging.Notifier(transport, driver='messaging',
                                   publisher_id='valet',
-                                  topic='notifications', retry=10)
+                                  topics=['notifications'], retry=10)
     return notifier
 
 
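Editor's note: this hunk tracks an oslo.messaging API change; newer releases take a list via topics= instead of the single topic= keyword. A hedged sketch of the same construction outside Valet follows; the transport URL, event type, and payload are placeholders, not Valet configuration.

from oslo_config import cfg
import oslo_messaging as messaging

# Placeholder transport URL; Valet builds this from its own config.
transport = messaging.get_notification_transport(cfg.CONF, url=None)
notifier = messaging.Notifier(transport, driver='messaging',
                              publisher_id='valet',
                              topics=['notifications'], retry=10)
# Emit a sample INFO-level notification (event type and payload are made up).
notifier.info({}, 'valet.sample.event', {'status': 'ok'})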
@@ -17,10 +17,14 @@
 
 from abc import ABCMeta
 from abc import abstractmethod
+from importlib import import_module
 import inspect
+import os
+import pkgutil
+import uuid
 
 from pecan import conf
 import six
-import uuid
 
 from valet import api
 from valet.api.common.i18n import _
@@ -29,12 +33,13 @@ from valet.common.music import Music
 
 def get_class(kls):
     """Returns a class given a fully qualified class name"""
-    parts = kls.split('.')
-    module = ".".join(parts[:-1])
-    mod = __import__(module)
-    for comp in parts[1:]:
-        mod = getattr(mod, comp)
-    return mod
+    pkg_path = os.path.dirname(__file__)
+    for loader, mod_name, is_pkg in pkgutil.iter_modules([pkg_path]):
+        mod = import_module('valet.api.db.models.music.' + mod_name)
+        cls = getattr(mod, kls, None)
+        if cls:
+            return cls
+    return None
 
 
 class abstractclassmethod(classmethod):  # pylint: disable=C0103,R0903
@@ -200,8 +205,7 @@ class Query(object):
         if inspect.isclass(model):
             self.model = model
         elif isinstance(model, basestring):
-            self.model = get_class(
-                'valet.api.db.models.music.placements.' + model)
+            self.model = get_class(model)
         assert inspect.isclass(self.model)
 
     def __kwargs(self):
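Editor's note: after the rewrite above, get_class() no longer needs a fully qualified path; it scans the modules in the package and returns the first class whose name matches, or None. A short usage sketch under that assumption follows; the import path is inferred from the diff, and the model names come from the import hunk further down.

# Usage sketch: bare class names are enough, since every module in the
# valet.api.db.models.music package is scanned.
from valet.api.db.models.music import get_class

Plan = get_class('Plan')               # resolved from ...music.plans
Placement = get_class('Placement')     # resolved from ...music.placements
missing = get_class('NoSuchModel')     # returns None instead of raising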
@@ -17,12 +17,12 @@ from pecan.commands.base import BaseCommand
 from valet import api
 from valet.api.common.i18n import _
 from valet.api.db import models
-from valet.api.db.models import Event
-from valet.api.db.models import Group
-from valet.api.db.models import Placement
-from valet.api.db.models import PlacementRequest
-from valet.api.db.models import PlacementResult
-from valet.api.db.models import Plan
+from valet.api.db.models.music.groups import Group
+from valet.api.db.models.music.ostro import Event
+from valet.api.db.models.music.ostro import PlacementRequest
+from valet.api.db.models.music.ostro import PlacementResult
+from valet.api.db.models.music.placements import Placement
+from valet.api.db.models.music.plans import Plan
 from valet.common.conf import get_logger
 from valet.common.conf import init_conf
 
@@ -35,8 +35,6 @@ class PopulateCommand(BaseCommand):
         super(PopulateCommand, self).run(args)
         try:
             init_conf("populate.log")
-            # cfg.CONF.log_file = "populate.log"
-            # cfg.CONF.use_stderr = True
             LOG = api.LOG = get_logger("populate")
             LOG.info(_("Loading environment"))
             self.load_app()
@@ -65,12 +65,12 @@ class ListenerManager(threading.Thread):
         self.MUSIC = {'engine': engine,
                       'keyspace': self.config.music.keyspace}
         self.listener_logger.debug(
-            'Storing in music on %s, keyspace %s' %
-            (self.config.music.host, self.config.music.keyspace))
+            'Storing in music on %s, keyspace %s',
+            self.config.music.hosts, self.config.music.keyspace)
 
-        self.listener_logger.debug('Connecting to %s, with %s' %
-                                   (self.config.messaging.host,
-                                    self.config.messaging.username))
+        self.listener_logger.debug('Connecting to %s, with %s',
+                                   self.config.messaging.host,
+                                   self.config.messaging.username)
         credentials = pika.PlainCredentials(self.config.messaging.username,
                                             self.config.messaging.password)
         parameters = pika.ConnectionParameters(self.config.messaging.host,
@@ -103,7 +103,7 @@ class ListenerManager(threading.Thread):
         # Bind the queue to the selected exchange
         channel.queue_bind(exchange=exchange_name, queue=queue_name,
                            routing_key=binding_key)
-        self.listener_logger.info('Channel is bound,listening on%s '
+        self.listener_logger.info('Channel is bound,listening on %s '
                                   'exchange %s',
                                   self.config.messaging.host,
                                   self.config.events_listener.exchange)
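Editor's note: the hunks here and in the Resource changes below replace call-time string interpolation (msg % args) with logging's lazy formatting (msg, args). A small self-contained illustration of the difference; the logger name and values are arbitrary.

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("valet.demo")

# Eager: the string is built even though DEBUG is disabled.
logger.debug("Connecting to %s, with %s" % ("rabbit-1", "stackrabbit"))

# Lazy (the style these hunks adopt): formatting is deferred until a handler
# actually emits the record, so suppressed levels cost almost nothing.
logger.debug("Connecting to %s, with %s", "rabbit-1", "stackrabbit")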
@@ -64,6 +64,7 @@ class Resource(object):
     def bootstrap_from_db(self, _resource_status):
         """Return True if bootsrap resource from database successful."""
         try:
+            self.logger.info("Resource status from DB = %s", _resource_status)
             logical_groups = _resource_status.get("logical_groups")
             if logical_groups:
                 for lgk, lg in logical_groups.iteritems():
@@ -376,11 +377,11 @@ class Resource(object):
             self.logger.debug(" vms")
             debug_msg = " orch_id = %s uuid = %s"
             for v in lg.vm_list:
-                self.logger.debug(debug_msg % (v[0], v[2]))
+                self.logger.debug(debug_msg, v[0], v[2])
             self.logger.debug(" hosts")
             for h, v in lg.vms_per_host.iteritems():
-                self.logger.debug(" host = %s" % h)
-                self.logger.debug(" vms = %s" %
+                self.logger.debug(" host = %s", h)
+                self.logger.debug(" vms = %s",
                                   str(len(lg.vms_per_host[h])))
                 host = None
                 if h in self.hosts.keys():
@@ -508,9 +509,8 @@ class Resource(object):
 
         if host.status != _st:
             host.status = _st
-            self.logger.warn(
-                "Resource.update_host_resources: host(%s) status changed" %
-                _hn)
+            self.logger.info(
+                "Resource.update_host_resources: host(%s) status changed", _hn)
             updated = True
 
         # FIXME(GJ): should check cpu, memm and disk here?
@@ -578,7 +578,7 @@ class Resource(object):
         for lgk in _host.memberships.keys():
             if lgk not in self.logical_groups.keys():
                 self.logger.warn("logical group (%s) missing while "
-                                 "removing %s" % (lgk, _h_uuid))
+                                 "removing %s", lgk, _h_uuid)
                 continue
             lg = self.logical_groups[lgk]
 
@@ -618,7 +618,7 @@ class Resource(object):
         for lgk in _host.memberships.keys():
             if lgk not in self.logical_groups.keys():
                 self.logger.warn("logical group (%s) missing while "
-                                 "removing %s" % (lgk, _uuid))
+                                 "removing %s", lgk, _uuid)
                 continue
             lg = self.logical_groups[lgk]
 
@@ -190,6 +190,10 @@ class HostGroup(object):
         for ck in self.child_resources.keys():
             child_list.append(ck)
 
+        parent_name = None
+        if self.parent_resource:
+            parent_name = self.parent_resource.name
+
         return {'status': self.status,
                 'host_type': self.host_type,
                 'membership_list': membership_list,
@@ -202,7 +206,7 @@ class HostGroup(object):
                 'local_disk': self.local_disk_cap,
                 'original_local_disk': self.original_local_disk_cap,
                 'avail_local_disk': self.avail_local_disk_cap,
-                'parent': self.parent_resource.name,
+                'parent': parent_name,
                 'children': child_list,
                 'vm_list': self.vm_list,
                 'last_update': self.last_update}
@@ -104,7 +104,7 @@ class TopologyManager(threading.Thread):
 
                 new_host.last_update = time.time()
 
-                self.logger.warn("TopologyManager: new host (" +
+                self.logger.info("TopologyManager: new host (" +
                                  new_host.name + ") added from configuration")
                 updated = True
 
@@ -116,7 +116,7 @@ class TopologyManager(threading.Thread):
 
                 host.last_update = time.time()
 
-                self.logger.warn("TopologyManager: host (" +
+                self.logger.info("TopologyManager: host (" +
                                  host.name + ") removed from configuration")
                 updated = True
 
@@ -127,7 +127,7 @@ class TopologyManager(threading.Thread):
 
                 new_host_group.last_update = time.time()
 
-                self.logger.warn("TopologyManager: new host_group (" +
+                self.logger.info("TopologyManager: new host_group (" +
                                  new_host_group.name + ") added")
                 updated = True
 
@@ -138,7 +138,7 @@ class TopologyManager(threading.Thread):
 
                 host_group.last_update = time.time()
 
-                self.logger.warn("TopologyManager: host_group (" +
+                self.logger.info("TopologyManager: host_group (" +
                                  host_group.name + ") disabled")
                 updated = True
 
@@ -191,7 +191,7 @@ class TopologyManager(threading.Thread):
         if "infra" not in _rhost.tag:
             _rhost.tag.append("infra")
             updated = True
-            self.logger.warn("TopologyManager: host (" + _rhost.name +
+            self.logger.info("TopologyManager: host (" + _rhost.name +
                              ") updated (tag)")
 
         if (_rhost.host_group is None or
@@ -203,7 +203,7 @@ class TopologyManager(threading.Thread):
             else:
                 _rhost.host_group = self.resource.datacenter
             updated = True
-            self.logger.warn("TopologyManager: host (" + _rhost.name +
+            self.logger.info("TopologyManager: host (" + _rhost.name +
                              ") updated (host_group)")
 
         return updated
@@ -214,22 +214,23 @@ class TopologyManager(threading.Thread):
         if _hg.host_type != _rhg.host_type:
             _rhg.host_type = _hg.host_type
             updated = True
-            self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+            self.logger.info("TopologyManager: host_group (" + _rhg.name +
                              ") updated (hosting type)")
 
         if _rhg.status == "disabled":
             _rhg.status = "enabled"
             updated = True
-            self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+            self.logger.info("TopologyManager: host_group (" + _rhg.name +
                              ") updated (enabled)")
 
+        if _hg.parent_resource != _rhg.parent_resource:
            if _hg.parent_resource.name in self.resource.host_groups.keys():
                _rhg.parent_resource = \
                    self.resource.host_groups[_hg.parent_resource.name]
            else:
                _rhg.parent_resource = self.resource.datacenter
            updated = True
-           self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+           self.logger.info("TopologyManager: host_group (" + _rhg.name +
                             ") updated (parent host_group)")
 
         for rk in _hg.child_resources.keys():
@@ -244,7 +245,7 @@ class TopologyManager(threading.Thread):
             elif _rhg.host_type == "cluster":
                 _rhg.child_resources[rk] = self.resource.host_groups[rk]
                 updated = True
-                self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+                self.logger.info("TopologyManager: host_group (" + _rhg.name +
                                  ") updated (new child host)")
 
         for rrk in _rhg.child_resources.keys():
@@ -256,7 +257,7 @@ class TopologyManager(threading.Thread):
             if exist is False:
                 del _rhg.child_resources[rrk]
                 updated = True
-                self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+                self.logger.info("TopologyManager: host_group (" + _rhg.name +
                                  ") updated (child host removed)")
 
         return updated
@@ -268,7 +269,7 @@ class TopologyManager(threading.Thread):
             if rc not in self.resource.datacenter.region_code_list:
                 self.resource.datacenter.region_code_list.append(rc)
                 updated = True
-                self.logger.warn("TopologyManager: datacenter updated "
+                self.logger.info("TopologyManager: datacenter updated "
                                  "(new region code, " + rc + ")")
 
         code_list = self.resource.datacenter.region_code_list
@@ -278,7 +279,7 @@ class TopologyManager(threading.Thread):
         if alen != blen:
             updated = True
             self.resource.datacenter.region_code_list = code_list
-            self.logger.warn("datacenter updated (region code removed)")
+            self.logger.info("datacenter updated (region code removed)")
 
         for rk in _datacenter.resources.keys():
             exist = False
@@ -295,7 +296,7 @@ class TopologyManager(threading.Thread):
                 self.resource.datacenter.resources[rk] = \
                     self.resource.hosts[rk]
                 updated = True
-                self.logger.warn("TopologyManager: datacenter updated "
+                self.logger.info("TopologyManager: datacenter updated "
                                  "(new resource)")
 
         for rrk in self.resource.datacenter.resources.keys():
@@ -307,7 +308,7 @@ class TopologyManager(threading.Thread):
             if exist is False:
                 del self.resource.datacenter.resources[rrk]
                 updated = True
-                self.logger.warn("TopologyManager: datacenter updated "
+                self.logger.info("TopologyManager: datacenter updated "
                                  "(resource removed)")
 
         return updated
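Editor's note: the TopologyManager hunks above downgrade routine bookkeeping messages from warn to info, reserving WARNING for conditions that actually need attention. A quick sketch of the effect with a WARNING-level handler; the names and values are placeholders.

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("TopologyManager")

# Routine updates are INFO now, so they stay out of a WARNING-level log...
logger.info("TopologyManager: new host (compute-7) added from configuration")

# ...while genuine anomalies, still logged at warn, continue to show up.
logger.warning("logical group (%s) missing while removing %s", "az1", "uuid-123")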