Refactor the Resource Manager

Part of Support Nested Stacks and Updates story

Enhance the definition of data center resources and improve how resource updates are collected.

Change-Id: I7843e589109ca7e5ab98265a19850c34ab1ad8f8
Story: #2001139
Task: #4857
Jake Carlson 2017-08-08 08:12:07 -05:00
parent c03eb9873e
commit e96289acc2
18 changed files with 1853 additions and 1696 deletions


@@ -24,7 +24,7 @@ from valet.engine.optimizer.ostro.constraint_solver import ConstraintSolver
from valet.engine.optimizer.ostro.search_base import LogicalGroupResource
from valet.engine.optimizer.ostro.search_base import Node
from valet.engine.optimizer.ostro.search_base import Resource
from valet.engine.resource_manager.resource_base import Datacenter
from valet.engine.resource_manager.resources.datacenter import Datacenter
LOG = log.getLogger(__name__)


@@ -15,27 +15,22 @@
"""Compute Manager."""
from copy import deepcopy
import threading
import time
from oslo_log import log
from valet.engine.resource_manager.compute import Compute
from valet.engine.resource_manager.resource_base import Host
# from valet.engine.optimizer.simulator.compute_sim import ComputeSim
from valet.engine.resource_manager.nova_compute import NovaCompute
from valet.engine.resource_manager.resources.host import Host
LOG = log.getLogger(__name__)
class ComputeManager(threading.Thread):
"""Compute Manager Class.
"""Resource Manager to maintain compute host resources."""
Threaded class to set up and manage compute for resources, hosts,
flavors, etc. Calls many functions from Resource.
"""
def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config):
"""Init Compute Manager."""
def __init__(self, _t_id, _t_name, _resource, _data_lock, _config):
threading.Thread.__init__(self)
self.thread_id = _t_id
@@ -43,231 +38,150 @@ class ComputeManager(threading.Thread):
self.data_lock = _data_lock
self.end_of_process = False
self.resource = _rsc
self.phandler = None
self.ahandler = None
self.resource = _resource
self.config = _config
self.admin_token = None
self.project_token = None
self.update_batch_wait = self.config.update_batch_wait
def run(self):
"""Start Compute Manager thread to run setup."""
LOG.info("ComputeManager: start " + self.thread_name +
" ......")
def set_handlers(self, _placement_handler, _app_handler):
"""Set handlers."""
self.phandler = _placement_handler
self.ahandler = _app_handler
def run(self):
"""Keep checking for timing for this batch job."""
LOG.info("start " + self.thread_name + " ......")
period_end = 0
if self.config.compute_trigger_freq > 0:
period_end = time.time() + self.config.compute_trigger_freq
while self.end_of_process is False:
time.sleep(60)
curr_ts = time.time()
if curr_ts > period_end:
# Give some time (batch_wait) to update resource status via
# message bus. Otherwise, late update will be cleaned up.
time_diff = curr_ts - self.resource.current_timestamp
if time_diff > self.update_batch_wait:
self._run()
period_end = (curr_ts +
self.config.compute_trigger_freq)
while self.end_of_process is False:
time.sleep(60)
# NOTE(GJ): do not use timer-based batch
LOG.info("exit compute_manager " + self.thread_name)
curr_ts = time.time()
if curr_ts > period_end:
# Give some time (batch_wait) to sync resource status via
# message bus
if ((curr_ts - self.resource.current_timestamp) >
self.update_batch_wait):
self._run()
period_end = time.time() + self.config.compute_trigger_freq
LOG.info("exit " + self.thread_name)
def _run(self):
LOG.info("ComputeManager: --- start compute_nodes "
"status update ---")
triggered_host_updates = self.set_hosts()
if triggered_host_updates is not True:
LOG.warning("fail to set hosts from nova")
triggered_flavor_updates = self.set_flavors()
if triggered_flavor_updates is not True:
LOG.warning("fail to set flavor from nova")
LOG.info("ComputeManager: --- done compute_nodes "
"status update ---")
return True
"""Run this batch job."""
if self.set_hosts() is not True:
LOG.warn("fail to set hosts from nova")
def set_hosts(self):
"""Return True if hosts set, compute avail resources, checks update."""
"""Check any inconsistency and perform garbage collection if necessary.
"""
LOG.info("set compute hosts")
hosts = {}
logical_groups = {}
compute = Compute()
status = compute.set_hosts(hosts, logical_groups)
if status != "success":
# compute = ComputeSim(self.config)
compute = NovaCompute()
if compute.set_hosts(hosts) != "success":
return False
self._compute_avail_host_resources(hosts)
for hk, host in hosts.iteritems():
self.resource.compute_avail_resources(hk, host)
self.data_lock.acquire()
lg_updated = self._check_logical_group_update(logical_groups)
host_updated = self._check_host_update(hosts)
if lg_updated is True or host_updated is True:
if self._check_host_update(hosts, compute.vm_locations) is True:
self.resource.update_topology(store=False)
self.data_lock.release()
return True
def _compute_avail_host_resources(self, _hosts):
for hk, host in _hosts.iteritems():
self.resource.compute_avail_resources(hk, host)
def _check_host_update(self, _hosts, _vm_locations):
"""Check the inconsistency of hosts."""
def _check_logical_group_update(self, _logical_groups):
updated = False
for lk in _logical_groups.keys():
if lk not in self.resource.logical_groups.keys():
self.resource.logical_groups[lk] = deepcopy(
_logical_groups[lk])
self.resource.logical_groups[lk].last_update = time.time()
LOG.warning("ComputeManager: new logical group (" +
lk + ") added")
updated = True
for rlk in self.resource.logical_groups.keys():
rl = self.resource.logical_groups[rlk]
if rl.group_type != "EX" and rl.group_type != "AFF" and \
rl.group_type != "DIV":
if rlk not in _logical_groups.keys():
self.resource.logical_groups[rlk].status = "disabled"
self.resource.logical_groups[rlk].last_update = time.time()
LOG.warning("ComputeManager: logical group (" +
rlk + ") removed")
updated = True
for lk in _logical_groups.keys():
lg = _logical_groups[lk]
rlg = self.resource.logical_groups[lk]
if lg.group_type != "EX" and lg.group_type != "AFF" and \
lg.group_type != "DIV":
if self._check_logical_group_metadata_update(lg, rlg) is True:
rlg.last_update = time.time()
LOG.warning("ComputeManager: logical group (" +
lk + ") updated")
updated = True
return updated
def _check_logical_group_metadata_update(self, _lg, _rlg):
updated = False
if _lg.status != _rlg.status:
_rlg.status = _lg.status
updated = True
for mdk in _lg.metadata.keys():
if mdk not in _rlg.metadata.keys():
_rlg.metadata[mdk] = _lg.metadata[mdk]
updated = True
for rmdk in _rlg.metadata.keys():
if rmdk not in _lg.metadata.keys():
del _rlg.metadata[rmdk]
updated = True
for hk in _lg.vms_per_host.keys():
if hk not in _rlg.vms_per_host.keys():
_rlg.vms_per_host[hk] = deepcopy(_lg.vms_per_host[hk])
updated = True
for rhk in _rlg.vms_per_host.keys():
if rhk not in _lg.vms_per_host.keys():
del _rlg.vms_per_host[rhk]
updated = True
return updated
def _check_host_update(self, _hosts):
updated = False
for hk in _hosts.keys():
if hk not in self.resource.hosts.keys():
new_host = Host(hk)
self.resource.hosts[new_host.name] = new_host
new_host.last_update = time.time()
LOG.warning("ComputeManager: new host (" +
new_host.name + ") added")
self.resource.update_host_time(new_host.name)
LOG.info("new host (" + new_host.name + ") added")
updated = True
for rhk, rhost in self.resource.hosts.iteritems():
if rhk not in _hosts.keys():
if "nova" in rhost.tag:
rhost.tag.remove("nova")
rhost.last_update = time.time()
LOG.warning("ComputeManager: host (" +
rhost.name + ") disabled")
self.resource.update_host_time(rhk)
LOG.info("host (" + rhost.name + ") disabled")
updated = True
for hk in _hosts.keys():
host = _hosts[hk]
rhost = self.resource.hosts[hk]
if self._check_host_config_update(host, rhost) is True:
rhost.last_update = time.time()
self.resource.update_host_time(hk)
updated = True
for hk, h in self.resource.hosts.iteritems():
if h.clean_memberships() is True:
h.last_update = time.time()
LOG.warning("ComputeManager: host (" + h.name +
") updated (delete EX/AFF/DIV membership)")
updated = True
for hk, host in self.resource.hosts.iteritems():
if host.last_update >= self.resource.current_timestamp:
self.resource.update_rack_resource(host)
inconsistent_hosts = self._check_placements(_hosts, _vm_locations)
if inconsistent_hosts is None:
return False
elif len(inconsistent_hosts) > 0:
for hk, h in inconsistent_hosts.iteritems():
if hk in _hosts.keys() and hk in self.resource.hosts.keys():
self.resource.update_host_time(hk)
updated = True
return updated
def _check_host_config_update(self, _host, _rhost):
topology_updated = False
"""Check host configuration consistency."""
config_updated = False
if self._check_host_status(_host, _rhost) is True:
topology_updated = True
if self._check_host_resources(_host, _rhost) is True:
topology_updated = True
if self._check_host_memberships(_host, _rhost) is True:
topology_updated = True
if self._check_host_vms(_host, _rhost) is True:
topology_updated = True
config_updated = True
return topology_updated
if self._check_host_resources(_host, _rhost) is True:
config_updated = True
return config_updated
def _check_host_status(self, _host, _rhost):
topology_updated = False
"""Check host status consistency."""
status_updated = False
if "nova" not in _rhost.tag:
_rhost.tag.append("nova")
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (tag added)")
LOG.info("host (" + _rhost.name + ") updated (tag added)")
status_updated = True
if _host.status != _rhost.status:
_rhost.status = _host.status
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (status changed)")
LOG.info("host (" + _rhost.name + ") updated (status changed)")
status_updated = True
if _host.state != _rhost.state:
_rhost.state = _host.state
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (state changed)")
LOG.info("host (" + _rhost.name + ") updated (state changed)")
status_updated = True
return topology_updated
return status_updated
def _check_host_resources(self, _host, _rhost):
topology_updated = False
"""Check the resource amount consistency."""
resource_updated = False
if _host.vCPUs != _rhost.vCPUs or \
_host.original_vCPUs != _rhost.original_vCPUs or \
@@ -275,9 +189,8 @@ class ComputeManager(threading.Thread):
_rhost.vCPUs = _host.vCPUs
_rhost.original_vCPUs = _host.original_vCPUs
_rhost.avail_vCPUs = _host.avail_vCPUs
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (CPU updated)")
LOG.info("host (" + _rhost.name + ") updated (CPU updated)")
resource_updated = True
if _host.mem_cap != _rhost.mem_cap or \
_host.original_mem_cap != _rhost.original_mem_cap or \
@@ -285,9 +198,8 @@ class ComputeManager(threading.Thread):
_rhost.mem_cap = _host.mem_cap
_rhost.original_mem_cap = _host.original_mem_cap
_rhost.avail_mem_cap = _host.avail_mem_cap
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (mem updated)")
LOG.info("host (" + _rhost.name + ") updated (mem updated)")
resource_updated = True
if _host.local_disk_cap != _rhost.local_disk_cap or \
_host.original_local_disk_cap != _rhost.original_local_disk_cap or \
@@ -295,9 +207,9 @@ class ComputeManager(threading.Thread):
_rhost.local_disk_cap = _host.local_disk_cap
_rhost.original_local_disk_cap = _host.original_local_disk_cap
_rhost.avail_local_disk_cap = _host.avail_local_disk_cap
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (local disk space updated)")
LOG.info("host (" + _rhost.name +
") updated (local disk space updated)")
resource_updated = True
if _host.vCPUs_used != _rhost.vCPUs_used or \
_host.free_mem_mb != _rhost.free_mem_mb or \
@@ -307,148 +219,178 @@ class ComputeManager(threading.Thread):
_rhost.free_mem_mb = _host.free_mem_mb
_rhost.free_disk_gb = _host.free_disk_gb
_rhost.disk_available_least = _host.disk_available_least
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (other resource numbers)")
LOG.info("host (" + _rhost.name +
") updated (other resource numbers)")
resource_updated = True
return topology_updated
return resource_updated
def _check_host_memberships(self, _host, _rhost):
topology_updated = False
def _check_placements(self, _hosts, _vm_locations):
"""Check the consistency of vm placements with nova."""
for mk in _host.memberships.keys():
if mk not in _rhost.memberships.keys():
_rhost.memberships[mk] = self.resource.logical_groups[mk]
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (new membership)")
inconsistent_hosts = {}
curr_time = time.time()
for mk in _rhost.memberships.keys():
m = _rhost.memberships[mk]
if m.group_type != "EX" and m.group_type != "AFF" and \
m.group_type != "DIV":
if mk not in _host.memberships.keys():
del _rhost.memberships[mk]
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (delete membership)")
for vk, hk in _vm_locations.iteritems():
placement = self.phandler.get_placement(vk)
if placement is None:
return None
return topology_updated
elif placement.uuid == "none":
LOG.info("unknown vm found in nova")
def _check_host_vms(self, _host, _rhost):
topology_updated = False
vm_info = _hosts[hk].get_vm_info(uuid=vk)
# Clean up VMs
blen = len(_rhost.vm_list)
_rhost.vm_list = [v for v in _rhost.vm_list if v[2] != "none"]
alen = len(_rhost.vm_list)
if alen != blen:
topology_updated = True
msg = "host ({0}) {1} none vms removed"
LOG.warning(msg.format(_rhost.name, str(blen - alen)))
p = self.phandler.insert_placement(vk, vm_info["stack_id"], hk,
vm_info["orch_id"],
"created")
if p is None:
return None
self.phandler.set_unverified(p.uuid)
self.resource.clean_none_vms_from_logical_groups(_rhost)
LOG.info("host (" + hk + ") updated (new unknown vm added)")
for vm_id in _host.vm_list:
if _rhost.exist_vm_by_uuid(vm_id[2]) is False:
_rhost.vm_list.append(vm_id)
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (new vm placed)")
rhost = self.resource.hosts[hk]
if rhost.exist_vm(uuid=vk):
rhost.remove_vm(uuid=vk)
for rvm_id in _rhost.vm_list:
if _host.exist_vm_by_uuid(rvm_id[2]) is False:
self.resource.remove_vm_by_uuid_from_logical_groups(
_rhost, rvm_id[2])
topology_updated = True
LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (vm removed)")
rhost.vm_list.append(vm_info)
inconsistent_hosts[hk] = rhost
blen = len(_rhost.vm_list)
_rhost.vm_list = [
v for v in _rhost.vm_list if _host.exist_vm_by_uuid(v[2]) is True]
alen = len(_rhost.vm_list)
if alen != blen:
topology_updated = True
msg = "host ({0}) {1} vms removed"
LOG.warning(msg.format(_rhost.name, str(blen - alen)))
# FIXME(gjung): add to corresponding groups with verification?
# currently, do this at bootstrap time and when requested.
return topology_updated
else:
if hk != placement.host:
LOG.warn("PANIC: placed in different host")
def set_flavors(self):
"""Return True if compute set flavors returns success."""
flavors = {}
vm_info = _hosts[hk].get_vm_info(uuid=vk)
vm_info["stack_id"] = placement.stack_id
vm_info["orch_id"] = placement.orch_id
compute = Compute()
rhost = self.resource.hosts[hk]
if rhost.exist_vm(uuid=vk):
rhost.remove_vm(uuid=vk)
status = compute.set_flavors(flavors)
if status != "success":
LOG.error(status)
return False
rhost.vm_list.append(vm_info)
inconsistent_hosts[hk] = rhost
self.data_lock.acquire()
if self._check_flavor_update(flavors) is True:
self.resource.update_topology(store=False)
self.data_lock.release()
LOG.warn("host (" + rhost.name + ") updated (vm added)")
return True
# FIXME(gjung): add to corresponding groups with
# verification?
def _check_flavor_update(self, _flavors):
updated = False
if placement.host in self.resource.hosts.keys():
old_rhost = self.resource.hosts[placement.host]
if old_rhost.remove_vm(uuid=vk) is True:
LOG.warn("host (" + old_rhost.name +
") updated (vm removed)")
for fk in _flavors.keys():
if fk not in self.resource.flavors.keys():
self.resource.flavors[fk] = deepcopy(_flavors[fk])
inconsistent_hosts[placement.host] = old_rhost
self.resource.flavors[fk].last_update = time.time()
LOG.warning("ComputeManager: new flavor (" +
fk + ":" + _flavors[fk].flavor_id + ") added")
updated = True
self.resource.remove_vm_from_groups_of_host(old_rhost,
uuid=vk)
for rfk in self.resource.flavors.keys():
rf = self.resource.flavors[rfk]
if rfk not in _flavors.keys():
rf.status = "disabled"
placement.host = hk
placement.status = "none"
placement.timestamp = curr_time
if not self.phandler.store_placement(vk, placement):
return None
rf.last_update = time.time()
LOG.warning("ComputeManager: flavor (" + rfk + ":" +
rf.flavor_id + ") removed")
updated = True
if (placement.stack_id is not None or
placement.stack_id != "none"):
(vid, hk) = self.ahandler.update_stack(
placement.stack_id, uuid=vk, host=hk)
if vid is None:
return None
for fk in _flavors.keys():
f = _flavors[fk]
rf = self.resource.flavors[fk]
new_state = None
if placement.state is None or placement.state == "none":
new_state = "created"
if self._check_flavor_spec_update(f, rf) is True:
rf.last_update = time.time()
LOG.warning("ComputeManager: flavor (" + fk + ":" +
rf.flavor_id + ") spec updated")
updated = True
if placement.state not in ("created", "rebuilt", "migrated"):
LOG.warn("vm is incomplete state = " + placement.state)
return updated
if (placement.state == "planned" or
placement.state == "building"):
new_state = "created"
elif (placement.state == "rebuilding" or
placement.state == "rebuild"):
new_state = "rebuilt"
elif (placement.state == "migrating" or
placement.state == "migrate"):
new_state = "migrated"
def _check_flavor_spec_update(self, _f, _rf):
spec_updated = False
if new_state is not None:
placement.state = new_state
placement.timestamp = curr_time
if not self.phandler.store_placement(vk, placement):
return None
if _f.status != _rf.status:
_rf.status = _f.status
spec_updated = True
for rk, rhost in self.resource.hosts.iteritems():
deletion_list = []
if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or \
_f.disk_cap != _rf.disk_cap:
_rf.vCPUs = _f.vCPUs
_rf.mem_cap = _f.mem_cap
_rf.disk_cap = _f.disk_cap
spec_updated = True
for vm_info in rhost.vm_list:
if vm_info["uuid"] is None or vm_info["uuid"] == "none":
LOG.warn("host (" + rhost.name + ") pending vm removed")
for sk in _f.extra_specs.keys():
if sk not in _rf.extra_specs.keys():
_rf.extra_specs[sk] = _f.extra_specs[sk]
spec_updated = True
deletion_list.append(vm_info)
for rsk in _rf.extra_specs.keys():
if rsk not in _f.extra_specs.keys():
del _rf.extra_specs[rsk]
spec_updated = True
if (vm_info["stack_id"] is not None and
vm_info["stack_id"] != "none"):
if not self.ahandler.delete_from_stack(
vm_info["stack_id"], orch_id=vm_info["orch_id"]):
return None
return spec_updated
else:
placement = self.phandler.get_placement(vm_info["uuid"])
if placement is None:
return None
if vm_info["uuid"] not in _vm_locations.keys():
LOG.warn("vm is mising with state = " +
placement.state)
deletion_list.append(vm_info)
if (placement.stack_id is not None and
placement.stack_id != "none"):
if not self.ahandler.delete_from_stack(
placement.stack_id, uuid=vm_info["uuid"]):
return None
if not self.phandler.delete_placement(vm_info["uuid"]):
return None
elif _vm_locations[vm_info["uuid"]] != rk:
LOG.warn("placed in different host")
if rhost.remove_vm(uuid=vm_info["uuid"]) is True:
LOG.warn("host (" + rk + ") updated (vm removed)")
inconsistent_hosts[rk] = rhost
self.resource.remove_vm_from_groups_of_host(
rhost, uuid=vm_info["uuid"])
# FIXME(gjung): placement.status?
if len(deletion_list) > 0:
LOG.warn("host (" + rhost.name + ") updated (vms removed)")
inconsistent_hosts[rk] = rhost
for vm_info in deletion_list:
if (vm_info["orch_id"] is not None and
vm_info["orch_id"] != "none"):
rhost.remove_vm(orch_id=vm_info["orch_id"])
self.resource.remove_vm_from_groups(
rhost, orch_id=vm_info["orch_id"])
else:
if not rhost.remove_vm(uuid=vm_info["uuid"]):
LOG.warn("fail to remove vm from host")
self.resource.remove_vm_from_groups(
rhost, uuid=vm_info["uuid"])
return inconsistent_hosts
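_check_placements walks nova's vm-to-host map, fetches each placement record, and repairs valet's per-host vm lists, returning the hosts it had to touch. A simplified sketch of that reconciliation idea, with hosts reduced to plain sets of vm uuids (names and types here are illustrative, not the actual classes):

def reconcile(vm_locations, host_vms):
    # vm_locations: vm uuid -> host name as reported by nova
    # host_vms: host name -> set of vm uuids currently tracked by valet
    inconsistent = {}
    for vm, hk in vm_locations.items():
        if vm not in host_vms.get(hk, set()):
            for other, vms in host_vms.items():
                if vm in vms:               # recorded on the wrong host
                    vms.discard(vm)
                    inconsistent[other] = vms
            host_vms.setdefault(hk, set()).add(vm)
            inconsistent[hk] = host_vms[hk]
    for hk, vms in host_vms.items():
        stale = {vm for vm in vms if vm_locations.get(vm) != hk}
        if stale:                           # vms nova no longer reports here
            host_vms[hk] = vms - stale
            inconsistent[hk] = host_vms[hk]
    return inconsistent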


@@ -0,0 +1,51 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import client
from keystoneauth1 import loading
from keystoneauth1 import session
from oslo_config import cfg
VERSION = 1
CONF = cfg.CONF
class Heat(object):
def __init__(self, _logger):
self.logger = _logger
self.heat = None
def _set_heat_client(self):
'''Set connection to Heat API.'''
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=CONF.identity.auth_url,
username=CONF.identity.username,
password=CONF.identity.password,
project_name=CONF.identity.project_name)
sess = session.Session(auth=auth)
self.heat = client.Client('1', session=sess)
def get_stacks(self):
'''Return stacks, each of which is a JSON dict.'''
# collecting and formatting to a JSON dict.
# return stacks
pass
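get_stacks() is left as a stub here. A possible body, assuming the python-heatclient v1 API (stacks.list() yielding Stack objects that support to_dict()), might look like:

    def get_stacks(self):
        '''Return stacks, each of which is a JSON dict.'''
        if self.heat is None:
            self._set_heat_client()
        # flatten each heatclient Stack object into a plain dict
        return [stack.to_dict() for stack in self.heat.stacks.list()]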


@@ -0,0 +1,268 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from copy import deepcopy
# from valet.engine.optimizer.simulator.compute_sim import ComputeSim
from valet.engine.resource_manager.nova_compute import NovaCompute
class MetadataManager(threading.Thread):
'''Metadata Manager to maintain flavors and groups.'''
def __init__(self, _t_id, _t_name, _resource, _data_lock, _config,
_logger):
threading.Thread.__init__(self)
self.thread_id = _t_id
self.thread_name = _t_name
self.data_lock = _data_lock
self.end_of_process = False
self.resource = _resource
self.config = _config
self.logger = _logger
self.update_batch_wait = self.config.update_batch_wait
def run(self):
'''Keep checking the timing for this batch job.'''
self.logger.info("start " + self.thread_name + " ......")
period_end = 0
if self.config.metadata_trigger_freq > 0:
period_end = time.time() + self.config.metadata_trigger_freq
while self.end_of_process is False:
time.sleep(60)
curr_ts = time.time()
if curr_ts > period_end:
if ((curr_ts - self.resource.current_timestamp) >
self.update_batch_wait):
self._run()
period_end = time.time() + \
self.config.metadata_trigger_freq
self.logger.info("exit " + self.thread_name)
def _run(self):
'''Run this batch job.'''
if self.set_groups() is not True:
self.logger.warn("fail to set groups (availability zones and "
"host-aggregates) from nova")
if self.set_flavors() is not True:
self.logger.warn("fail to set flavor from nova")
return True
def set_groups(self):
'''Set groups (availability zones and host-aggregates) from nova.'''
self.logger.info("set metadata (groups)")
groups = {}
# compute = ComputeSim(self.config)
compute = NovaCompute()
if compute.set_groups(groups) != "success":
return False
self.data_lock.acquire()
self._check_group_update(groups)
if self._check_host_memberships_update(groups) is True:
self.resource.update_topology(store=False)
self.data_lock.release()
return True
def _check_group_update(self, _groups):
'''Check any inconsistency for groups.'''
for lk in _groups.keys():
if lk not in self.resource.groups.keys():
self.resource.groups[lk] = deepcopy(_groups[lk])
self.resource.groups[lk].last_update = time.time()
self.logger.info("new group (" + lk + ") added")
for rlk in self.resource.groups.keys():
rl = self.resource.groups[rlk]
if (rl.group_type != "EX" and rl.group_type != "AFF" and
rl.group_type != "DIV"):
if rlk not in _groups.keys():
self.resource.groups[rlk].status = "disabled"
self.resource.groups[rlk].last_update = time.time()
self.logger.info("group (" + rlk + ") disabled")
for lk in _groups.keys():
lg = _groups[lk]
rlg = self.resource.groups[lk]
if (lg.group_type != "EX" and lg.group_type != "AFF" and
lg.group_type != "DIV"):
if self._check_group_metadata_update(lg, rlg) is True:
rlg.last_update = time.time()
self.logger.info("group (" + lk + ") updated")
def _check_group_metadata_update(self, _lg, _rlg):
'''Check any change in status and metadata of group.'''
updated = False
if _lg.status != _rlg.status:
_rlg.status = _lg.status
updated = True
for mdk in _lg.metadata.keys():
if mdk not in _rlg.metadata.keys():
_rlg.metadata[mdk] = _lg.metadata[mdk]
updated = True
for rmdk in _rlg.metadata.keys():
if rmdk not in _lg.metadata.keys():
del _rlg.metadata[rmdk]
updated = True
for hk in _lg.vms_per_host.keys():
if hk not in _rlg.vms_per_host.keys():
_rlg.vms_per_host[hk] = deepcopy(_lg.vms_per_host[hk])
updated = True
for rhk in _rlg.vms_per_host.keys():
if rhk not in _lg.vms_per_host.keys():
del _rlg.vms_per_host[rhk]
updated = True
return updated
def _check_host_memberships_update(self, _groups):
'''Check host memberships consistency.'''
membership_updated = False
for lgk, lg in _groups.iteritems():
for hk in lg.vms_per_host.keys():
if hk in self.resource.hosts.keys():
rhost = self.resource.hosts[hk]
if lgk not in rhost.memberships.keys():
rhost.memberships[lgk] = self.resource.groups[lgk]
self.resource.update_host_time(hk)
membership_updated = True
self.logger.info("host (" + rhost.name +
") updated (new membership)")
for rhk, rhost in self.resource.hosts.iteritems():
if rhost.check_availability() is True:
for mk in rhost.memberships.keys():
m = rhost.memberships[mk]
if (m.group_type != "EX" and m.group_type != "AFF" and
m.group_type != "DIV"):
if mk not in _groups.keys():
del rhost.memberships[mk]
self.resource.update_host_time(rhk)
membership_updated = True
self.logger.info("host (" + rhost.name +
") updated (delete membership)")
else:
lg = _groups[mk]
if rhk not in lg.vms_per_host.keys():
del rhost.memberships[mk]
self.resource.update_host_time(rhk)
membership_updated = True
self.logger.info("host (" + rhost.name + ") "
"updated (delete membership)")
return membership_updated
def set_flavors(self):
'''Set flavors from nova.'''
self.logger.info("set metadata (flavors)")
flavors = {}
# compute = ComputeSim(self.config)
compute = NovaCompute()
if compute.set_flavors(flavors) != "success":
return False
self.data_lock.acquire()
self._check_flavor_update(flavors)
self.data_lock.release()
return True
def _check_flavor_update(self, _flavors):
'''Check flavor info consistency.'''
for fk in _flavors.keys():
if fk not in self.resource.flavors.keys():
self.resource.flavors[fk] = deepcopy(_flavors[fk])
self.resource.flavors[fk].last_update = time.time()
self.logger.info("new flavor (" + fk + ":" +
_flavors[fk].flavor_id + ") added")
for rfk in self.resource.flavors.keys():
rf = self.resource.flavors[rfk]
if rfk not in _flavors.keys():
rf.status = "disabled"
rf.last_update = time.time()
self.logger.info("flavor (" + rfk + ":" + rf.flavor_id +
") removed")
for fk in _flavors.keys():
f = _flavors[fk]
rf = self.resource.flavors[fk]
if self._check_flavor_spec_update(f, rf) is True:
rf.last_update = time.time()
self.logger.info("flavor (" + fk + ":" + rf.flavor_id +
") spec updated")
def _check_flavor_spec_update(self, _f, _rf):
'''Check flavor's spec consistency.'''
spec_updated = False
if _f.status != _rf.status:
_rf.status = _f.status
spec_updated = True
if (_f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or
_f.disk_cap != _rf.disk_cap):
_rf.vCPUs = _f.vCPUs
_rf.mem_cap = _f.mem_cap
_rf.disk_cap = _f.disk_cap
spec_updated = True
for sk in _f.extra_specs.keys():
if sk not in _rf.extra_specs.keys():
_rf.extra_specs[sk] = _f.extra_specs[sk]
spec_updated = True
for rsk in _rf.extra_specs.keys():
if rsk not in _f.extra_specs.keys():
del _rf.extra_specs[rsk]
spec_updated = True
return spec_updated
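Usage sketch for MetadataManager: it is a long-running batch thread owned by the engine bootstrap. The resource, config, and logger objects below are assumed to be provided by the caller and are not part of this change:

import threading

data_lock = threading.Lock()
manager = MetadataManager(1, "metadata_manager", resource, data_lock,
                          config, logger)
manager.start()
# ... on shutdown:
manager.end_of_process = True   # lets the run() loop fall through
manager.join()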


@@ -0,0 +1,145 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from valet.engine.resource_manager.resources.host_group import HostGroup
class Naming(object):
'''Naming Convention Based Topology
Topology generated by canonical naming convention to capture datacenter
layout.
'''
def __init__(self, _config, _logger):
self.config = _config
self.logger = _logger
def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
'''Set datacenter layout structure.'''
status = self._set_host_topology(_datacenter, _host_groups, _hosts,
_rhosts)
return status
def _set_host_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
'''Set datacenter resource structure (racks, hosts).'''
status = "success"
for rhk, rh in _rhosts.iteritems():
h = copy.deepcopy(rh)
if "infra" not in h.tag:
h.tag.append("infra")
(rack_name, parsing_status) = self._set_layout_by_name(rhk)
if parsing_status != "success":
self.logger.warn(parsing_status + " in host_name (" + rhk +
")")
if rack_name not in _host_groups.keys():
host_group = HostGroup(rack_name)
host_group.host_type = "rack"
_host_groups[host_group.name] = host_group
h.host_group = _host_groups[rack_name]
_hosts[h.name] = h
for hgk, hg in _host_groups.iteritems():
hg.parent_resource = _datacenter
for _, h in _hosts.iteritems():
if h.host_group.name == hgk:
hg.child_resources[h.name] = h
_datacenter.resources[hgk] = hg
if "none" in _host_groups.keys():
self.logger.warn("some hosts are into unknown rack")
return status
def _set_layout_by_name(self, _host_name):
'''Set the rack-host layout using the host naming convention.'''
region_name = None
rack_name = None
host_name = None
index = 0
end_of_region_index = 0
end_of_rack_index = 0
end_of_host_index = 0
status = "success"
for c in _host_name:
if index >= self.config.num_of_region_chars:
if not c.isdigit():
if end_of_region_index == 0:
if c not in self.config.rack_code_list:
status = "invalid rack_char = " + c + \
". missing rack_char = r"
break
end_of_region_index = index
if (end_of_rack_index == 0 and end_of_region_index > 0 and
index > end_of_region_index):
if c not in self.config.node_code_list:
status = "invalid node_char = " + c + \
". missing node_char = a,c,u,f,o,p,s"
break
end_of_rack_index = index
# when a non-digit char is found after parsing host
# identifier
if (end_of_host_index == 0 and end_of_rack_index > 0 and
index > end_of_rack_index):
end_of_host_index = index
break
# when a '.' char is found after parsing host identifier
if end_of_host_index == 0 and c == '.':
end_of_host_index = index
break
index += 1
if status == "success":
region_name = _host_name[:end_of_region_index]
rack_name = _host_name[end_of_region_index:end_of_rack_index]
if end_of_host_index > 0:
host_name = _host_name[end_of_rack_index:end_of_host_index]
else:
host_name = _host_name[end_of_rack_index:]
if len(region_name) <= self.config.num_of_region_chars:
status = "incorrect format of region name = " + region_name
return ('none', status)
if len(rack_name) < 2:
status = "incorrect format of rack name = " + rack_name
return ('none', status)
if len(host_name) < 2:
status = "incorrect format of compute host name = " + host_name
return ('none', status)
rack_full_name = region_name + rack_name
return (rack_full_name, status)
else:
return ('none', status)
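A worked example of _set_layout_by_name, under hypothetical config values (num_of_region_chars=3, rack_code_list=['r'], node_code_list=['a', 'c', 'u', 'f', 'o', 'p', 's']):

# "pdk15r05c001"
#   region = "pdk15"   chars up to the first rack code 'r' (index 5)
#   rack   = "r05"     rack code plus digits (indexes 5..7)
#   host   = "c001"    node code plus digits (index 8 to the end)
# -> returns ("pdk15r05", "success"); "pdk15r05" becomes the name of the
#    rack (HostGroup) that the host is attached to.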


@@ -12,16 +12,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from novaclient import client as nova_client
from oslo_config import cfg
from oslo_log import log
from resource_base import Flavor
from resource_base import Host
from resource_base import LogicalGroup
from valet.engine.resource_manager.resources.flavor import Flavor
from valet.engine.resource_manager.resources.group import Group
from valet.engine.resource_manager.resources.host import Host
# Nova API v2
VERSION = 2
@@ -30,93 +30,71 @@ CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Compute(object):
"""Compute Class.
This class performs functions of setting hosts, availability zones,
aggregates, placed vms, resources, flavors, etc.
Interacts with nova client to perform these actions.
"""
class NovaCompute(object):
"""Dispatcher to collect resource status from nova."""
def __init__(self):
"""Compute init."""
self.nova = None
def set_hosts(self, _hosts, _logical_groups):
"""Return success if az's, aggregates, vms, resources, all set."""
self._get_nova_client()
status = self._set_availability_zones(_hosts, _logical_groups)
if status != "success":
LOG.error('_set_availability_zones failed')
return status
status = self._set_aggregates(_hosts, _logical_groups)
if status != "success":
LOG.error('_set_aggregates failed')
return status
status = self._set_placed_vms(_hosts, _logical_groups)
if status != "success":
LOG.error('_set_placed_vms failed')
return status
status = self._set_resources(_hosts)
if status != "success":
LOG.error('_set_resources failed')
return status
return "success"
self.vm_locations = {}
def _get_nova_client(self):
"""Return a nova client."""
"""Get a nova client."""
self.nova = nova_client.Client(VERSION,
CONF.identity.username,
CONF.identity.password,
CONF.identity.project_name,
CONF.identity.auth_url)
def _set_availability_zones(self, _hosts, _logical_groups):
try:
hosts_list = self.nova.hosts.list()
def set_groups(self, _groups):
"""Set availability zones and host-aggregates from nova."""
try:
for h in hosts_list:
if h.service == "compute":
host = Host(h.host_name)
host.tag.append("nova")
self._get_nova_client()
logical_group = None
if h.zone not in _logical_groups.keys():
logical_group = LogicalGroup(h.zone)
logical_group.group_type = "AZ"
_logical_groups[logical_group.name] = logical_group
else:
logical_group = _logical_groups[h.zone]
status = self._set_availability_zones(_groups)
if status != "success":
LOG.error(status)
return status
host.memberships[logical_group.name] = logical_group
if host.name not in logical_group.vms_per_host.keys():
logical_group.vms_per_host[host.name] = []
_hosts[host.name] = host
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while setting host zones from Nova"
except Exception:
LOG.critical(traceback.format_exc())
status = self._set_aggregates(_groups)
if status != "success":
LOG.error(status)
return status
return "success"
def _set_aggregates(self, _hosts, _logical_groups):
aggregate_list = self.nova.aggregates.list()
def _set_availability_zones(self, _groups):
"""Set AZs."""
try:
hosts_list = self.nova.hosts.list()
for h in hosts_list:
if h.service == "compute":
group = None
if h.zone not in _groups.keys():
group = Group(h.zone)
group.group_type = "AZ"
_groups[group.name] = group
else:
group = _groups[h.zone]
if h.host_name not in group.vms_per_host.keys():
group.vms_per_host[h.host_name] = []
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "error while setting host zones from Nova"
return "success"
def _set_aggregates(self, _groups):
"""Set host-aggregates and corresponding hosts."""
try:
aggregate_list = self.nova.aggregates.list()
for a in aggregate_list:
aggregate = LogicalGroup(a.name)
aggregate = Group(a.name)
aggregate.group_type = "AGGR"
if a.deleted is not False:
aggregate.status = "disabled"
@@ -126,93 +104,122 @@ class Compute(object):
metadata[mk] = a.metadata.get(mk)
aggregate.metadata = metadata
_logical_groups[aggregate.name] = aggregate
_groups[aggregate.name] = aggregate
for hn in a.hosts:
host = _hosts[hn]
host.memberships[aggregate.name] = aggregate
aggregate.vms_per_host[host.name] = []
aggregate.vms_per_host[hn] = []
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while setting host aggregates from Nova"
return "error while setting host aggregates from Nova"
return "success"
def _set_placed_vms(self, _hosts, _logical_groups):
error_status = None
def set_hosts(self, _hosts):
"""Set host resources info."""
for hk in _hosts.keys():
vm_uuid_list = []
result_status = self._get_vms_of_host(hk, vm_uuid_list)
self._get_nova_client()
if result_status == "success":
for vm_uuid in vm_uuid_list:
vm_detail = [] # (vm_name, az, metadata, status)
result_status_detail = self._get_vm_detail(vm_uuid,
vm_detail)
status = self._set_hosts(_hosts)
if status != "success":
LOG.error(status)
return status
if result_status_detail == "success":
vm_id = ("none", vm_detail[0], vm_uuid)
_hosts[hk].vm_list.append(vm_id)
# FIXME(GJ): should track logical groups (e.g., AZ)?
else:
error_status = result_status_detail
break
else:
error_status = result_status
status = self._set_placed_vms(_hosts)
if status != "success":
LOG.error(status)
return status
if error_status is not None:
break
status = self._set_resources(_hosts)
if status != "success":
LOG.error(status)
return status
if error_status is None:
return "success"
else:
return error_status
return "success"
def _get_vms_of_host(self, _hk, _vm_list):
hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk,
servers=True)
def _set_hosts(self, _hosts):
"""Init hosts."""
try:
hosts_list = self.nova.hosts.list()
for h in hosts_list:
if h.service == "compute":
host = Host(h.host_name)
host.tag.append("nova")
_hosts[host.name] = host
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "error while setting hosts from Nova"
return "success"
def _set_placed_vms(self, _hosts):
"""Track and set vms to hosts and groups."""
for hk in _hosts.keys():
result_status = self._get_vms_of_host(hk)
if result_status != "success":
return result_status
for vm_uuid, hk in self.vm_locations.iteritems():
vm_info = {}
vm_info["uuid"] = vm_uuid
vm_info["stack_id"] = "none"
vm_info["orch_id"] = "none"
result_status_detail = self._get_vm_detail(vm_info)
if result_status_detail == "success":
_hosts[hk].vm_list.append(vm_info)
else:
return result_status_detail
return "success"
def _get_vms_of_host(self, _hk):
"""Get vms of this host."""
try:
hypervisor_list = self.nova.hypervisors.search(
hypervisor_match=_hk, servers=True)
for hv in hypervisor_list:
if hasattr(hv, 'servers'):
server_list = hv.__getattr__('servers')
for s in server_list:
_vm_list.append(s['uuid'])
self.vm_locations[s['uuid']] = _hk
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while getting existing vms"
return "error while getting existing vms from nova"
return "success"
def _get_vm_detail(self, _vm_uuid, _vm_detail):
server = self.nova.servers.get(_vm_uuid)
def _get_vm_detail(self, _vm_info):
"""Get the detail of vm."""
try:
vm_name = server.name
_vm_detail.append(vm_name)
az = server.__getattr__("OS-EXT-AZ:availability_zone")
_vm_detail.append(az)
metadata = server.metadata
_vm_detail.append(metadata)
status = server.status
_vm_detail.append(status)
server = self.nova.servers.get(_vm_info["uuid"])
_vm_info["name"] = server.name
_vm_info["availability_zone"] = \
server.__getattr__("OS-EXT-AZ:availability_zone")
_vm_info["flavor_id"] = server.flavor["id"]
# FIXME(gjung): image
# FIXME(gjung): metadata contains stack-id
_vm_info["metadata"] = server.metadata
_vm_info["status"] = server.status
_vm_info["tenant_id"] = server.tenant_id
# FIXME(gjung): scheduler_hints?
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while getting vm detail"
return "error while getting vm detail from nova"
return "success"
def _set_resources(self, _hosts):
'''Returns Hypervisor list '''
host_list = self.nova.hypervisors.list()
"""Set Hypervisor list."""
try:
host_list = self.nova.hypervisors.list()
for hv in host_list:
if hv.service['host'] in _hosts.keys():
host = _hosts[hv.service['host']]
@@ -225,21 +232,20 @@ class Compute(object):
host.original_local_disk_cap = float(hv.local_gb)
host.free_disk_gb = float(hv.free_disk_gb)
host.disk_available_least = float(hv.disk_available_least)
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while setting host resources from Nova"
return "error while setting host resources from Nova"
return "success"
def set_flavors(self, _flavors):
"""Set flavors."""
error_status = None
self._get_nova_client()
result_status = self._set_flavors(_flavors)
if result_status == "success":
for _, f in _flavors.iteritems():
result_status_detail = self._set_extra_specs(f)
@@ -252,14 +258,15 @@ class Compute(object):
if error_status is None:
return "success"
else:
LOG.error(error_status)
return error_status
def _set_flavors(self, _flavors):
'''Get a list of all flavors.'''
flavor_list = self.nova.flavors.list()
"""Set a list of all flavors."""
try:
flavor_list = self.nova.flavors.list()
for f in flavor_list:
flavor = Flavor(f.name)
flavor.flavor_id = f.id
@@ -269,47 +276,39 @@ class Compute(object):
flavor.vCPUs = float(f.vcpus)
flavor.mem_cap = float(f.ram)
root_gb = float(f.disk)
ephemeral_gb = 0.0
if hasattr(f, "OS-FLV-EXT-DATA:ephemeral"):
ephemeral_gb = float(getattr(f,
"OS-FLV-EXT-DATA:ephemeral"))
ephemeral_gb = float(
getattr(f, "OS-FLV-EXT-DATA:ephemeral"))
swap_mb = 0.0
if hasattr(f, "swap"):
sw = getattr(f, "swap")
if sw != '':
swap_mb = float(sw)
flavor.disk_cap = (
root_gb + ephemeral_gb + swap_mb / float(1024))
flavor.disk_cap = root_gb + ephemeral_gb + swap_mb \
/ float(1024)
_flavors[flavor.name] = flavor
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while getting flavors"
return "error while getting flavors"
return "success"
def _set_extra_specs(self, _flavor):
"""Set each flavor's extra-specs."""
try:
# Get a list of all flavors
flavors_list = self.nova.flavors.list()
# Get flavor from flavor_list
for flavor in flavors_list:
if flavor.id == _flavor.flavor_id:
extra_specs = flavor.get_keys()
for sk, sv in extra_specs.iteritems():
_flavor.extra_specs[sk] = sv
break
except (ValueError, KeyError, TypeError):
LOG.error(traceback.format_exc())
return "Error while getting flavor extra spec"
return "error while getting extra spec for flavor"
return "success"


@@ -12,34 +12,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import time
import traceback
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.resource_manager.resource_base import Datacenter
from valet.engine.resource_manager.resource_base import Flavor
from valet.engine.resource_manager.resource_base import Host
from valet.engine.resource_manager.resource_base import HostGroup
from valet.engine.resource_manager.resource_base import LogicalGroup
from valet.engine.optimizer.app_manager.group import LEVEL
from valet.engine.resource_manager.resources.datacenter import Datacenter
from valet.engine.resource_manager.resources.flavor import Flavor
from valet.engine.resource_manager.resources.group import Group
from valet.engine.resource_manager.resources.host import Host
from valet.engine.resource_manager.resources.host_group import HostGroup
LOG = log.getLogger(__name__)
class Resource(object):
"""Resource Class.
"""Container and Handler to deal with change of datacenter resource status.
This class bootstraps the resources from the database and initializes
them using base resources (datacenter, host group, host, logical group).
Also manages aggregate status of resources and metadata and handles
updates to base resource types.
"""
def __init__(self, _db, _config):
"""Init Resource Class."""
self.db = _db
self.config = _config
""" resource data """
@@ -47,8 +44,7 @@ class Resource(object):
self.host_groups = {}
self.hosts = {}
""" metadata """
self.logical_groups = {}
self.groups = {}
self.flavors = {}
self.current_timestamp = 0
@@ -60,28 +56,27 @@ class Resource(object):
self.CPU_avail = 0
self.mem_avail = 0
self.local_disk_avail = 0
self.disk_avail = 0
self.nw_bandwidth_avail = 0
# FIXME(GJ): should check invalid return here?
def bootstrap_from_db(self, _resource_status):
"""Return True if bootsrap resource from database successful."""
def load_from_db(self, _resource_status):
"""Load all resource status info from DB."""
LOG.info("load prior data")
try:
LOG.info("Resource status from DB = %s", _resource_status)
logical_groups = _resource_status.get("logical_groups")
if logical_groups:
for lgk, lg in logical_groups.iteritems():
logical_group = LogicalGroup(lgk)
logical_group.group_type = lg.get("group_type")
logical_group.status = lg.get("status")
logical_group.metadata = lg.get("metadata")
logical_group.vm_list = lg.get("vm_list")
logical_group.vms_per_host = lg.get("vms_per_host")
groups = _resource_status.get("groups")
if groups:
for lgk, lg in groups.iteritems():
group = Group(lgk)
group.group_type = lg.get("group_type")
group.status = lg.get("status")
group.metadata = lg.get("metadata")
group.vm_list = lg.get("vm_list")
group.vms_per_host = lg.get("vms_per_host")
self.logical_groups[lgk] = logical_group
self.groups[lgk] = group
if len(self.logical_groups) == 0:
LOG.warning("no logical_groups")
if len(self.groups) == 0:
LOG.warn("no groups in db record")
flavors = _resource_status.get("flavors")
if flavors:
@@ -97,7 +92,7 @@ class Resource(object):
self.flavors[fk] = flavor
if len(self.flavors) == 0:
LOG.error("fail loading flavors")
LOG.warn("no flavors in db record")
hosts = _resource_status.get("hosts")
if hosts:
@@ -122,12 +117,12 @@ class Resource(object):
host.vm_list = h.get("vm_list")
for lgk in h["membership_list"]:
host.memberships[lgk] = self.logical_groups[lgk]
host.memberships[lgk] = self.groups[lgk]
self.hosts[hk] = host
if len(self.hosts) == 0:
LOG.error("fail loading hosts")
LOG.warn("no hosts in db record")
host_groups = _resource_status.get("host_groups")
if host_groups:
@@ -149,12 +144,12 @@ class Resource(object):
host_group.vm_list = hg.get("vm_list")
for lgk in hg.get("membership_list"):
host_group.memberships[lgk] = self.logical_groups[lgk]
host_group.memberships[lgk] = self.groups[lgk]
self.host_groups[hgk] = host_group
if len(self.host_groups) == 0:
LOG.warning("fail loading host_groups")
LOG.warn("no host_groups (rack)")
dc = _resource_status.get("datacenter")
if dc:
@@ -175,7 +170,7 @@ class Resource(object):
self.datacenter.vm_list = dc.get("vm_list")
for lgk in dc.get("membership_list"):
self.datacenter.memberships[lgk] = self.logical_groups[lgk]
self.datacenter.memberships[lgk] = self.groups[lgk]
for ck in dc.get("children"):
if ck in self.host_groups.keys():
@@ -184,7 +179,7 @@ class Resource(object):
self.datacenter.resources[ck] = self.hosts[ck]
if len(self.datacenter.resources) == 0:
LOG.error("fail loading datacenter")
LOG.warn("fail loading datacenter")
hgs = _resource_status.get("host_groups")
if hgs:
@@ -224,9 +219,11 @@ class Resource(object):
return True
def update_topology(self, store=True):
"""Update Topology and return True, if store True then store update."""
self._update_topology()
"""Update resource status triggered by placements, events, and batch.
"""
self._update_topology()
self._update_compute_avail()
if store is False:
@@ -235,8 +232,10 @@ class Resource(object):
return self.store_topology_updates()
def _update_topology(self):
"""Update host group (rack) and datacenter status."""
updated = False
for level in LEVELS:
for level in LEVEL:
for _, host_group in self.host_groups.iteritems():
if (host_group.host_type == level and
host_group.check_availability()):
@@ -253,6 +252,8 @@ class Resource(object):
self.resource_updated = True
def _update_host_group_topology(self, _host_group):
"""Update host group (rack) status."""
_host_group.init_resources()
del _host_group.vm_list[:]
@@ -269,8 +270,8 @@ class Resource(object):
host.original_local_disk_cap
_host_group.avail_local_disk_cap += host.avail_local_disk_cap
for vm_id in host.vm_list:
_host_group.vm_list.append(vm_id)
for vm_info in host.vm_list:
_host_group.vm_list.append(vm_info)
_host_group.init_memberships()
@@ -280,6 +281,8 @@ class Resource(object):
_host_group.memberships[mk] = host.memberships[mk]
def _update_datacenter_topology(self):
"""Update datacenter status."""
self.datacenter.init_resources()
del self.datacenter.vm_list[:]
self.datacenter.memberships.clear()
@@ -305,14 +308,20 @@ class Resource(object):
self.datacenter.memberships[mk] = resource.memberships[mk]
def _update_compute_avail(self):
"""Update amount of total available resources."""
self.CPU_avail = self.datacenter.avail_vCPUs
self.mem_avail = self.datacenter.avail_mem_cap
self.local_disk_avail = self.datacenter.avail_local_disk_cap
def store_topology_updates(self):
"""Store resource status in batch."""
if not self.resource_updated:
return True
updated = False
flavor_updates = {}
logical_group_updates = {}
group_updates = {}
host_updates = {}
host_group_updates = {}
datacenter_update = None
@@ -324,9 +333,9 @@ class Resource(object):
flavor_updates[fk] = flavor.get_json_info()
updated = True
for lgk, lg in self.logical_groups.iteritems():
for lgk, lg in self.groups.iteritems():
if lg.last_update >= self.curr_db_timestamp:
logical_group_updates[lgk] = lg.get_json_info()
group_updates[lgk] = lg.get_json_info()
updated = True
for hk, host in self.hosts.iteritems():
@@ -343,196 +352,118 @@ class Resource(object):
datacenter_update = self.datacenter.get_json_info()
updated = True
# NOTE(GJ): do not track resource change history in this version
if updated is True:
if updated:
json_logging = {}
json_logging['timestamp'] = self.curr_db_timestamp
if len(flavor_updates) > 0:
json_logging['flavors'] = flavor_updates
if len(logical_group_updates) > 0:
json_logging['logical_groups'] = logical_group_updates
if len(group_updates) > 0:
json_logging['groups'] = group_updates
if len(host_updates) > 0:
json_logging['hosts'] = host_updates
if len(host_group_updates) > 0:
json_logging['host_groups'] = host_group_updates
if datacenter_update is not None:
json_logging['datacenter'] = datacenter_update
if not self.db.update_resource_status(
self.datacenter.name, json_logging):
return None
if not self.db.update_resource_status(self.datacenter.name,
json_logging):
return False
self.curr_db_timestamp = time.time()
self.resource_updated = False
return True
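For reference, the batch update handed to db.update_resource_status() has this shape (placeholder values; each key appears only when the corresponding collection changed since the last store):

flavor_json = {}   # flavor.get_json_info()
group_json = {}    # group.get_json_info()
json_logging = {
    'timestamp': 1502195527.0,            # curr_db_timestamp
    'flavors': {'m1.small': flavor_json},
    'groups': {'az1': group_json},
    # 'hosts', 'host_groups', and 'datacenter' follow the same pattern
}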
def show_current_logical_groups(self):
LOG.debug("--- track logical groups info ---")
for lgk, lg in self.logical_groups.iteritems():
if lg.status == "enabled":
LOG.debug("lg name = " + lgk)
LOG.debug(" type = " + lg.group_type)
if lg.group_type == "AGGR":
for k in lg.metadata.keys():
LOG.debug(" metadata key = " + k)
LOG.debug(" vms")
debug_msg = " orch_id = %s uuid = %s"
for v in lg.vm_list:
LOG.debug(debug_msg % (v[0], v[2]))
LOG.debug(" hosts")
for h, v in lg.vms_per_host.iteritems():
LOG.debug(" host = %s" % h)
LOG.debug(" vms = %s" %
str(len(lg.vms_per_host[h])))
host = None
if h in self.hosts.keys():
host = self.hosts[h]
elif h in self.host_groups.keys():
host = self.host_groups[h]
else:
LOG.error("TEST: lg member not exist")
if host is not None:
LOG.debug(" status = " + host.status)
if lgk not in host.memberships.keys():
LOG.error("TEST: membership missing")
def show_current_host_status(self):
LOG.debug("--- track host info ---")
for hk, h in self.hosts.iteritems():
LOG.debug("host name = " + hk)
LOG.debug(" status = " + h.status + ", " + h.state)
LOG.debug(" vms = " + str(len(h.vm_list)))
LOG.debug(" resources (org, total, avail, used)")
cpu_org = str(h.original_vCPUs)
cpu_tot = str(h.vCPUs)
cpu_avail = str(h.avail_vCPUs)
cpu_used = str(h.vCPUs_used)
msg = " {0} = {1}, {2}, {3}, {4}"
LOG.debug(
msg.format('cpu', cpu_org, cpu_tot, cpu_avail, cpu_used))
mem_org = str(h.original_mem_cap)
mem_tot = str(h.mem_cap)
mem_avail = str(h.avail_mem_cap)
mem_used = str(h.free_mem_mb)
LOG.debug(
msg.format('mem', mem_org, mem_tot, mem_avail, mem_used))
dsk_org = str(h.original_local_disk_cap)
dsk_tot = str(h.local_disk_cap)
dsk_avail = str(h.avail_local_disk_cap)
dsk_used = str(h.free_disk_gb)
LOG.debug(
msg.format('disk', dsk_org, dsk_tot, dsk_avail, dsk_used))
LOG.debug(" memberships")
for mk in h.memberships.keys():
LOG.debug(" " + mk)
if mk not in self.logical_groups.keys():
LOG.error("TEST: lg missing")
def update_rack_resource(self, _host):
"""Update resources for rack (host), then update cluster."""
"""Mark the host update time for batch resource status update."""
rack = _host.host_group
if rack is not None:
rack.last_update = time.time()
if isinstance(rack, HostGroup):
self.update_cluster_resource(rack)
def update_cluster_resource(self, _rack):
"""Update cluster rack belonged to, then update datacenter."""
"""Mark the host update time for batch resource status update."""
cluster = _rack.parent_resource
if cluster is not None:
cluster.last_update = time.time()
if isinstance(cluster, HostGroup):
self.datacenter.last_update = time.time()
def get_uuid(self, _h_uuid, _host_name):
"""Return host uuid."""
def get_uuid(self, _orch_id, _host_name):
host = self.hosts[_host_name]
return host.get_uuid(_orch_id)
return host.get_uuid(_h_uuid)
    def add_vm_to_host(self, _vm_alloc, _vm_info):
        """Add the vm to the host and adjust the host's available resources."""
        host = self.hosts[_vm_alloc["host"]]
        if host.exist_vm(orch_id=_vm_info["orch_id"], uuid=_vm_info["uuid"]):
            LOG.warning("vm already exists in the host")
            self.remove_vm_from_host(_vm_alloc, orch_id=_vm_info["orch_id"],
                                     uuid=_vm_info["uuid"])
        host.vm_list.append(_vm_info)
        host.avail_vCPUs -= _vm_alloc["vcpus"]
        host.avail_mem_cap -= _vm_alloc["mem"]
        host.avail_local_disk_cap -= _vm_alloc["local_volume"]
        host.vCPUs_used += _vm_alloc["vcpus"]
        host.free_mem_mb -= _vm_alloc["mem"]
        host.free_disk_gb -= _vm_alloc["local_volume"]
        host.disk_available_least -= _vm_alloc["local_volume"]
    def remove_vm_from_host(self, _vm_alloc, orch_id=None, uuid=None):
        """Remove the vm from the host and restore the host's resources."""
        host = self.hosts[_vm_alloc["host"]]
        if host.remove_vm(orch_id, uuid) is True:
            host.avail_vCPUs += _vm_alloc["vcpus"]
            host.avail_mem_cap += _vm_alloc["mem"]
            host.avail_local_disk_cap += _vm_alloc["local_volume"]
            host.vCPUs_used -= _vm_alloc["vcpus"]
            host.free_mem_mb += _vm_alloc["mem"]
            host.free_disk_gb += _vm_alloc["local_volume"]
            host.disk_available_least += _vm_alloc["local_volume"]
            return True
        else:
            LOG.warning("vm to be removed does not exist")
            return False
    def update_host_resources(self, _hn, _st):
        """Check and update the compute node status."""
        host = self.hosts[_hn]
        if host.status != _st:
            host.status = _st
            LOG.warning("host(" + _hn + ") status changed")
            return True
        else:
            return False
    def update_host_time(self, _host_name):
        """Mark the host update time for batch resource status update."""
        host = self.hosts[_host_name]
        host.last_update = time.time()
        self.update_rack_resource(host)
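For orientation, the refactored methods above replace the old long positional signatures with two dicts. Below is a minimal, illustrative sketch of the shapes they assume, inferred from the key accesses in the code above; the host name and ids are invented:

# Illustrative only: shapes inferred from add_vm_to_host/remove_vm_from_host.
vm_alloc = {
    "host": "pdk15r05c001",  # host name (hypothetical)
    "vcpus": 2,
    "mem": 4096,             # MB
    "local_volume": 40,      # GB, ephemeral
}
vm_info = {
    "orch_id": "stack-resource-id",  # orchestration id from heat
    "uuid": "none",                  # physical id, filled in later by nova
    "name": "vm-01",
}
# resource.add_vm_to_host(vm_alloc, vm_info)
# resource.remove_vm_from_host(vm_alloc, orch_id=vm_info["orch_id"])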
    def add_group(self, _host_name, _lg_name, _lg_type):
        """Add a group to the resource unless the group already exists."""
        success = True
        host = None
        if _host_name in self.hosts.keys():
            host = self.hosts[_host_name]
@ -540,96 +471,70 @@ class Resource(object):
            host = self.host_groups[_host_name]
        if host is not None:
            if _lg_name not in self.groups.keys():
                group = Group(_lg_name)
                group.group_type = _lg_type
                group.last_update = time.time()
                self.groups[_lg_name] = group
            else:
                success = False
            if _lg_name not in host.memberships.keys():
                host.memberships[_lg_name] = self.groups[_lg_name]
                if isinstance(host, HostGroup):
                    host.last_update = time.time()
                    self.update_cluster_resource(host)
            else:
                success = False
        else:
            LOG.warning("host not found while adding group")
            return False
        return success
    def add_vm_to_groups(self, _host, _vm_info, _groups_of_vm):
        """Add the new vm into its related groups."""
        for lgk in _host.memberships.keys():
            if lgk in _groups_of_vm:
                if lgk in self.groups.keys():
                    lg = self.groups[lgk]
                    if isinstance(_host, Host):
                        if lg.add_vm(_vm_info, _host.name) is True:
                            lg.last_update = time.time()
                        else:
                            LOG.warning("vm already exists in group")
                    elif isinstance(_host, HostGroup):
                        if self._check_group_type(lg.group_type):
                            if lgk.split(":")[0] == _host.host_type:
                                if lg.add_vm(_vm_info, _host.name) is True:
                                    lg.last_update = time.time()
                                else:
                                    LOG.warning("vm already exists in group")
                else:
                    LOG.warning("group not found while adding vm")
        if isinstance(_host, Host) and _host.host_group is not None:
            self.add_vm_to_groups(_host.host_group, _vm_info, _groups_of_vm)
        elif isinstance(_host, HostGroup) and \
                _host.parent_resource is not None:
            self.add_vm_to_groups(_host.parent_resource,
                                  _vm_info, _groups_of_vm)
    def remove_vm_from_groups(self, _host, orch_id=None, uuid=None):
        """Remove the vm from its related groups."""
        for lgk in _host.memberships.keys():
            if lgk not in self.groups.keys():
                continue
            lg = self.groups[lgk]
            if isinstance(_host, Host):
                # Remove host from lg's membership if the host has no vms of lg
                if lg.remove_vm(_host.name, orch_id, uuid) is True:
                    lg.last_update = time.time()
                # Remove lg from host's membership if lg does not
@ -640,32 +545,34 @@ class Resource(object):
            elif isinstance(_host, HostGroup):
                if self._check_group_type(lg.group_type):
                    if lgk.split(":")[0] == _host.host_type:
                        if lg.remove_vm(_host.name, orch_id, uuid) is True:
                            lg.last_update = time.time()
                        if _host.remove_membership(lg) is True:
                            _host.last_update = time.time()
            if (lg.group_type == "EX" or lg.group_type == "AFF" or
                    lg.group_type == "DIV"):
                # FIXME(gjung): del self.groups[lgk] if len(lg.vm_list) == 0?
                pass
        if isinstance(_host, Host) and _host.host_group is not None:
            self.remove_vm_from_groups(_host.host_group, orch_id, uuid)
        elif isinstance(_host, HostGroup) and \
                _host.parent_resource is not None:
            self.remove_vm_from_groups(_host.parent_resource, orch_id, uuid)
    def remove_vm_from_groups_of_host(self, _host, orch_id=None, uuid=None):
        """Remove the vm from the related groups of this host."""
        for lgk in _host.memberships.keys():
            if lgk not in self.groups.keys():
                LOG.warning("group (" + lgk + ") already removed")
                continue
            lg = self.groups[lgk]
            if isinstance(_host, Host):
                if lg.remove_vm_from_host(_host.name, orch_id, uuid) is True:
                    lg.last_update = time.time()
                if _host.remove_membership(lg) is True:
@ -674,73 +581,69 @@ class Resource(object):
            elif isinstance(_host, HostGroup):
                if self._check_group_type(lg.group_type):
                    if lgk.split(":")[0] == _host.host_type:
                        if lg.remove_vm_from_host(_host.name,
                                                  orch_id, uuid) is True:
                            lg.last_update = time.time()
                        if _host.remove_membership(lg) is True:
                            _host.last_update = time.time()
        if isinstance(_host, Host) and _host.host_group is not None:
            self.remove_vm_from_groups_of_host(_host.host_group,
                                               orch_id, uuid)
        elif isinstance(_host, HostGroup) and \
                _host.parent_resource is not None:
            self.remove_vm_from_groups_of_host(_host.parent_resource,
                                               orch_id, uuid)
    def update_uuid_in_groups(self, _orch_id, _uuid, _host):
        """Update the physical uuid in the groups of this host."""
        for lgk in _host.memberships.keys():
            lg = self.groups[lgk]
            if isinstance(_host, Host):
                if lg.update_uuid(_orch_id, _uuid, _host.name) is True:
                    lg.last_update = time.time()
            elif isinstance(_host, HostGroup):
                if self._check_group_type(lg.group_type):
                    if lgk.split(":")[0] == _host.host_type:
                        if lg.update_uuid(_orch_id, _uuid, _host.name) is True:
                            lg.last_update = time.time()
        if isinstance(_host, Host) and _host.host_group is not None:
            self.update_uuid_in_groups(_orch_id, _uuid, _host.host_group)
        elif isinstance(_host, HostGroup) and \
                _host.parent_resource is not None:
            self.update_uuid_in_groups(_orch_id, _uuid, _host.parent_resource)
    def update_orch_id_in_groups(self, _orch_id, _uuid, _host):
        """Update the orch_id in the groups of this host."""
        for lgk in _host.memberships.keys():
            lg = self.groups[lgk]
            if isinstance(_host, Host):
                if lg.update_orch_id(_orch_id, _uuid, _host.name) is True:
                    lg.last_update = time.time()
            elif isinstance(_host, HostGroup):
                if self._check_group_type(lg.group_type):
                    if lgk.split(":")[0] == _host.host_type:
                        if (lg.update_orch_id(_orch_id, _uuid, _host.name) is
                                True):
                            lg.last_update = time.time()
        if isinstance(_host, Host) and _host.host_group is not None:
            self.update_orch_id_in_groups(_orch_id, _uuid, _host.host_group)
        elif isinstance(_host, HostGroup) and \
                _host.parent_resource is not None:
            self.update_orch_id_in_groups(_orch_id, _uuid,
                                          _host.parent_resource)
def compute_avail_resources(self, hk, host):
"""Compute avail resources for host.
"""Compute the available amount of resources with oversubsription ratios.
This function computes ram, cpu and disk allocation ratios for
the passed in host. Then uses data to compute avail memory, disk
and vCPUs.
"""
ram_allocation_ratio_list = []
cpu_allocation_ratio_list = []
disk_allocation_ratio_list = []
@ -804,19 +707,26 @@ class Resource(object):
disk_allocation_ratio, static_disk_standby_ratio)
    def get_flavor(self, _id):
        """Get flavor info by name or flavor id."""
        flavor_id = None
        if isinstance(_id, six.string_types):
            flavor_id = _id
        else:
            flavor_id = str(_id)
        flavor = None
        if flavor_id in self.flavors.keys():
            flavor = self.flavors[flavor_id]
        else:
            for _, f in self.flavors.iteritems():
                if f.flavor_id == flavor_id:
                    flavor = f
                    break
        if flavor is not None:
            if flavor.status != "enabled":
                flavor = None
        return flavor
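As a self-contained sketch of the lookup rule above (first by name key, then by flavor_id, with disabled flavors filtered out), using an invented flavor and a minimal stand-in class:

import six

class _Flavor(object):  # minimal stand-in for resources/flavor.py Flavor
    def __init__(self, name, flavor_id):
        self.name = name
        self.flavor_id = flavor_id
        self.status = "enabled"

flavors = {"m1.small": _Flavor("m1.small", "42")}

def get_flavor(_id):
    flavor_id = _id if isinstance(_id, six.string_types) else str(_id)
    flavor = flavors.get(flavor_id)  # lookup by name key first
    if flavor is None:
        for f in flavors.values():   # then fall back to flavor_id match
            if f.flavor_id == flavor_id:
                flavor = f
                break
    if flavor is not None and flavor.status != "enabled":
        flavor = None                # disabled flavors are treated as missing
    return flavor

assert get_flavor("m1.small") is get_flavor(42)  # name or id both resolve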

View File

@ -1,674 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource Base.
Contains the resource datatype objects, from the base Flavor type up to
the Datacenter object.
"""
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
class Datacenter(object):
"""Datacenter Class.
This object represents a datacenter. It contains all memberships or
logical groups in the datacenter, all resources available, placed vms,
and more throughout the datacenter.
"""
def __init__(self, _name):
"""Init Datacenter object."""
self.name = _name
self.region_code_list = []
self.status = "enabled"
# all available logical groups (e.g., aggregate) in the datacenter
self.memberships = {}
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
self.resources = {}
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
self.vm_list = []
self.last_update = 0
def init_resources(self):
"""Init datacenter resources to 0."""
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
def get_json_info(self):
"""Return JSON info for datacenter object."""
membership_list = []
for lgk in self.memberships.keys():
membership_list.append(lgk)
child_list = []
for ck in self.resources.keys():
child_list.append(ck)
return {'status': self.status,
'name': self.name,
'region_code_list': self.region_code_list,
'membership_list': membership_list,
'vCPUs': self.vCPUs,
'original_vCPUs': self.original_vCPUs,
'avail_vCPUs': self.avail_vCPUs,
'mem': self.mem_cap,
'original_mem': self.original_mem_cap,
'avail_mem': self.avail_mem_cap,
'local_disk': self.local_disk_cap,
'original_local_disk': self.original_local_disk_cap,
'avail_local_disk': self.avail_local_disk_cap,
'children': child_list,
'vm_list': self.vm_list,
'last_update': self.last_update}
class HostGroup(object):
"""Class for Host Group Object.
This class represents a group of hosts: where a Host is a single
server, a HostGroup is a rack or a cluster of servers. It contains all
memberships and resources for the group of hosts.
"""
def __init__(self, _id):
"""Init for Host Group Class."""
self.name = _id
# rack or cluster(e.g., power domain, zone)
self.host_type = "rack"
self.status = "enabled"
# all available logical groups (e.g., aggregate) in this group
self.memberships = {}
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
self.parent_resource = None # e.g., datacenter
self.child_resources = {} # e.g., hosting servers
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
self.vm_list = []
self.last_update = 0
def init_resources(self):
"""Init all host group resources to 0."""
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
def init_memberships(self):
"""Init Host Group memberships."""
for lgk in self.memberships.keys():
lg = self.memberships[lgk]
if (lg.group_type == "EX" or lg.group_type == "AFF" or
lg.group_type == "DIV"):
level = lg.name.split(":")[0]
if (LEVELS.index(level) < LEVELS.index(self.host_type) or
self.name not in lg.vms_per_host.keys()):
del self.memberships[lgk]
else:
del self.memberships[lgk]
def remove_membership(self, _lg):
"""Return True if membership to group _lg removed."""
cleaned = False
if (_lg.group_type == "EX" or _lg.group_type == "AFF" or
_lg.group_type == "DIV"):
if self.name not in _lg.vms_per_host.keys():
del self.memberships[_lg.name]
cleaned = True
return cleaned
def check_availability(self):
"""Return True if Host Group status is 'enabled'."""
if self.status == "enabled":
return True
else:
return False
def get_json_info(self):
"""Return JSON info for Host Group object."""
membership_list = []
for lgk in self.memberships.keys():
membership_list.append(lgk)
child_list = []
for ck in self.child_resources.keys():
child_list.append(ck)
parent_name = None
if self.parent_resource:
parent_name = self.parent_resource.name
return {'status': self.status,
'host_type': self.host_type,
'membership_list': membership_list,
'vCPUs': self.vCPUs,
'original_vCPUs': self.original_vCPUs,
'avail_vCPUs': self.avail_vCPUs,
'mem': self.mem_cap,
'original_mem': self.original_mem_cap,
'avail_mem': self.avail_mem_cap,
'local_disk': self.local_disk_cap,
'original_local_disk': self.original_local_disk_cap,
'avail_local_disk': self.avail_local_disk_cap,
'parent': parent_name,
'children': child_list,
'vm_list': self.vm_list,
'last_update': self.last_update}
class Host(object):
"""Class for Host Object.
This class represents a single host (i.e., a physical server). It holds
information about the groups the host is a part of, all the hardware
parameters (vCPUs, local disk, memory) as well as the list of vms and
volumes placed on the host.
"""
def __init__(self, _name):
"""Init for Host object."""
self.name = _name
# mark if this is synch'ed by multiple sources
self.tag = []
self.status = "enabled"
self.state = "up"
# logical group (e.g., aggregate) this hosting server is involved in
self.memberships = {}
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
self.vCPUs_used = 0
self.free_mem_mb = 0
self.free_disk_gb = 0
self.disk_available_least = 0
self.host_group = None # e.g., rack
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
self.vm_list = []
self.last_update = 0
def clean_memberships(self):
"""Return True if host cleaned from logical group membership."""
cleaned = False
for lgk in self.memberships.keys():
lg = self.memberships[lgk]
if self.name not in lg.vms_per_host.keys():
del self.memberships[lgk]
cleaned = True
return cleaned
def remove_membership(self, _lg):
"""Return True if host removed from logical group _lg passed in."""
cleaned = False
if (_lg.group_type == "EX" or _lg.group_type == "AFF" or
_lg.group_type == "DIV"):
if self.name not in _lg.vms_per_host.keys():
del self.memberships[_lg.name]
cleaned = True
return cleaned
def check_availability(self):
"""Return True if host is up, enabled and tagged as nova infra."""
if (self.status == "enabled" and self.state == "up" and
("nova" in self.tag) and ("infra" in self.tag)):
return True
else:
return False
def get_uuid(self, _h_uuid):
"""Return uuid of vm with matching orchestration id(_h_uuid)."""
uuid = None
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
uuid = vm_id[2]
break
return uuid
def exist_vm_by_h_uuid(self, _h_uuid):
"""Return True if vm with orchestration id(_h_uuid) exists on host."""
exist = False
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
exist = True
break
return exist
def exist_vm_by_uuid(self, _uuid):
"""Return True if vm with physical id(_uuid) exists on host."""
exist = False
for vm_id in self.vm_list:
if vm_id[2] == _uuid:
exist = True
break
return exist
def remove_vm_by_h_uuid(self, _h_uuid):
"""Return True if vm removed with matching _h_uuid."""
success = False
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
self.vm_list.remove(vm_id)
success = True
break
return success
def remove_vm_by_uuid(self, _uuid):
"""Return True if vm removed with matching _uuid."""
success = False
for vm_id in self.vm_list:
if vm_id[2] == _uuid:
self.vm_list.remove(vm_id)
success = True
break
return success
def update_uuid(self, _h_uuid, _uuid):
"""Return True if vm physical id updated."""
success = False
vm_name = "none"
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
vm_name = vm_id[1]
self.vm_list.remove(vm_id)
success = True
break
if success is True:
vm_id = (_h_uuid, vm_name, _uuid)
self.vm_list.append(vm_id)
return success
def update_h_uuid(self, _h_uuid, _uuid):
"""Return True if vm orchestration id (_h_uuid) updated."""
success = False
vm_name = "none"
for vm_id in self.vm_list:
if vm_id[2] == _uuid:
vm_name = vm_id[1]
self.vm_list.remove(vm_id)
success = True
break
if success is True:
vm_id = (_h_uuid, vm_name, _uuid)
self.vm_list.append(vm_id)
return success
def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio):
"""Calc avail_vCPUs by calculating vCPUs and subtracting in use."""
self.vCPUs = \
self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio)
self.avail_vCPUs = self.vCPUs - self.vCPUs_used
def compute_avail_mem(self, _overcommit_ratio, _standby_ratio):
"""Calc avail_mem by calculating mem_cap and subtract used mem."""
self.mem_cap = \
self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio)
used_mem_mb = self.original_mem_cap - self.free_mem_mb
self.avail_mem_cap = self.mem_cap - used_mem_mb
def compute_avail_disk(self, _overcommit_ratio, _standby_ratio):
"""Calc avail_disk by calc local_disk_cap and subtract used disk."""
self.local_disk_cap = \
self.original_local_disk_cap * \
_overcommit_ratio * \
(1.0 - _standby_ratio)
free_disk_cap = self.free_disk_gb
if self.disk_available_least > 0:
free_disk_cap = min(self.free_disk_gb, self.disk_available_least)
used_disk_cap = self.original_local_disk_cap - free_disk_cap
self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap
def get_json_info(self):
"""Return JSON info for Host object."""
membership_list = []
for lgk in self.memberships.keys():
membership_list.append(lgk)
return {'tag': self.tag, 'status': self.status, 'state': self.state,
'membership_list': membership_list,
'vCPUs': self.vCPUs,
'original_vCPUs': self.original_vCPUs,
'avail_vCPUs': self.avail_vCPUs,
'mem': self.mem_cap,
'original_mem': self.original_mem_cap,
'avail_mem': self.avail_mem_cap,
'local_disk': self.local_disk_cap,
'original_local_disk': self.original_local_disk_cap,
'avail_local_disk': self.avail_local_disk_cap,
'vCPUs_used': self.vCPUs_used,
'free_mem_mb': self.free_mem_mb,
'free_disk_gb': self.free_disk_gb,
'disk_available_least': self.disk_available_least,
'parent': self.host_group.name,
'vm_list': self.vm_list,
'last_update': self.last_update}
class LogicalGroup(object):
"""Logical Group class.
This class contains info about grouped vms, such as metadata when placing
nodes, list of placed vms, list of placed volumes and group type.
"""
def __init__(self, _name):
"""Init Logical Group object."""
self.name = _name
# AGGR, AZ, INTG, EX, DIV, or AFF
self.group_type = "AGGR"
self.status = "enabled"
# any metadata to be matched when placing nodes
self.metadata = {}
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
self.vm_list = []
# a list of placed volumes
self.volume_list = []
# key = host_id, value = a list of placed vms
self.vms_per_host = {}
self.last_update = 0
def exist_vm_by_h_uuid(self, _h_uuid):
"""Return True if h_uuid exist in vm_list as an orchestration_uuid."""
exist = False
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
exist = True
break
return exist
def exist_vm_by_uuid(self, _uuid):
"""Return True if uuid exist in vm_list as physical_uuid."""
exist = False
for vm_id in self.vm_list:
if vm_id[2] == _uuid:
exist = True
break
return exist
def update_uuid(self, _h_uuid, _uuid, _host_id):
"""Return True if _uuid and/or _host_id successfully updated."""
success = False
vm_name = "none"
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
vm_name = vm_id[1]
self.vm_list.remove(vm_id)
success = True
break
if _host_id in self.vms_per_host.keys():
for host_vm_id in self.vms_per_host[_host_id]:
if host_vm_id[0] == _h_uuid:
self.vms_per_host[_host_id].remove(host_vm_id)
success = True
break
if success is True:
vm_id = (_h_uuid, vm_name, _uuid)
self.vm_list.append(vm_id)
if _host_id in self.vms_per_host.keys():
self.vms_per_host[_host_id].append(vm_id)
return success
def update_h_uuid(self, _h_uuid, _uuid, _host_id):
"""Return True physical_uuid and/or _host_id successfully updated."""
success = False
vm_name = "none"
for vm_id in self.vm_list:
if vm_id[2] == _uuid:
vm_name = vm_id[1]
self.vm_list.remove(vm_id)
success = True
break
if _host_id in self.vms_per_host.keys():
for host_vm_id in self.vms_per_host[_host_id]:
if host_vm_id[2] == _uuid:
self.vms_per_host[_host_id].remove(host_vm_id)
success = True
break
if success is True:
vm_id = (_h_uuid, vm_name, _uuid)
self.vm_list.append(vm_id)
if _host_id in self.vms_per_host.keys():
self.vms_per_host[_host_id].append(vm_id)
return success
def add_vm_by_h_uuid(self, _vm_id, _host_id):
"""Return True if vm added with id _vm_id(orchestration id)."""
success = False
if self.exist_vm_by_h_uuid(_vm_id[0]) is False:
self.vm_list.append(_vm_id)
if self._check_group_type(self.group_type):
if _host_id not in self.vms_per_host.keys():
self.vms_per_host[_host_id] = []
self.vms_per_host[_host_id].append(_vm_id)
success = True
return success
def remove_vm_by_h_uuid(self, _h_uuid, _host_id):
"""Return True if vm removed with id _h_uuid(orchestration id)."""
success = False
for vm_id in self.vm_list:
if vm_id[0] == _h_uuid:
self.vm_list.remove(vm_id)
success = True
break
if _host_id in self.vms_per_host.keys():
for host_vm_id in self.vms_per_host[_host_id]:
if host_vm_id[0] == _h_uuid:
self.vms_per_host[_host_id].remove(host_vm_id)
success = True
break
if self._check_group_type(self.group_type):
if ((_host_id in self.vms_per_host.keys()) and
len(self.vms_per_host[_host_id]) == 0):
del self.vms_per_host[_host_id]
return success
def remove_vm_by_uuid(self, _uuid, _host_id):
"""Return True if vm with matching uuid found and removed."""
success = False
for vm_id in self.vm_list:
if vm_id[2] == _uuid:
self.vm_list.remove(vm_id)
success = True
break
if _host_id in self.vms_per_host.keys():
for host_vm_id in self.vms_per_host[_host_id]:
if host_vm_id[2] == _uuid:
self.vms_per_host[_host_id].remove(host_vm_id)
success = True
break
if self._check_group_type(self.group_type):
if ((_host_id in self.vms_per_host.keys()) and
len(self.vms_per_host[_host_id]) == 0):
del self.vms_per_host[_host_id]
return success
def clean_none_vms(self, _host_id):
"""Return True if vm's or host vm's removed with physical id none."""
success = False
blen = len(self.vm_list)
self.vm_list = [v for v in self.vm_list if v[2] != "none"]
alen = len(self.vm_list)
if alen != blen:
success = True
if _host_id in self.vms_per_host.keys():
blen = len(self.vms_per_host[_host_id])
self.vms_per_host[_host_id] = [
v for v in self.vms_per_host[_host_id] if v[2] != "none"]
alen = len(self.vms_per_host[_host_id])
if alen != blen:
success = True
if self._check_group_type(self.group_type):
if ((_host_id in self.vms_per_host.keys()) and
len(self.vms_per_host[_host_id]) == 0):
del self.vms_per_host[_host_id]
return success
def get_json_info(self):
"""Return JSON info for Logical Group object."""
return {'status': self.status,
'group_type': self.group_type,
'metadata': self.metadata,
'vm_list': self.vm_list,
'vms_per_host': self.vms_per_host,
'last_update': self.last_update}
def _check_group_type(self, type):
return type in ['EX', 'AFF', 'DIV']
class Flavor(object):
"""Flavor class."""
def __init__(self, _name):
"""Init flavor object."""
self.name = _name
self.flavor_id = None
self.status = "enabled"
self.vCPUs = 0
self.mem_cap = 0 # MB
self.disk_cap = 0 # including ephemeral (GB) and swap (MB)
self.extra_specs = {}
self.last_update = 0
def get_json_info(self):
"""Return JSON info of Flavor Object."""
return {'status': self.status,
'flavor_id': self.flavor_id,
'vCPUs': self.vCPUs,
'mem': self.mem_cap,
'disk': self.disk_cap,
'extra_specs': self.extra_specs,
'last_update': self.last_update}

View File

@ -0,0 +1,78 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Datacenter(object):
'''Container for datacenter resource.'''
def __init__(self, _name):
self.name = _name
self.status = "enabled"
# all available groups (e.g., aggregate) in the datacenter
self.memberships = {}
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
self.resources = {}
self.vm_list = [] # a list of placed vms
self.last_update = 0
def init_resources(self):
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
def get_json_info(self):
membership_list = []
for lgk in self.memberships.keys():
membership_list.append(lgk)
child_list = []
for ck in self.resources.keys():
child_list.append(ck)
return {'status': self.status,
'name': self.name,
'membership_list': membership_list,
'vCPUs': self.vCPUs,
'original_vCPUs': self.original_vCPUs,
'avail_vCPUs': self.avail_vCPUs,
'mem': self.mem_cap,
'original_mem': self.original_mem_cap,
'avail_mem': self.avail_mem_cap,
'local_disk': self.local_disk_cap,
'original_local_disk': self.original_local_disk_cap,
'avail_local_disk': self.avail_local_disk_cap,
'children': child_list,
'vm_list': self.vm_list,
'last_update': self.last_update}

View File

@ -0,0 +1,41 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Flavor(object):
'''Container for flavor resource.'''
def __init__(self, _name):
self.name = _name
self.flavor_id = None
self.status = "enabled"
self.vCPUs = 0
self.mem_cap = 0 # MB
self.disk_cap = 0 # including ephemeral (GB) and swap (MB)
self.extra_specs = {}
self.last_update = 0
def get_json_info(self):
return {'status': self.status,
'flavor_id': self.flavor_id,
'vCPUs': self.vCPUs,
'mem': self.mem_cap,
'disk': self.disk_cap,
'extra_specs': self.extra_specs,
'last_update': self.last_update}

View File

@ -0,0 +1,212 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Group(object):
'''Container for groups.'''
def __init__(self, _name):
self.name = _name
self.group_type = "AGGR" # AGGR, AZ, INTG, EX, DIV, or AFF
self.status = "enabled"
# any metadata to be matched when placing nodes
self.metadata = {}
self.vm_list = [] # a list of placed vms
# key = host_name, value = a list of placed vms
self.vms_per_host = {}
self.last_update = 0
def exist_vm(self, orch_id=None, uuid=None):
'''Check if the vm exists in this group.'''
exist = False
if orch_id is not None and orch_id != "none":
for vm_info in self.vm_list:
if vm_info["orch_id"] == orch_id:
exist = True
break
if not exist:
if uuid is not None and uuid != "none":
for vm_info in self.vm_list:
if vm_info["uuid"] == uuid:
exist = True
break
return exist
def exist_vm_in_host(self, _host_name, orch_id=None, uuid=None):
'''Check if the vm exists in the host in this group.'''
exist = False
if _host_name in self.vms_per_host.keys():
vm_list = self.vms_per_host[_host_name]
if orch_id is not None and orch_id != "none":
for vm_info in vm_list:
if vm_info["orch_id"] == orch_id:
exist = True
break
if not exist:
if uuid is not None and uuid != "none":
for vm_info in vm_list:
if vm_info["uuid"] == uuid:
exist = True
break
return exist
def update_uuid(self, _orch_id, _uuid, _host_name):
'''Update a vm with uuid.'''
success = False
for vm_info in self.vm_list:
if vm_info["orch_id"] == _orch_id:
vm_info["uuid"] = _uuid
success = True
break
if _host_name in self.vms_per_host.keys():
for host_vm_info in self.vms_per_host[_host_name]:
if host_vm_info["orch_id"] == _orch_id:
host_vm_info["uuid"] = _uuid
success = True
break
return success
def update_orch_id(self, _orch_id, _uuid, _host_name):
'''Update a vm with orch_id.'''
success = False
for vm_info in self.vm_list:
if vm_info["uuid"] == _uuid:
vm_info["orch_id"] = _orch_id
success = True
break
if _host_name in self.vms_per_host.keys():
for host_vm_info in self.vms_per_host[_host_name]:
if host_vm_info["uuid"] == _uuid:
host_vm_info["orch_id"] = _orch_id
success = True
break
return success
def add_vm(self, _vm_info, _host_name):
'''Add vm to this group.'''
if self.exist_vm(orch_id=_vm_info["orch_id"], uuid=_vm_info["uuid"]):
self._remove_vm(orch_id=_vm_info["orch_id"], uuid=_vm_info["uuid"])
self.vm_list.append(_vm_info)
if self.exist_vm_in_host(_host_name, orch_id=_vm_info["orch_id"],
uuid=_vm_info["uuid"]):
self.remove_vm_from_host(_host_name, orch_id=_vm_info["orch_id"],
uuid=_vm_info["uuid"])
if (self.group_type == "EX" or self.group_type == "AFF" or
self.group_type == "DIV"):
if _host_name not in self.vms_per_host.keys():
self.vms_per_host[_host_name] = []
self.vms_per_host[_host_name].append(_vm_info)
return True
    def remove_vm(self, _host_name, orch_id=None, uuid=None):
        '''Remove vm from this group and from its host record.'''
        success = self._remove_vm(orch_id, uuid)
        if self.remove_vm_from_host(_host_name, orch_id, uuid) is True:
            success = True
        return success
    def _remove_vm(self, orch_id=None, uuid=None):
        '''Remove vm from the vm_list of this group.'''
success = False
if orch_id is not None and orch_id != "none":
for vm_info in self.vm_list:
if vm_info["orch_id"] == orch_id:
self.vm_list.remove(vm_info)
success = True
break
if not success:
if uuid is not None and uuid != "none":
for vm_info in self.vm_list:
if vm_info["uuid"] == uuid:
self.vm_list.remove(vm_info)
success = True
break
return success
def remove_vm_from_host(self, _host_name, orch_id=None, uuid=None):
'''Remove vm from the host of this group.'''
success = False
if orch_id is not None and orch_id != "none":
if _host_name in self.vms_per_host.keys():
for host_vm_info in self.vms_per_host[_host_name]:
if host_vm_info["orch_id"] == orch_id:
self.vms_per_host[_host_name].remove(host_vm_info)
success = True
break
if not success:
if uuid is not None and uuid != "none":
if _host_name in self.vms_per_host.keys():
for host_vm_info in self.vms_per_host[_host_name]:
if host_vm_info["uuid"] == uuid:
self.vms_per_host[_host_name].remove(host_vm_info)
success = True
break
if (self.group_type == "EX" or self.group_type == "AFF" or
self.group_type == "DIV"):
if ((_host_name in self.vms_per_host.keys()) and
len(self.vms_per_host[_host_name]) == 0):
del self.vms_per_host[_host_name]
return success
def get_json_info(self):
'''Get group info as JSON format.'''
return {'status': self.status,
'group_type': self.group_type,
'metadata': self.metadata,
'vm_list': self.vm_list,
'vms_per_host': self.vms_per_host,
'last_update': self.last_update}
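A short usage sketch of the Group container defined above. The import path follows the new resources package introduced by this commit; the group, host, and vm ids are invented:

from valet.engine.resource_manager.resources.group import Group

g = Group("rack:aff_01")
g.group_type = "AFF"  # EX/AFF/DIV groups also track vms per host

vm = {"orch_id": "orch-1", "uuid": "none", "name": "vm-01"}
g.add_vm(vm, "pdk15r05c001")
g.update_uuid("orch-1", "uuid-abc", "pdk15r05c001")  # physical id arrives

assert g.exist_vm(uuid="uuid-abc")
g.remove_vm("pdk15r05c001", orch_id="orch-1")
assert "pdk15r05c001" not in g.vms_per_host  # emptied host entry is dropped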

View File

@ -0,0 +1,230 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Host(object):
'''Container for compute host.'''
def __init__(self, _name):
self.name = _name
# mark if this is synch'ed by multiple sources
self.tag = []
self.status = "enabled"
self.state = "up"
# group (e.g., aggregate) this hosting server is involved in
self.memberships = {}
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
self.vCPUs_used = 0
self.free_mem_mb = 0
self.free_disk_gb = 0
self.disk_available_least = 0
self.host_group = None # e.g., rack
self.vm_list = [] # a list of placed vms
self.last_update = 0
def clean_memberships(self):
'''Remove from memberships.'''
cleaned = False
for lgk in self.memberships.keys():
lg = self.memberships[lgk]
if self.name not in lg.vms_per_host.keys():
del self.memberships[lgk]
cleaned = True
return cleaned
def remove_membership(self, _lg):
'''Remove a membership. '''
cleaned = False
if (_lg.group_type == "EX" or _lg.group_type == "AFF" or
_lg.group_type == "DIV"):
if self.name not in _lg.vms_per_host.keys():
del self.memberships[_lg.name]
cleaned = True
return cleaned
def check_availability(self):
'''Check if host is available.'''
if (self.status == "enabled" and self.state == "up" and
("nova" in self.tag) and ("infra" in self.tag)):
return True
else:
return False
def get_vm_info(self, orch_id=None, uuid=None):
'''Get vm info.'''
vm_info = None
if orch_id is not None and orch_id != "none":
for v_info in self.vm_list:
if v_info["orch_id"] == orch_id:
vm_info = v_info
break
if vm_info is None:
if uuid is not None and uuid != "none":
for v_info in self.vm_list:
if v_info["uuid"] == uuid:
vm_info = v_info
break
return vm_info
    def get_uuid(self, _orch_id):
        '''Return the physical uuid of the vm with the given orch_id.'''
uuid = None
for vm_info in self.vm_list:
if vm_info["orch_id"] == _orch_id:
uuid = vm_info["uuid"]
break
return uuid
def exist_vm(self, orch_id=None, uuid=None):
'''Check if vm is located in this host.'''
exist = False
if orch_id is not None and orch_id != "none":
for vm_info in self.vm_list:
if vm_info["orch_id"] == orch_id:
exist = True
break
if not exist:
if uuid is not None and uuid != "none":
for vm_info in self.vm_list:
if vm_info["uuid"] == uuid:
exist = True
break
return exist
def remove_vm(self, orch_id=None, uuid=None):
'''Remove vm from this host.'''
success = False
if orch_id is not None and orch_id != "none":
for vm_info in self.vm_list:
if vm_info["orch_id"] == orch_id:
self.vm_list.remove(vm_info)
success = True
break
if not success:
if uuid is not None and uuid != "none":
for vm_info in self.vm_list:
if vm_info["uuid"] == uuid:
self.vm_list.remove(vm_info)
success = True
break
return success
def update_uuid(self, _orch_id, _uuid):
'''Update a vm to include uuid.'''
success = False
for vm_info in self.vm_list:
if vm_info["orch_id"] == _orch_id:
vm_info["uuid"] = _uuid
success = True
break
return success
    def update_orch_id(self, _orch_id, _uuid):
        '''Update a vm to include orch_id.'''
success = False
for vm_info in self.vm_list:
if vm_info["uuid"] == _uuid:
vm_info["orch_id"] = _orch_id
success = True
break
return success
def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio):
self.vCPUs = self.original_vCPUs * _overcommit_ratio * \
(1.0 - _standby_ratio)
self.avail_vCPUs = self.vCPUs - self.vCPUs_used
def compute_avail_mem(self, _overcommit_ratio, _standby_ratio):
self.mem_cap = self.original_mem_cap * _overcommit_ratio * \
(1.0 - _standby_ratio)
used_mem_mb = self.original_mem_cap - self.free_mem_mb
self.avail_mem_cap = self.mem_cap - used_mem_mb
def compute_avail_disk(self, _overcommit_ratio, _standby_ratio):
self.local_disk_cap = self.original_local_disk_cap * \
_overcommit_ratio * (1.0 - _standby_ratio)
free_disk_cap = self.free_disk_gb
if self.disk_available_least > 0:
free_disk_cap = min(self.free_disk_gb, self.disk_available_least)
used_disk_cap = self.original_local_disk_cap - free_disk_cap
self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap
def get_json_info(self):
membership_list = []
for lgk in self.memberships.keys():
membership_list.append(lgk)
return {'tag': self.tag, 'status': self.status, 'state': self.state,
'membership_list': membership_list,
'vCPUs': self.vCPUs,
'original_vCPUs': self.original_vCPUs,
'avail_vCPUs': self.avail_vCPUs,
'mem': self.mem_cap,
'original_mem': self.original_mem_cap,
'avail_mem': self.avail_mem_cap,
'local_disk': self.local_disk_cap,
'original_local_disk': self.original_local_disk_cap,
'avail_local_disk': self.avail_local_disk_cap,
'vCPUs_used': self.vCPUs_used,
'free_mem_mb': self.free_mem_mb,
'free_disk_gb': self.free_disk_gb,
'disk_available_least': self.disk_available_least,
'parent': self.host_group.name,
'vm_list': self.vm_list,
'last_update': self.last_update}
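The compute_avail_* methods above apply the oversubscription arithmetic: capacity = original * overcommit_ratio * (1 - standby_ratio), and the available amount subtracts what is already in use. A worked sketch with invented numbers, assuming the new resources package path:

from valet.engine.resource_manager.resources.host import Host

h = Host("pdk15r05c001")
h.original_vCPUs = 16
h.vCPUs_used = 4
h.original_mem_cap = 32768       # MB
h.free_mem_mb = 20000
h.original_local_disk_cap = 200  # GB
h.free_disk_gb = 150
h.disk_available_least = 140

h.compute_avail_vCPUs(2.0, 0.1)  # 16 * 2.0 * 0.9 = 28.8; avail = 24.8
h.compute_avail_mem(1.5, 0.1)    # cap 44236.8 MB; used 12768; avail 31468.8
h.compute_avail_disk(1.0, 0.0)   # free = min(150, 140); avail = 140.0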

View File

@ -0,0 +1,116 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from valet.engine.optimizer.app_manager.group import LEVEL
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
class HostGroup(object):
'''Container for host group (rack).'''
def __init__(self, _id):
self.name = _id
# rack or cluster(e.g., power domain, zone)
self.host_type = "rack"
self.status = "enabled"
# all available groups (e.g., aggregate) in this group
self.memberships = {}
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
self.parent_resource = None # e.g., datacenter
self.child_resources = {} # e.g., hosting servers
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
self.vm_list = []
self.last_update = 0
def init_resources(self):
self.vCPUs = 0
self.original_vCPUs = 0
self.avail_vCPUs = 0
self.mem_cap = 0 # MB
self.original_mem_cap = 0
self.avail_mem_cap = 0
self.local_disk_cap = 0 # GB, ephemeral
self.original_local_disk_cap = 0
self.avail_local_disk_cap = 0
def init_memberships(self):
for lgk in self.memberships.keys():
lg = self.memberships[lgk]
if (lg.group_type == "EX" or lg.group_type == "AFF" or
lg.group_type == "DIV"):
level = lg.name.split(":")[0]
if (LEVELS.index(level) < LEVELS.index(self.host_type) or
self.name not in lg.vms_per_host.keys()):
del self.memberships[lgk]
else:
del self.memberships[lgk]
def remove_membership(self, _lg):
cleaned = False
if (_lg.group_type == "EX" or _lg.group_type == "AFF" or
_lg.group_type == "DIV"):
if self.name not in _lg.vms_per_host.keys():
del self.memberships[_lg.name]
cleaned = True
return cleaned
def check_availability(self):
if self.status == "enabled":
return True
else:
return False
def get_json_info(self):
membership_list = []
for lgk in self.memberships.keys():
membership_list.append(lgk)
child_list = []
for ck in self.child_resources.keys():
child_list.append(ck)
return {'status': self.status,
'host_type': self.host_type,
'membership_list': membership_list,
'vCPUs': self.vCPUs,
'original_vCPUs': self.original_vCPUs,
'avail_vCPUs': self.avail_vCPUs,
'mem': self.mem_cap,
'original_mem': self.original_mem_cap,
'avail_mem': self.avail_mem_cap,
'local_disk': self.local_disk_cap,
'original_local_disk': self.original_local_disk_cap,
'avail_local_disk': self.avail_local_disk_cap,
'parent': self.parent_resource.name,
'children': child_list,
'vm_list': self.vm_list,
'last_update': self.last_update}
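The parent_resource and child_resources fields above are what TopologyManager wires together: each Host hangs off a rack-level HostGroup, and each HostGroup hangs off the Datacenter. A minimal wiring sketch, with invented names and imports per the new resources package:

from valet.engine.resource_manager.resources.datacenter import Datacenter
from valet.engine.resource_manager.resources.host import Host
from valet.engine.resource_manager.resources.host_group import HostGroup

dc = Datacenter("dc1")
rack = HostGroup("pdk15r05")
rack.parent_resource = dc
dc.resources[rack.name] = rack

host = Host("pdk15r05c001")
host.host_group = rack
rack.child_resources[host.name] = host

assert rack.get_json_info()["parent"] == "dc1"
assert host.get_json_info()["parent"] == "pdk15r05"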

View File

@ -0,0 +1,42 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from valet.engine.resource_manager.heat import Heat
class StackManager(object):
def __init__(self, _resource, _config, _logger):
self.phandler = None
self.ahandler = None
self.resource = _resource
self.config = _config
self.logger = _logger
def set_handlers(self, _placement_handler, _app_handler):
'''Set handlers.'''
self.phandler = _placement_handler
self.ahandler = _app_handler
def set_stacks(self):
self.logger.info("set stacks")
# stacks = {}
# stack_getter = Heat(self.logger)
return True

View File

@ -1,168 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Topology class - performs actual setting up of Topology object."""
import copy
from oslo_log import log
from sre_parse import isdigit
from valet.engine.resource_manager.resource_base import HostGroup
LOG = log.getLogger(__name__)
class Topology(object):
"""Topology class.
Currently, uses the canonical naming convention to find the topology.
"""
def __init__(self, _config):
"""Init config and logger."""
self.config = _config
# Triggered by rhosts change
def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
"""Return result status if setting host or network topology fails."""
result_status = self._set_host_topology(_datacenter, _host_groups,
_hosts, _rhosts)
if result_status != "success":
return result_status
# TODO(GJ): set network bandwidth links
return "success"
def _set_host_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
for rhk, rh in _rhosts.iteritems():
h = copy.deepcopy(rh)
if "infra" not in h.tag:
h.tag.append("infra")
(region_name, rack_name, _, status) = self._set_layout_by_name(rhk)
if status != "success":
LOG.warning(status + " in host_name (" + rhk + ")")
if region_name not in _datacenter.region_code_list:
_datacenter.region_code_list.append(region_name)
if rack_name not in _host_groups.keys():
host_group = HostGroup(rack_name)
host_group.host_type = "rack"
_host_groups[host_group.name] = host_group
h.host_group = _host_groups[rack_name]
_hosts[h.name] = h
for hgk, hg in _host_groups.iteritems():
hg.parent_resource = _datacenter
for _, h in _hosts.iteritems():
if h.host_group.name == hgk:
hg.child_resources[h.name] = h
_datacenter.resources[hgk] = hg
if len(_datacenter.region_code_list) > 1:
LOG.warning("more than one region code")
if "none" in _host_groups.keys():
LOG.warning("some hosts are into unknown rack")
return "success"
def _set_layout_by_name(self, _host_name):
region_name = None
rack_name = None
node_type = None
status = "success"
validated_name = True
num_of_fields = 0
index = 0
end_of_region_index = 0
end_of_rack_index = 0
index_of_node_type = 0
for c in _host_name:
if index >= self.config.num_of_region_chars:
if not isdigit(c):
if index == self.config.num_of_region_chars:
status = "invalid region name = " + \
_host_name[:index] + c
validated_name = False
break
if end_of_region_index == 0:
if c not in self.config.rack_code_list:
status = "invalid rack_char = " + c
validated_name = False
break
end_of_region_index = index
num_of_fields += 1
if index == (end_of_region_index + 1):
msg = "invalid rack name = {0}{1}"
status = msg.format(_host_name[:index], c)
validated_name = False
break
if (end_of_rack_index == 0 and
index > (end_of_region_index + 1)):
end_of_rack_index = index
num_of_fields += 1
if node_type is None and end_of_rack_index > 0:
node_type = c
if node_type not in self.config.node_code_list:
status = "invalid node_char = " + c
validated_name = False
break
index_of_node_type = index
num_of_fields += 1
if c == '.':
break
if index_of_node_type > 0 and index > index_of_node_type:
num_of_fields += 1
break
index += 1
if not index > (index_of_node_type + 1):
status = "invalid node name = " + _host_name[:index]
validated_name = False
if num_of_fields != 3:
status = "invalid number of identification fields = " + \
str(num_of_fields)
validated_name = False
if validated_name is False:
region_name = "none"
rack_name = "none"
else:
region_name = _host_name[:end_of_region_index]
rack_name = _host_name[:end_of_rack_index]
return (region_name, rack_name, node_type, status)
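To make the parsing above concrete (the convention is retained by the new Naming module, per the test changes below): with num_of_region_chars = 3, a rack_code_list containing 'r', and a node_code_list of "a,c,u,f,o,p,s", a canonical host name decomposes as follows. The values are illustrative, not taken from a real config:

name = "pdk15r05c001"
region = name[:5]      # "pdk15": 3 region chars + trailing digits
rack = name[:8]        # "pdk15r05": region + rack char 'r' + rack digits
node_type = name[8]    # 'c': node-type char, here a compute node
print(region, rack, node_type)  # pdk15 pdk15r05 c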

View File

@ -17,10 +17,10 @@ import time
from oslo_log import log
from valet.engine.resource_manager.naming import Naming
from valet.engine.resource_manager.resources.datacenter import Datacenter
from valet.engine.resource_manager.resources.host import Host
from valet.engine.resource_manager.resources.host_group import HostGroup
LOG = log.getLogger(__name__)
@ -45,49 +45,47 @@ class TopologyManager(threading.Thread):
self.update_batch_wait = self.config.update_batch_wait
def run(self):
"""Function starts and tracks Topology Manager Thread."""
LOG.info("TopologyManager: start " +
self.thread_name + " ......")
"""Keep checking datacenter topology."""
LOG.info("start " + self.thread_name + " ......")
period_end = 0
if self.config.topology_trigger_freq > 0:
period_end = time.time() + self.config.topology_trigger_freq
while self.end_of_process is False:
time.sleep(70)
curr_ts = time.time()
if curr_ts > period_end:
# Give some time (batch_wait) to update resource status via
# message bus. Otherwise, late update will be cleaned up
time_diff = curr_ts - self.resource.current_timestamp
if time_diff > self.update_batch_wait:
self._run()
period_end = (curr_ts +
self.config.topology_trigger_freq)
# NOTE(GJ): do not timer based batch
LOG.info("exit topology_manager " + self.thread_name)
while self.end_of_process is False:
time.sleep(70)
curr_ts = time.time()
if curr_ts > period_end:
# Give some time (batch_wait) to update resource status via
# message bus. Otherwise, late update will be cleaned up
now_time = (curr_ts - self.resource.current_timestamp)
if now_time > self.update_batch_wait:
self._run()
period_end = time.time() + \
self.config.topology_trigger_freq
LOG.info("exit " + self.thread_name)
def _run(self):
LOG.info("TopologyManager: --- start topology "
"status update ---")
if self.set_topology() is not True:
LOG.warning("fail to set topology")
LOG.info("--- done topology status update ---")
def set_topology(self):
"""Set datacenter layout"""
LOG.info("set datacenter layout")
host_groups = {}
hosts = {}
# NOTE(GJ): do not consider switch topology at this version
datacenter = Datacenter(self.config.datacenter_name)
        topology = Naming(self.config, LOG)
        if topology.set_topology(datacenter, host_groups, hosts,
                                 self.resource.hosts) != "success":
return False
self.data_lock.acquire()
@ -104,11 +102,10 @@ class TopologyManager(threading.Thread):
if hk not in self.resource.hosts.keys():
new_host = self._create_new_host(_hosts[hk])
self.resource.hosts[new_host.name] = new_host
new_host.last_update = time.time()
LOG.warning("TopologyManager: new host (" +
new_host.name + ") added from configuration")
LOG.info("TopologyManager: new host (" +
new_host.name + ") added from configuration")
updated = True
for rhk in self.resource.hosts.keys():
@ -116,33 +113,30 @@ class TopologyManager(threading.Thread):
host = self.resource.hosts[rhk]
if "infra" in host.tag:
host.tag.remove("infra")
host.last_update = time.time()
LOG.warning("TopologyManager: host (" +
host.name + ") removed from configuration")
LOG.info("TopologyManager: host (" +
host.name + ") removed from configuration")
updated = True
for hgk in _host_groups.keys():
if hgk not in self.resource.host_groups.keys():
new_host_group = self._create_new_host_group(_host_groups[hgk])
self.resource.host_groups[new_host_group.name] = new_host_group
new_host_group.last_update = time.time()
LOG.warning("TopologyManager: new host_group (" +
new_host_group.name + ") added")
LOG.info("TopologyManager: new host_group (" +
new_host_group.name + ") added")
updated = True
for rhgk in self.resource.host_groups.keys():
if rhgk not in _host_groups.keys():
host_group = self.resource.host_groups[rhgk]
host_group.status = "disabled"
host_group.last_update = time.time()
LOG.warning("TopologyManager: host_group (" +
host_group.name + ") disabled")
LOG.info("TopologyManager: host_group (" +
host_group.name + ") disabled")
updated = True
for hk in _hosts.keys():
@ -194,11 +188,7 @@ class TopologyManager(threading.Thread):
if "infra" not in _rhost.tag:
_rhost.tag.append("infra")
updated = True
LOG.warning("TopologyManager: host (" + _rhost.name +
") updated (tag)")
if (_rhost.host_group is None or
_host.host_group.name != _rhost.host_group.name):
LOG.info("host (" + _rhost.name + ") updated (tag)")
if _host.host_group.name in self.resource.host_groups.keys():
_rhost.host_group = \
@ -206,8 +196,7 @@ class TopologyManager(threading.Thread):
else:
_rhost.host_group = self.resource.datacenter
updated = True
LOG.warning("TopologyManager: host (" + _rhost.name +
") updated (host_group)")
LOG.info("host (" + _rhost.name + ") updated (host_group)")
return updated
@ -217,14 +206,12 @@ class TopologyManager(threading.Thread):
if _hg.host_type != _rhg.host_type:
_rhg.host_type = _hg.host_type
updated = True
LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (hosting type)")
LOG.info("host_group (" + _rhg.name + ") updated (hosting type)")
if _rhg.status == "disabled":
_rhg.status = "enabled"
updated = True
LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (enabled)")
LOG.info("host_group (" + _rhg.name + ") updated (enabled)")
if _hg.parent_resource != _rhg.parent_resource:
if _hg.parent_resource.name in self.resource.host_groups.keys():
@ -233,8 +220,8 @@ class TopologyManager(threading.Thread):
else:
_rhg.parent_resource = self.resource.datacenter
updated = True
LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (parent host_group)")
LOG.info("host_group (" + _rhg.name + ") updated (parenti"
" host_group)")
for rk in _hg.child_resources.keys():
exist = False
@ -248,8 +235,8 @@ class TopologyManager(threading.Thread):
elif _rhg.host_type == "cluster":
_rhg.child_resources[rk] = self.resource.host_groups[rk]
updated = True
LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (new child host)")
LOG.info("host_group (" + _rhg.name + ") updated (new child i"
"host)")
for rrk in _rhg.child_resources.keys():
exist = False
@ -260,30 +247,14 @@ class TopologyManager(threading.Thread):
if exist is False:
del _rhg.child_resources[rrk]
updated = True
LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (child host removed)")
LOG.info("host_group (" + _rhg.name + ") updated (child host "
"removed)")
return updated
def _check_datacenter_update(self, _datacenter):
updated = False
for rc in _datacenter.region_code_list:
if rc not in self.resource.datacenter.region_code_list:
self.resource.datacenter.region_code_list.append(rc)
updated = True
LOG.warning("TopologyManager: datacenter updated "
"(new region code, " + rc + ")")
code_list = self.resource.datacenter.region_code_list
blen = len(code_list)
code_list = [r for r in code_list if r in _datacenter.region_code_list]
alen = len(code_list)
if alen != blen:
updated = True
self.resource.datacenter.region_code_list = code_list
LOG.warning("datacenter updated (region code removed)")
for rk in _datacenter.resources.keys():
exist = False
for rrk in self.resource.datacenter.resources.keys():
@ -299,8 +270,7 @@ class TopologyManager(threading.Thread):
self.resource.datacenter.resources[rk] = \
self.resource.hosts[rk]
updated = True
LOG.warning("TopologyManager: datacenter updated "
"(new resource)")
LOG.info("datacenter updated (new resource)")
for rrk in self.resource.datacenter.resources.keys():
exist = False
@ -311,7 +281,6 @@ class TopologyManager(threading.Thread):
if exist is False:
del self.resource.datacenter.resources[rrk]
updated = True
LOG.warning("TopologyManager: datacenter updated "
"(resource removed)")
LOG.info("datacenter updated (resource removed)")
return updated
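The region-code handling at the top of _check_datacenter_update is an append-then-intersect update: newly reported codes are appended, then the list comprehension keeps only codes the datacenter still reports, and the before/after lengths (blen/alen) signal that a removal happened. A worked toy run of that step, with made-up region codes:

cached = ["pdk15", "ihk01"]
incoming = ["pdk15", "mtn12"]

# Append codes that are new on the incoming side.
for code in incoming:
    if code not in cached:
        cached.append(code)                   # cached: pdk15, ihk01, mtn12

# Keep only codes the incoming datacenter still reports.
blen = len(cached)                            # 3
cached = [c for c in cached if c in incoming]
alen = len(cached)                            # 2: ihk01 was dropped

assert cached == ["pdk15", "mtn12"]
assert alen != blen                           # datacenter marked as updated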

View File

@ -14,58 +14,54 @@
# limitations under the License.
"""Test Topology."""
from oslo_log import log
from valet.engine.resource_manager.topology import Topology
from valet.engine.resource_manager.naming import Naming
from valet.tests.base import Base
LOG = log.getLogger(__name__)
class TestTopology(Base):
"""Unit Tests for valet.engine.resource_manager.topology."""
class TestNaming(Base):
"""Unit Tests for valet.engine.resource_manager.naming."""
def setUp(self):
"""Setup TestTopology Test Class."""
super(TestTopology, self).setUp()
self.topo = Topology(Config())
"""Setup TestNaming Test Class."""
super(TestNaming, self).setUp()
self.topo = Naming(Config(), LOG)
def test_simple_topology(self):
"""Validate simple topology (region, rack, node_type and status)."""
(region, rack, node_type, status) = \
(full_rack_name, status) = \
self.topo._set_layout_by_name("pdk15r05c001")
self.validate_test(region == "pdk15")
self.validate_test(rack == "pdk15r05")
self.validate_test(node_type in "a,c,u,f,o,p,s")
self.validate_test(full_rack_name == "pdk15r05")
self.validate_test(status == "success")
def test_domain_topology(self):
"""Test Domain Topology."""
(region, rack, node_type, status) = \
(full_rack_name, status) = \
self.topo._set_layout_by_name("ihk01r01c001.emea.att.com")
self.validate_test(region == "ihk01")
self.validate_test(rack == "ihk01r01")
self.validate_test(node_type in "a,c,u,f,o,p,s")
self.validate_test(full_rack_name == "ihk01r01")
self.validate_test(status == "success")
def test_unhappy_topology_r(self):
"""Test unhappy topology, region/rack/node none, invalid status 0."""
(region, rack, node_type, status) = \
(full_rack_name, status) = \
self.topo._set_layout_by_name("pdk1505c001")
self.validate_test(region == "none")
self.validate_test(rack == "none")
self.validate_test(node_type is None)
self.validate_test(status == "invalid number of "
"identification fields = 0")
self.validate_test(full_rack_name == "none")
self.validate_test(status == "invalid rack_char = c. "
"missing rack_char = r")
def test_unhappy_topology_c(self):
"""Test unhappy topology with values none and 1 invalid status."""
(region, rack, node_type, status) = \
(full_rack_name, status) = \
self.topo._set_layout_by_name("pdk15r05001")
self.validate_test(region == "none")
self.validate_test(rack == "none")
self.validate_test(node_type is None)
self.validate_test(status == "invalid number of "
"identification fields = 1")
self.validate_test(full_rack_name == "none")
self.validate_test(status == "incorrect format of rack "
"name = ")
# TODO(UNKNOWN): add validation to topology for region
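Taken together, the tests pin down the host-naming grammar the renamed Naming module is expected to parse: a region code ("pdk15"), a rack field introduced by the rack_char "r" ("r05"), a trailing node field ("c001"), and an optional DNS-style domain suffix that is ignored; the full rack name is region plus rack. A rough regex illustration of that convention (an editor's sketch of what the tests imply, not the actual Naming implementation):

import re

# region = letters + digits, rack = 'r' + digits, node = letter + digits;
# anything after the node field (e.g. ".emea.att.com") is ignored.
_RACK_RE = re.compile(r"^([a-z]+\d+)(r\d+)[a-z]\d+")

def full_rack_name(host_name):
    """Return (rack_name, status) per the convention the tests encode."""
    match = _RACK_RE.match(host_name)
    if match is None:
        return "none", "invalid host naming"
    return match.group(1) + match.group(2), "success"

assert full_rack_name("pdk15r05c001") == ("pdk15r05", "success")
assert full_rack_name("ihk01r01c001.emea.att.com") == ("ihk01r01", "success")
assert full_rack_name("pdk1505c001")[0] == "none"    # no rack_char 'r'
assert full_rack_name("pdk15r05001")[0] == "none"    # no node field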