Refactor collector service

Change-Id: I6d5cbfa89d9e52e2be31eba2f509062a565e4ecd
kong 2015-05-03 23:43:59 +08:00
parent b58b966f50
commit 45b10f82ec
33 changed files with 154 additions and 5461 deletions

View File

@@ -28,7 +28,7 @@ import os
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'mistral', '__init__.py')):
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'terracotta', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from oslo_config import cfg
@@ -40,14 +40,7 @@ from terracotta import rpc
from terracotta.locals import collector
from terracotta.locals import manager as local_mgr
from terracotta.globals import manager as global_mgr
from mistral import context as ctx
from mistral.db.v2 import api as db_api
from mistral.engine import default_engine as def_eng
from mistral.engine import default_executor as def_executor
from mistral.engine import rpc
from mistral.services import scheduler
from mistral import version
from terracotta import version
LOG = logging.getLogger(__name__)
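The only functional change in this launcher hunk is the swap of the leftover mistral imports and source-tree check for terracotta's own package. The POSSIBLE_TOPDIR block is the usual "prefer the source checkout" bootstrap; a minimal standalone sketch of the same pattern, using a hypothetical package name mypkg:

import os
import sys

# Candidate top-level directory: two levels up from the running script.
POSSIBLE_TOPDIR = os.path.normpath(
    os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))

# If it contains mypkg/__init__.py we are running from a source checkout,
# so make that checkout take precedence over any installed version.
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'mypkg', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)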

View File

@@ -90,37 +90,47 @@ invoked, the component performs the following steps:
seconds.
"""
from collections import deque
from contracts import contract
from neat.contracts_primitive import *
from neat.contracts_extra import *
import libvirt
import os
import time
from collections import deque
import libvirt
import neat.common as common
from neat.config import *
from neat.db_utils import *
from oslo_config import cfg
from oslo_log import log as logging
import logging
from terracotta.openstack.common import service
from terracotta import common
from terracotta.contracts_extra import *
from terracotta.contracts_primitive import *
from terracotta.openstack.common import periodic_task
from terracotta.openstack.common import threadgroup
from terracotta.utils import db_utils
log = logging.getLogger(__name__)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Collector(service.Service):
class Collector(periodic_task.PeriodicTasks):
def __init__(self):
super(Service, self).__init__()
self.state = self.init_state()
super(Collector, self).__init__()
vm_path = common.build_local_vm_path(CONF.local_data_directory)
if not os.access(vm_path, os.F_OK):
os.makedirs(vm_path)
LOG.info('Created a local VM data directory: %s', vm_path)
else:
self.cleanup_all_local_data(CONF.local_data_directory)
LOG.info('Cleaned up the local data directory: %s',
CONF.local_data_directory)
self.state = self.init_state()
self.tg = threadgroup.ThreadGroup()
self.tg.add_dynamic_timer(
self.execute,
initial_delay=initial_delay,
periodic_interval_max=self.periodic_interval_max,
self.state
self.run_periodic_tasks,
initial_delay=None,
periodic_interval_max=1,
context=None
)
@contract
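The core of this hunk is a move from a hand-rolled service with its own execute loop to the oslo-incubator periodic-task pattern: the class derives from periodic_task.PeriodicTasks, the iteration method is registered with the @periodic_task.periodic_task decorator, and a ThreadGroup dynamic timer keeps calling run_periodic_tasks(). Because the +/- diff markers were lost in this listing, here is the new wiring in one piece, condensed from the lines above (the helper modules are the project's bundled oslo-incubator copies, so exact call signatures follow that copy rather than upstream oslo.service):

from terracotta.openstack.common import periodic_task
from terracotta.openstack.common import threadgroup


class Collector(periodic_task.PeriodicTasks):
    def __init__(self):
        super(Collector, self).__init__()
        self.state = self.init_state()
        self.tg = threadgroup.ThreadGroup()
        # The dynamic timer repeatedly invokes run_periodic_tasks(), which
        # in turn dispatches to every method decorated below.
        self.tg.add_dynamic_timer(self.run_periodic_tasks,
                                  initial_delay=None,
                                  periodic_interval_max=1,
                                  context=None)

    @periodic_task.periodic_task
    def execute(self):
        # One data-collection iteration; state is kept on the instance.
        pass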
@@ -136,15 +146,15 @@ class Collector(service.Service):
vir_connection = libvirt.openReadOnly(None)
if vir_connection is None:
message = 'Failed to open a connection to the hypervisor'
log.critical(message)
LOG.critical(message)
raise OSError(message)
hostname = vir_connection.getHostname()
host_cpu_mhz, host_ram = get_host_characteristics(vir_connection)
host_cpu_mhz, host_ram = self.get_host_characteristics(vir_connection)
physical_cpus = common.physical_cpu_count(vir_connection)
host_cpu_usable_by_vms = float(config['host_cpu_usable_by_vms'])
host_cpu_usable_by_vms = float(CONF.host_cpu_usable_by_vms)
db = init_db(config['sql_connection'])
db = db_utils.init_db()
db.update_host(hostname,
int(host_cpu_mhz * host_cpu_usable_by_vms),
physical_cpus,
@@ -159,15 +169,15 @@ class Collector(service.Service):
'vir_connection': vir_connection,
'hostname': hostname,
'host_cpu_overload_threshold':
float(config['host_cpu_overload_threshold']) * \
float(CONF.host_cpu_overload_threshold) * \
host_cpu_usable_by_vms,
'physical_cpus': physical_cpus,
'physical_cpu_mhz': host_cpu_mhz,
'physical_core_mhz': host_cpu_mhz / physical_cpus,
'db': db}
def execute(self, state):
@periodic_task.periodic_task
def execute(self):
""" Execute a data collection iteration.
1. Read the names of the files from the <local_data_directory>/vm
@@ -196,55 +206,43 @@ class Collector(service.Service):
8. Store the converted data in the <local_data_directory>/vm
directory in separate files for each VM, and submit the data to the
central database.
:param config: A config dictionary.
:type config: dict(str: *)
:param state: A state dictionary.
:type state: dict(str: *)
:return: The updated state dictionary.
:rtype: dict(str: *)
"""
log.info('Started an iteration')
vm_path = common.build_local_vm_path(config['local_data_directory'])
host_path = common.build_local_host_path(config['local_data_directory'])
data_length = int(config['data_collector_data_length'])
vms_previous = get_previous_vms(vm_path)
vms_current = get_current_vms(state['vir_connection'])
LOG.info('Started an iteration')
state = self.state
vms_added = get_added_vms(vms_previous, vms_current.keys())
vm_path = common.build_local_vm_path(CONF.local_data_directory)
host_path = common.build_local_host_path(CONF.local_data_directory)
data_length = int(CONF.data_collector_data_length)
vms_previous = self.get_previous_vms(vm_path)
vms_current = self.get_current_vms(state['vir_connection'])
vms_added = self.get_added_vms(vms_previous, vms_current.keys())
added_vm_data = dict()
if vms_added:
if log.isEnabledFor(logging.DEBUG):
log.debug('Added VMs: %s', str(vms_added))
LOG.debug('Added VMs: %s', str(vms_added))
for i, vm in enumerate(vms_added):
if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING:
del vms_added[i]
del vms_current[vm]
if log.isEnabledFor(logging.DEBUG):
log.debug('Added VM %s skipped as migrating in', vm)
LOG.debug('Added VM %s skipped as migrating in', vm)
added_vm_data = fetch_remote_data(state['db'],
added_vm_data = self.fetch_remote_data(state['db'],
data_length,
vms_added)
if log.isEnabledFor(logging.DEBUG):
log.debug('Fetched remote data: %s', str(added_vm_data))
write_vm_data_locally(vm_path, added_vm_data, data_length)
LOG.debug('Fetched remote data: %s', str(added_vm_data))
self.write_vm_data_locally(vm_path, added_vm_data, data_length)
vms_removed = get_removed_vms(vms_previous, vms_current.keys())
vms_removed = self.get_removed_vms(vms_previous, vms_current.keys())
if vms_removed:
if log.isEnabledFor(logging.DEBUG):
log.debug('Removed VMs: %s', str(vms_removed))
cleanup_local_vm_data(vm_path, vms_removed)
LOG.debug('Removed VMs: %s', str(vms_removed))
self.cleanup_local_vm_data(vm_path, vms_removed)
for vm in vms_removed:
del state['previous_cpu_time'][vm]
del state['previous_cpu_mhz'][vm]
log.info('Started VM data collection')
LOG.info('Started VM data collection')
current_time = time.time()
(cpu_time, cpu_mhz) = get_cpu_mhz(state['vir_connection'],
(cpu_time, cpu_mhz) = self.get_cpu_mhz(state['vir_connection'],
state['physical_core_mhz'],
state['previous_cpu_time'],
state['previous_time'],
@@ -252,38 +250,41 @@ class Collector(service.Service):
vms_current.keys(),
state['previous_cpu_mhz'],
added_vm_data)
log.info('Completed VM data collection')
LOG.info('Completed VM data collection')
log.info('Started host data collection')
(host_cpu_time_total,
host_cpu_time_busy,
host_cpu_mhz) = get_host_cpu_mhz(state['physical_cpu_mhz'],
LOG.info('Started host data collection')
(host_cpu_time_total, host_cpu_time_busy, host_cpu_mhz) = \
self.get_host_cpu_mhz(
state['physical_cpu_mhz'],
state['previous_host_cpu_time_total'],
state['previous_host_cpu_time_busy'])
log.info('Completed host data collection')
state['previous_host_cpu_time_busy']
)
LOG.info('Completed host data collection')
if state['previous_time'] > 0:
append_vm_data_locally(vm_path, cpu_mhz, data_length)
append_vm_data_remotely(state['db'], cpu_mhz)
self.append_vm_data_locally(vm_path, cpu_mhz, data_length)
self.append_vm_data_remotely(state['db'], cpu_mhz)
total_vms_cpu_mhz = sum(cpu_mhz.values())
host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
if host_cpu_mhz_hypervisor < 0:
host_cpu_mhz_hypervisor = 0
total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor
append_host_data_locally(host_path, host_cpu_mhz_hypervisor, data_length)
append_host_data_remotely(state['db'],
self.append_host_data_locally(host_path, host_cpu_mhz_hypervisor,
data_length)
self.append_host_data_remotely(state['db'],
state['hostname'],
host_cpu_mhz_hypervisor)
if log.isEnabledFor(logging.DEBUG):
log.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
log.debug('Collected total VMs CPU MHz: %s', str(total_vms_cpu_mhz))
log.debug('Collected hypervisor CPU MHz: %s', str(host_cpu_mhz_hypervisor))
log.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
log.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))
LOG.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
LOG.debug('Collected total VMs CPU MHz: %s',
str(total_vms_cpu_mhz))
LOG.debug('Collected hypervisor CPU MHz: %s',
str(host_cpu_mhz_hypervisor))
LOG.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
LOG.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))
state['previous_overload'] = log_host_overload(
state['previous_overload'] = self.log_host_overload(
state['db'],
state['host_cpu_overload_threshold'],
state['hostname'],
@@ -297,8 +298,8 @@ class Collector(service.Service):
state['previous_host_cpu_time_total'] = host_cpu_time_total
state['previous_host_cpu_time_busy'] = host_cpu_time_busy
log.info('Completed an iteration')
return state
LOG.info('Completed an iteration')
self.state = state
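Step 8 of the docstring above describes the on-disk layout these appends maintain: one file per VM under <local_data_directory>/vm, each holding the most recent data_length CPU MHz samples. A free-standing sketch of that behaviour; the module's own helper (called as self.append_vm_data_locally above) may differ in details:

import os


def append_vm_data_locally(vm_path, cpu_mhz, data_length):
    # cpu_mhz maps VM UUID -> latest CPU usage in MHz; each VM gets its own
    # file under vm_path, trimmed to the last data_length values.
    for uuid, mhz in cpu_mhz.items():
        path = os.path.join(vm_path, uuid)
        values = []
        if os.access(path, os.F_OK):
            with open(path) as f:
                values = [line.strip() for line in f if line.strip()]
        values.append(str(mhz))
        with open(path, 'w') as f:
            f.write('\n'.join(values[-data_length:]) + '\n')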
@contract
@@ -347,7 +348,7 @@ class Collector(service.Service):
:return: A list of VM UUIDs added since the last time frame.
:rtype: list(str)
"""
return substract_lists(current_vms, previous_vms)
return self.substract_lists(current_vms, previous_vms)
@contract
@@ -567,45 +568,42 @@ class Collector(service.Service):
:rtype: tuple(dict(str : int), dict(str : int))
"""
previous_vms = previous_cpu_time.keys()
added_vms = get_added_vms(previous_vms, current_vms)
removed_vms = get_removed_vms(previous_vms, current_vms)
added_vms = self.get_added_vms(previous_vms, current_vms)
removed_vms = self.get_removed_vms(previous_vms, current_vms)
cpu_mhz = {}
for uuid in removed_vms:
del previous_cpu_time[uuid]
for uuid, cpu_time in previous_cpu_time.items():
current_cpu_time = get_cpu_time(vir_connection, uuid)
current_cpu_time = self.get_cpu_time(vir_connection, uuid)
if current_cpu_time < cpu_time:
if log.isEnabledFor(logging.DEBUG):
log.debug('VM %s: current_cpu_time < cpu_time: ' +
LOG.debug('VM %s: current_cpu_time < cpu_time: ' +
'previous CPU time %d, ' +
'current CPU time %d',
uuid, cpu_time, current_cpu_time)
log.debug('VM %s: using previous CPU MHz %d',
LOG.debug('VM %s: using previous CPU MHz %d',
uuid, previous_cpu_mhz[uuid])
cpu_mhz[uuid] = previous_cpu_mhz[uuid]
else:
if log.isEnabledFor(logging.DEBUG):
log.debug('VM %s: previous CPU time %d, ' +
LOG.debug('VM %s: previous CPU time %d, ' +
'current CPU time %d, ' +
'previous time %.10f, ' +
'current time %.10f',
uuid, cpu_time, current_cpu_time,
previous_time, current_time)
cpu_mhz[uuid] = calculate_cpu_mhz(physical_core_mhz,
cpu_mhz[uuid] = self.calculate_cpu_mhz(physical_core_mhz,
previous_time,
current_time,
cpu_time,
current_cpu_time)
previous_cpu_time[uuid] = current_cpu_time
if log.isEnabledFor(logging.DEBUG):
log.debug('VM %s: CPU MHz %d', uuid, cpu_mhz[uuid])
LOG.debug('VM %s: CPU MHz %d', uuid, cpu_mhz[uuid])
for uuid in added_vms:
if added_vm_data[uuid]:
cpu_mhz[uuid] = added_vm_data[uuid][-1]
previous_cpu_time[uuid] = get_cpu_time(vir_connection, uuid)
previous_cpu_time[uuid] = self.get_cpu_time(vir_connection, uuid)
return previous_cpu_time, cpu_mhz
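The per-VM figures above come from self.calculate_cpu_mhz(), which is not shown in this hunk. For orientation, a hedged sketch of that calculation as it works in the original OpenStack Neat collector this module derives from, assuming libvirt reports cumulative VM CPU time in nanoseconds:

def calculate_cpu_mhz(core_mhz, previous_time, current_time,
                      previous_cpu_time, current_cpu_time):
    """Average CPU usage in MHz over [previous_time, current_time].

    previous_cpu_time / current_cpu_time: cumulative CPU time consumed by
    the VM, in nanoseconds (assumption about the libvirt convention).
    previous_time / current_time: wall-clock timestamps in seconds.
    """
    elapsed_ns = (current_time - previous_time) * 1000000000
    return int(core_mhz * float(current_cpu_time - previous_cpu_time) /
               elapsed_ns)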
@@ -658,7 +656,8 @@ class Collector(service.Service):
@contract
def get_host_cpu_mhz(self, cpu_mhz, previous_cpu_time_total, previous_cpu_time_busy):
def get_host_cpu_mhz(self, cpu_mhz, previous_cpu_time_total,
previous_cpu_time_busy):
""" Get the average CPU utilization in MHz for a set of VMs.
:param cpu_mhz: The total frequency of the physical CPU in MHz.
@@ -677,15 +676,15 @@ class Collector(service.Service):
cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) / \
(cpu_time_total - previous_cpu_time_total))
if cpu_usage < 0:
raise ValueError('The host CPU usage in MHz must be >=0, but it is: ' + str(cpu_usage) +
raise ValueError(
'The host CPU usage in MHz must be >=0, but it is: ' + str(
cpu_usage) +
'; cpu_mhz=' + str(cpu_mhz) +
'; previous_cpu_time_total=' + str(previous_cpu_time_total) +
'; cpu_time_total=' + str(cpu_time_total) +
'; previous_cpu_time_busy=' + str(previous_cpu_time_busy) +
'; cpu_time_busy=' + str(cpu_time_busy))
return cpu_time_total, \
cpu_time_busy, \
cpu_usage
return cpu_time_total, cpu_time_busy, cpu_usage
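A quick numeric check of the formula above, assuming the two cumulative counters are the host's total and busy CPU time in jiffies (in the original Neat code they are read from /proc/stat; the values below are made up):

cpu_mhz = 8000                      # total MHz of all physical cores
previous_cpu_time_total, cpu_time_total = 10000, 10480
previous_cpu_time_busy, cpu_time_busy = 2000, 2120

cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) /
                (cpu_time_total - previous_cpu_time_total))
assert cpu_usage == 2000            # 8000 * 120 / 480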
@contract()
@@ -715,7 +714,8 @@ class Collector(service.Service):
@contract()
def log_host_overload(self, db, overload_threshold, hostname, previous_overload,
def log_host_overload(self, db, overload_threshold, hostname,
previous_overload,
host_total_mhz, host_utilization_mhz):
""" Log to the DB whether the host is overloaded.
@@ -745,7 +745,6 @@ class Collector(service.Service):
if previous_overload != -1 and previous_overload != overload_int \
or previous_overload == -1:
db.insert_host_overload(hostname, overload)
if log.isEnabledFor(logging.DEBUG):
log.debug('Overload state logged: %s', str(overload))
LOG.debug('Overload state logged: %s', str(overload))
return overload_int
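The condition above (written with the old backslash continuation) only writes to the database when the overload flag changes, or on the very first iteration where previous_overload is -1, so the host_overload table records transitions rather than one row per collection cycle. A logically equivalent condensation (db is any object exposing insert_host_overload, as in the hunk):

def log_overload_transition(db, hostname, previous_overload, overload):
    # overload is a boolean; only transitions (or the first sample) are stored.
    overload_int = int(overload)
    if previous_overload == -1 or previous_overload != overload_int:
        db.insert_host_overload(hostname, overload)
    return overload_int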

View File

@@ -118,12 +118,13 @@ from terracotta.openstack.common import threadgroup
from terracotta.utils import db_utils
log = logging.getLogger(__name__)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class LocalManager(periodic_task.PeriodicTasks):
def __init__(self):
super(Service, self).__init__()
super(LocalManager, self).__init__()
self.state = self.init_state()
self.tg = threadgroup.ThreadGroup()
self.tg.add_dynamic_timer(
@@ -193,6 +194,7 @@ class LocalManager(periodic_task.PeriodicTasks):
"""
LOG.info('Started an iteration')
state = self.state
vm_path = common.build_local_vm_path(CONF.local_data_directory)
vm_cpu_mhz = self.get_local_vm_data(vm_path)
vm_ram = self.get_ram(state['vir_connection'], vm_cpu_mhz.keys())
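This hunk fixes the same construction bug as the Collector hunk earlier: super(Service, self) names something other than the class that defines __init__, which fails as soon as the object is instantiated. With Python 2 style super(), the first argument must be the defining class; a tiny self-contained illustration:

class Base(object):
    def __init__(self):
        self.started = True


class LocalManager(Base):
    def __init__(self):
        # Passing a class that `self` is not an instance of (or an unbound
        # name such as the old `Service`) blows up here; the class defining
        # the method is the only safe explicit argument in Python 2.
        super(LocalManager, self).__init__()


assert LocalManager().started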

View File

@@ -1,86 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import datetime
import neat.globals.db_cleaner as cleaner
import neat.common as common
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class DbCleaner(TestCase):
@qc(10)
def start(
iterations=int_(min=0, max=10),
time_interval=int_(min=0)
):
with MockTransaction:
state = {'property': 'value'}
config = {
'log_directory': 'dir',
'log_level': 2,
'db_cleaner_interval': str(time_interval)}
paths = [cleaner.DEFAULT_CONFIG_PATH, cleaner.CONFIG_PATH]
fields = cleaner.REQUIRED_FIELDS
expect(cleaner).read_and_validate_config(paths, fields). \
and_return(config).once()
expect(common).init_logging('dir', 'db-cleaner.log', 2).once()
expect(common).start(cleaner.init_state,
cleaner.execute,
config,
time_interval).and_return(state).once()
assert cleaner.start() == state
@qc(1)
def init_state():
with MockTransaction:
db = mock('db')
expect(cleaner).init_db('db'). \
and_return(db).once()
config = {'sql_connection': 'db',
'db_cleaner_interval': 7200}
state = cleaner.init_state(config)
assert state['db'] == db
assert state['time_delta'] == datetime.timedelta(0, 7200)
@qc(1)
def execute(
uuid=str_(of='abc123-', min_length=36, max_length=36)
):
with MockTransaction:
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
time = datetime.datetime.today()
for i in range(10):
db.vm_resource_usage.insert().execute(
vm_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
state = {
'db': db,
'time_delta': datetime.timedelta(seconds=5)}
expect(cleaner).today(). \
and_return(time.replace(second=10)).once()
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(10)
cleaner.execute({}, state)
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(5, 10)
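These deleted property-based tests (pyqcy + mocktest) pinned down the cleaner's retention rule: each run drops VM resource-usage samples older than now minus db_cleaner_interval and keeps everything newer. A pure-Python sketch of that rule over an in-memory sample list (not the project's actual DB code):

import datetime


def prune_old_samples(samples, now, time_delta):
    # samples: list of (timestamp, cpu_mhz); keep samples newer than the cutoff.
    cutoff = now - time_delta
    return [(ts, mhz) for ts, mhz in samples if ts >= cutoff]


now = datetime.datetime(2012, 1, 1, 0, 0, 10)
samples = [(datetime.datetime(2012, 1, 1, 0, 0, i), i) for i in range(10)]
kept = prune_old_samples(samples, now, datetime.timedelta(seconds=5))
assert [mhz for _, mhz in kept] == [5, 6, 7, 8, 9]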

View File

@@ -1,408 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import bottle
from hashlib import sha1
from novaclient import client
import time
import subprocess
import neat.globals.manager as manager
import neat.common as common
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class GlobalManager(TestCase):
def test_raise_error(self):
for error_code in [400, 401, 403, 405, 412]:
try:
manager.raise_error(error_code)
except bottle.HTTPResponse as e:
assert e.status_code == error_code
else:
assert False
try:
manager.raise_error(1)
except bottle.HTTPResponse as e:
assert e.status_code == 500
else:
assert False
def test_error(self):
try:
manager.error()
except bottle.HTTPResponse as e:
assert e.status_code == 405
else:
assert False
def test_validate_params(self):
with MockTransaction:
expect(manager).raise_error(401).and_return(1).exactly(3).times()
manager.validate_params('test', 'test', {})
manager.validate_params('test', 'test', {'username': 'test'})
manager.validate_params('test', 'test', {'password': 'test'})
with MockTransaction:
expect(manager).raise_error(403).exactly(2).times()
manager.validate_params(
sha1('test').hexdigest(),
sha1('test2').hexdigest(),
{'username': sha1('test1').hexdigest(),
'password': sha1('test2').hexdigest(),
'host': 'test',
'reason': 0})
manager.validate_params(
sha1('test1').hexdigest(),
sha1('test').hexdigest(),
{'username': sha1('test1').hexdigest(),
'password': sha1('test2').hexdigest(),
'host': 'test',
'reason': 0})
assert manager.validate_params(
sha1('test1').hexdigest(),
sha1('test2').hexdigest(),
{'username': sha1('test1').hexdigest(),
'password': sha1('test2').hexdigest(),
'time': time.time(),
'host': 'test',
'reason': 1,
'vm_uuids': ['qwe', 'asd']})
assert manager.validate_params(
sha1('test1').hexdigest(),
sha1('test2').hexdigest(),
{'username': sha1('test1').hexdigest(),
'password': sha1('test2').hexdigest(),
'time': time.time(),
'host': 'test',
'reason': 0})
with MockTransaction:
expect(manager).raise_error(400).exactly(7).times()
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': time.time()})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': time.time(),
'reason': 1})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': time.time(),
'reason': 0})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': time.time(),
'reason': 1,
'host': 'test'})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': time.time(),
'reason': 0,
'vm_uuids': []})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'reason': 0,
'vm_uuids': []})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'reason': 1,
'vm_uuids': []})
with MockTransaction:
expect(manager).raise_error(412).exactly(2).times()
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': 1.,
'reason': 0,
'host': 'test'})
manager.validate_params('test', 'test', {'username': 'test',
'password': 'test',
'time': time.time() - 6,
'reason': 0,
'host': 'test'})
assert manager.validate_params('test', 'test',
{'username': 'test',
'password': 'test',
'time': time.time(),
'reason': 0,
'host': 'test'})
assert manager.validate_params('test', 'test',
{'username': 'test',
'password': 'test',
'time': time.time() - 4,
'reason': 0,
'host': 'test'})
def test_start(self):
with MockTransaction:
app = mock('app')
db = mock('db')
hosts = ['host1', 'host2']
state = {'property': 'value',
'db': db,
'compute_hosts': hosts,
'host_macs': {}}
config = {
'log_directory': 'dir',
'log_level': 2,
'global_manager_host': 'localhost',
'global_manager_port': 8080,
'ether_wake_interface': 'eth0'}
paths = [manager.DEFAULT_CONFIG_PATH, manager.CONFIG_PATH]
fields = manager.REQUIRED_FIELDS
expect(manager).read_and_validate_config(paths, fields). \
and_return(config).once()
expect(common).init_logging('dir', 'global-manager.log', 2).once()
expect(manager).init_state(config). \
and_return(state).once()
expect(manager).switch_hosts_on(db, 'eth0', {}, hosts).once()
expect(bottle).app().and_return(app).once()
expect(bottle).run(host='localhost', port=8080).once()
manager.start()
def test_init_state(self):
with MockTransaction:
db = mock('db')
nova = mock('nova')
hosts = ['host1', 'host2']
config = {'sql_connection': 'db',
'os_admin_user': 'user',
'os_admin_password': 'password',
'os_admin_tenant_name': 'tenant',
'os_auth_url': 'url',
'compute_hosts': 'host1, host2'}
expect(manager).init_db('db').and_return(db).once()
expect(client).Client(
2, 'user', 'password', 'tenant', 'url',
service_type='compute'). \
and_return(nova).once()
expect(common).parse_compute_hosts('host1, host2'). \
and_return(hosts).once()
state = manager.init_state(config)
assert state['previous_time'] == 0
assert state['db'] == db
assert state['nova'] == nova
assert state['hashed_username'] == sha1('user').hexdigest()
assert state['hashed_password'] == sha1('password').hexdigest()
assert state['compute_hosts'] == hosts
assert state['host_macs'] == {}
def test_service(self):
app = mock('app')
state = {'hashed_username': 'user',
'hashed_password': 'password'}
config = {'global_manager_host': 'localhost',
'global_manager_port': 8080}
app.state = {'state': state,
'config': config}
with MockTransaction:
params = {'reason': 0,
'host': 'host'}
expect(manager).get_params(Any).and_return(params).once()
expect(manager).get_remote_addr(Any).and_return('addr').once()
expect(bottle).app().and_return(app).once()
expect(manager).validate_params('user', 'password', params). \
and_return(True).once()
expect(manager).execute_underload(config, state, 'host').once()
manager.service()
with MockTransaction:
params = {'reason': 1,
'host': 'host',
'vm_uuids': 'vm_uuids'}
expect(manager).get_params(Any).and_return(params).once()
expect(manager).get_remote_addr(Any).and_return('addr').once()
expect(bottle).app().and_return(app).once()
expect(manager).validate_params('user', 'password', params). \
and_return(True).once()
expect(manager).execute_overload(config, state, 'host', 'vm_uuids'). \
once()
manager.service()
@qc(20)
def vms_by_host(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=str_(of='abc123-', min_length=10, max_length=10),
min_length=0, max_length=3
),
y=list_(str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=3),
host=str_(of='abc123-', min_length=5, max_length=5)
):
with MockTransaction:
extra_vms = {}
for vm in y:
extra_vms[vm] = host
x.update(extra_vms)
vms = []
for vm_uuid, h in x.items():
vm = mock(vm_uuid)
vm.id = vm_uuid
expect(manager).vm_hostname(vm).and_return(h).once()
vms.append(vm)
nova = mock('nova')
nova.servers = mock('servers')
expect(nova.servers).list().and_return(vms).once()
assert set(manager.vms_by_host(nova, host)) == set(y)
@qc(1)
def vms_by_hosts(
x=list_(str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=3),
y=list_(str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=3),
host1=str_(of='abc123-', min_length=5, max_length=10),
host2=str_(of='abc123-', min_length=5, max_length=10)
):
with MockTransaction:
vms1 = {}
for vm in x:
vms1[vm] = host1
vms2 = {}
for vm in y:
vms2[vm] = host2
vms_all = dict(vms1)
vms_all.update(vms2)
vms = []
for vm_uuid, h in vms_all.items():
vm = mock(vm_uuid)
vm.id = vm_uuid
expect(manager).vm_hostname(vm).and_return(h).once()
vms.append(vm)
nova = mock('nova')
nova.servers = mock('servers')
expect(nova.servers).list().and_return(vms).once()
result = manager.vms_by_hosts(nova, [host1, host2])
result_sets = {}
for host, data in result.items():
result_sets[host] = set(data)
assert result_sets == {host1: set(x), host2: set(y)}
def test_host_used_ram(self):
with MockTransaction:
hostname = 'hosthost'
nova = mock('nova')
nova.hosts = mock('hosts')
host1 = mock('host1')
host1.memory_mb = 4000
host2 = mock('host2')
host2.memory_mb = 3000
expect(nova.hosts).get(hostname). \
and_return([host1, host2]).once()
assert manager.host_used_ram(nova, hostname) == 3000
with MockTransaction:
hostname = 'hosthost'
nova = mock('nova')
nova.hosts = mock('hosts')
host1 = mock('host1')
host1.memory_mb = 4000
host2 = mock('host2')
host2.memory_mb = 3000
host3 = mock('host3')
host3.memory_mb = 3500
expect(nova.hosts).get(hostname). \
and_return([host1, host2, host3]).once()
assert manager.host_used_ram(nova, hostname) == 3500
def test_flavors_ram(self):
with MockTransaction:
nova = mock('nova')
nova.flavors = mock('flavors')
fl1 = mock('fl1')
fl1.id = '1'
fl1.ram = 1000
fl2 = mock('fl2')
fl2.id = '2'
fl2.ram = 2000
expect(nova.flavors).list().and_return([fl1, fl2]).once()
assert manager.flavors_ram(nova) == {'1': 1000, '2': 2000}
def test_vms_ram_limit(self):
with MockTransaction:
nova = mock('nova')
nova.servers = mock('servers')
flavors_to_ram = {'1': 512, '2': 1024}
expect(manager).flavors_ram(nova). \
and_return(flavors_to_ram).once()
vm1 = mock('vm1')
vm1.flavor = {'id': '1'}
vm2 = mock('vm2')
vm2.flavor = {'id': '2'}
expect(nova.servers).get('vm1').and_return(vm1).once()
expect(nova.servers).get('vm2').and_return(vm2).once()
assert manager.vms_ram_limit(nova, ['vm1', 'vm2']) == \
{'vm1': 512, 'vm2': 1024}
def test_switch_hosts_off(self):
db = db_utils.init_db('sqlite:///:memory:')
with MockTransaction:
expect(subprocess).call('ssh h1 "sleep"', shell=True).once()
expect(subprocess).call('ssh h2 "sleep"', shell=True).once()
expect(db).insert_host_states({
'h1': 0,
'h2': 0}).once()
manager.switch_hosts_off(db, 'sleep', ['h1', 'h2'])
with MockTransaction:
expect(subprocess).call.never()
expect(db).insert_host_states({
'h1': 0,
'h2': 0}).once()
manager.switch_hosts_off(db, '', ['h1', 'h2'])
def test_switch_hosts_on(self):
db = db_utils.init_db('sqlite:///:memory:')
with MockTransaction:
expect(subprocess).call(any_of(['ether-wake -i eth0 mac1',
'etherwake -i eth0 mac1']),
shell=True).once()
expect(subprocess).call(any_of(['ether-wake -i eth0 mac2',
'etherwake -i eth0 mac2']),
shell=True).once()
expect(manager).host_mac('h1').and_return('mac1').once()
expect(db).insert_host_states({
'h1': 1,
'h2': 1}).once()
manager.switch_hosts_on(db, 'eth0', {'h2': 'mac2'}, ['h1', 'h2'])
with MockTransaction:
expect(subprocess).call(any_of(['ether-wake -i eth0 mac1',
'etherwake -i eth0 mac1']),
shell=True).once()
expect(subprocess).call(any_of(['ether-wake -i eth0 mac2',
'etherwake -i eth0 mac2']),
shell=True).once()
expect(manager).host_mac('h1').and_return('mac1').once()
expect(manager).host_mac('h2').and_return('mac2').once()
expect(db).insert_host_states({
'h1': 1,
'h2': 1}).once()
manager.switch_hosts_on(db, 'eth0', {}, ['h1', 'h2'])
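The deleted validate_params tests encode the request-validation contract of the old REST global manager: 401 when credentials are missing, 403 when the SHA-1 hashed credentials do not match, 400 when required fields are absent (reason 1, an overload request, additionally needs a non-empty vm_uuids list), and 412 when the timestamp is more than about five seconds old. A simplified reconstruction consistent with those assertions (raise_error is injected here for brevity; the original raised bottle.HTTPResponse via a module helper, and the exact freshness boundary is illustrative):

import time as time_module


def validate_params(expected_user_sha1, expected_password_sha1, params,
                    raise_error):
    if 'username' not in params or 'password' not in params:
        return raise_error(401)
    if (params['username'] != expected_user_sha1 or
            params['password'] != expected_password_sha1):
        return raise_error(403)
    if ('time' not in params or 'host' not in params
            or 'reason' not in params
            or (params['reason'] == 1 and not params.get('vm_uuids'))):
        return raise_error(400)
    if time_module.time() - params['time'] > 5:
        return raise_error(412)
    return True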

View File

@@ -1,266 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.globals.vm_placement.bin_packing as packing
import logging
logging.disable(logging.CRITICAL)
class BinPacking(TestCase):
def test_best_fit_decreasing_factory(self):
alg = packing.best_fit_decreasing_factory(300, 20.,
{'cpu_threshold': 0.8,
'ram_threshold': 0.9,
'last_n_vm_cpu': 1})
hosts_cpu_usage = {
'host1': 200,
'host2': 2200,
'host3': 1200}
hosts_cpu_total = {
'host1': 4000,
'host2': 4000,
'host3': 4000}
hosts_ram_usage = {
'host1': 3276,
'host2': 6348,
'host3': 5324}
hosts_ram_total = {
'host1': 8192,
'host2': 8192,
'host3': 8192}
inactive_hosts_cpu = {
'host4': 3000,
'host5': 1000,
'host6': 2000}
inactive_hosts_ram = {
'host4': 4096,
'host5': 1024,
'host6': 2048}
vms_cpu = {
'vm1': [100, 1000],
'vm2': [100, 1000],
'vm3': [100, 1000]}
vms_ram = {
'vm1': 2048,
'vm2': 4096,
'vm3': 2048}
self.assertEqual(alg(hosts_cpu_usage, hosts_cpu_total,
hosts_ram_usage, hosts_ram_total,
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram), ({
'vm1': 'host6',
'vm2': 'host1',
'vm3': 'host3'}, {}))
def test_get_available_resources(self):
self.assertEqual(packing.get_available_resources(
0.8,
{'host1': 700, 'host2': 200}, {'host1': 1000, 'host2': 2000}),
{'host1': 100, 'host2': 1400})
def test_best_fit_decreasing(self):
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 1024,
'host2': 4096,
'host3': 2048}
inactive_hosts_cpu = {}
inactive_hosts_ram = {}
vms_cpu = {
'vm1': [100, 2000],
'vm2': [100, 1000],
'vm3': [100, 3000]}
vms_ram = {
'vm1': 512,
'vm2': 512,
'vm3': 512}
assert packing.best_fit_decreasing(
1, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host3',
'vm2': 'host2',
'vm3': 'host1'}
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 4096,
'host2': 1024,
'host3': 2048}
inactive_hosts_cpu = {}
inactive_hosts_ram = {}
vms_cpu = {
'vm1': [100, 1000],
'vm2': [100, 1000],
'vm3': [100, 1000]}
vms_ram = {
'vm1': 1536,
'vm2': 512,
'vm3': 1536}
assert packing.best_fit_decreasing(
1, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host1',
'vm2': 'host2',
'vm3': 'host3'}
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 4096,
'host2': 1024,
'host3': 2048}
inactive_hosts_cpu = {}
inactive_hosts_ram = {}
vms_cpu = {
'vm1': [100, 1000],
'vm2': [100, 1000],
'vm3': [100, 1000]}
vms_ram = {
'vm1': 1536,
'vm2': 1536,
'vm3': 1536}
assert packing.best_fit_decreasing(
1, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host1',
'vm2': 'host1',
'vm3': 'host3'}
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 4096,
'host2': 1024,
'host3': 2048}
inactive_hosts_cpu = {}
inactive_hosts_ram = {}
vms_cpu = {
'vm1': [100, 1000],
'vm2': [100, 1000],
'vm3': [100, 1000]}
vms_ram = {
'vm1': 3072,
'vm2': 1536,
'vm3': 1536}
assert packing.best_fit_decreasing(
1, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {}
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 4096,
'host2': 1024,
'host3': 2048}
inactive_hosts_cpu = {
'host4': 3000,
'host5': 1000,
'host6': 2000}
inactive_hosts_ram = {
'host4': 4096,
'host5': 1024,
'host6': 2048}
vms_cpu = {
'vm1': [100, 1000],
'vm2': [100, 1000],
'vm3': [100, 1000]}
vms_ram = {
'vm1': 2048,
'vm2': 4096,
'vm3': 2048}
assert packing.best_fit_decreasing(
1, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host6',
'vm2': 'host1',
'vm3': 'host3'}
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 4096,
'host2': 1024,
'host3': 2048}
inactive_hosts_cpu = {
'host4': 3000,
'host5': 1000,
'host6': 2000}
inactive_hosts_ram = {
'host4': 4096,
'host5': 1024,
'host6': 2048}
vms_cpu = {
'vm1': [100, 1000],
'vm2': [100, 1000],
'vm3': [100, 1000]}
vms_ram = {
'vm1': 2048,
'vm2': 5120,
'vm3': 2048}
assert packing.best_fit_decreasing(
1, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {}
hosts_cpu = {
'host1': 3000,
'host2': 1000,
'host3': 2000}
hosts_ram = {
'host1': 4096,
'host2': 1024,
'host3': 2048}
inactive_hosts_cpu = {}
inactive_hosts_ram = {}
vms_cpu = {
'vm1': [1000, 1000],
'vm2': [0, 2000],
'vm3': [500, 1500]}
vms_ram = {
'vm1': 1536,
'vm2': 1536,
'vm3': 1536}
assert packing.best_fit_decreasing(
2, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host1',
'vm2': 'host1',
'vm3': 'host3'}
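The deleted bin-packing tests exercise a best-fit-decreasing placement: VMs are considered in order of decreasing demand, each goes to the active host with the least spare capacity that still fits, inactive hosts are switched on only when nothing active fits, and an empty mapping signals that no feasible placement exists. A simplified single-resource sketch of that heuristic (the deleted code also tracks RAM, CPU/RAM thresholds and a last_n_vm_cpu window, and its tie-breaking may differ):

def best_fit_decreasing(hosts_free, inactive_hosts_free, vm_demand):
    # hosts_free / inactive_hosts_free: host -> spare capacity;
    # vm_demand: vm -> requested capacity. Returns vm -> host, or {}.
    active = dict(hosts_free)
    standby = dict(inactive_hosts_free)
    placement = {}
    for vm, demand in sorted(vm_demand.items(),
                             key=lambda kv: kv[1], reverse=True):
        candidates = [(free, h) for h, free in active.items() if free >= demand]
        if not candidates:
            # Wake up the smallest inactive host that can take this VM.
            wakeable = [(free, h) for h, free in standby.items()
                        if free >= demand]
            if not wakeable:
                return {}
            free, host = min(wakeable)
            active[host] = standby.pop(host)
            candidates = [(free, host)]
        free, host = min(candidates)      # tightest fit wins
        active[host] -= demand
        placement[vm] = host
    return placement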

View File

@@ -1,83 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
from operator import le
import neat.locals.overload.mhod.bruteforce as b
import neat.locals.overload.mhod.nlp as nlp
import logging
logging.disable(logging.CRITICAL)
class Bruteforce(TestCase):
def test_solve2(self):
def fn1(x, y):
return x + y
def fn2(x, y):
return 2 * x + y
def fn3(x, y):
return x - y
def fn4(x, y):
return x / y
self.assertEqual([round(x, 1)
for x in b.solve2(fn1, (fn1, le, 10), 0.1, 1.0)],
[1.0, 1.0])
self.assertEqual([round(x, 1)
for x in b.solve2(fn1, (fn1, le, 0.5), 0.1, 1.0)],
[0.0, 0.5])
self.assertEqual([round(x, 1)
for x in b.solve2(fn2, (fn1, le, 0.5), 0.1, 1.0)],
[0.5, 0.0])
self.assertEqual([round(x, 1)
for x in b.solve2(fn3, (fn3, le, 10), 0.1, 1.0)],
[1.0, 0.0])
self.assertEqual([round(x, 1)
for x in b.solve2(fn4, (fn4, le, 10), 0.1, 1.0)],
[1.0, 0.1])
def test_optimize(self):
with MockTransaction:
step = 0.1
limit = 1
otf = 0.3
migration_time = 20.
ls = [lambda x: x, lambda x: x]
p = [[0, 1]]
state_vector = [0, 1]
time_in_states = 10
time_in_state_n = 5
objective = mock('objective')
constraint = mock('constraint')
solution = [1, 2, 3]
expect(nlp).build_objective(ls, state_vector, p). \
and_return(objective).once()
expect(nlp).build_constraint(
otf, migration_time, ls, state_vector,
p, time_in_states, time_in_state_n). \
and_return(constraint).once()
expect(b).solve2(objective, constraint, step, limit). \
and_return(solution).once()
self.assertEqual(
b.optimize(step, limit, otf, migration_time, ls,
p, state_vector, time_in_states, time_in_state_n),
solution)
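The deleted bruteforce tests describe solve2 as an exhaustive grid search: maximize the objective over x, y in [0, limit] at the given step, subject to a constraint passed as (function, operator, value). A reconstruction consistent with those assertions (tie-breaking keeps the first maximum found, division-by-zero points are skipped, and floating-point noise is smoothed with round()):

def solve2(objective, constraint, step, limit):
    fn, op, value = constraint
    best_res, solution = 0, []
    steps = int(round(limit / step)) + 1
    for i in range(steps):
        for j in range(steps):
            x, y = round(i * step, 10), round(j * step, 10)
            try:
                res = objective(x, y)
                feasible = op(fn(x, y), value)
            except ZeroDivisionError:      # e.g. objectives like x / y
                continue
            if feasible and res > best_res:
                best_res, solution = res, [x, y]
    return solution


import operator
assert solve2(lambda x, y: x + y,
              (lambda x, y: x + y, operator.le, 0.5), 0.1, 1.0) == [0.0, 0.5]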

View File

@@ -1,166 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.overload.mhod.multisize_estimation as estimation
import neat.locals.overload.mhod.core as c
import logging
logging.disable(logging.CRITICAL)
class Core(TestCase):
def test_init_state(self):
state = c.init_state(100, [20, 40], 2)
self.assertEquals(state['previous_state'], 0)
self.assertEquals(state['previous_utilization'], [])
self.assertEquals(state['time_in_states'], 0)
self.assertEquals(state['time_in_state_n'], 0)
self.assertTrue('request_windows' in state)
self.assertTrue('estimate_windows' in state)
self.assertTrue('variances' in state)
self.assertTrue('acceptable_variances' in state)
def test_utilization_to_state(self):
state_config = [0.4, 0.7]
self.assertEqual(c.utilization_to_state(state_config, 0.0), 0)
self.assertEqual(c.utilization_to_state(state_config, 0.1), 0)
self.assertEqual(c.utilization_to_state(state_config, 0.2), 0)
self.assertEqual(c.utilization_to_state(state_config, 0.3), 0)
self.assertEqual(c.utilization_to_state(state_config, 0.4), 1)
self.assertEqual(c.utilization_to_state(state_config, 0.5), 1)
self.assertEqual(c.utilization_to_state(state_config, 0.6), 1)
self.assertEqual(c.utilization_to_state(state_config, 0.7), 2)
self.assertEqual(c.utilization_to_state(state_config, 0.8), 2)
self.assertEqual(c.utilization_to_state(state_config, 0.9), 2)
self.assertEqual(c.utilization_to_state(state_config, 1.0), 2)
self.assertEqual(c.utilization_to_state(state_config, 1.1), 2)
self.assertEqual(c.utilization_to_state([1.0], 0.0), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.1), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.2), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.3), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.4), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.5), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.6), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.7), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.8), 0)
self.assertEqual(c.utilization_to_state([1.0], 0.9), 0)
self.assertEqual(c.utilization_to_state([1.0], 1.0), 1)
self.assertEqual(c.utilization_to_state([1.0], 1.1), 1)
def test_build_state_vector(self):
state_config = [0.4, 0.7]
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.1]),
[1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.2]),
[1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.3]),
[1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.4]),
[0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.5]),
[0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.6]),
[0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.7]),
[0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.8]),
[0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.9]),
[0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 1.0]),
[0, 0, 1])
def test_get_current_state(self):
self.assertEqual(c.get_current_state([1, 0, 0]), 0)
self.assertEqual(c.get_current_state([0, 1, 0]), 1)
self.assertEqual(c.get_current_state([0, 0, 1]), 2)
def test_utilization_to_states(self):
state_config = [0.4, 0.7]
data = [0.25, 0.30, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.25, 0.38, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
states = [0, 0, 1, 1, 1, 2, 2, 2, 2, 1, 1,
1, 1, 0, 0, 1, 1, 1, 1, 1, 2]
self.assertEqual(c.utilization_to_states(state_config, data), states)
state_config = [1.0]
data = [0.5, 0.5, 1.0, 1.0, 0.5]
states = [0, 0, 1, 1, 0]
self.assertEqual(c.utilization_to_states(state_config, data), states)
def test_issue_command_deterministic(self):
self.assertEqual(c.issue_command_deterministic([1]), False)
self.assertEqual(c.issue_command_deterministic([]), True)
def test_mhod(self):
state_config = [1.0]
otf = 0.1
window_sizes = [30, 40]
bruteforce_step = 0.5
learning_steps = 0
time_step = 300
migration_time = 20.
utilization = [1.0]
state = c.init_state(10, window_sizes, 2)
with MockTransaction:
state['previous_utilization'] = []
expect(estimation).select_best_estimates.and_return([[0., 0.], [0., 0.]])
expect(c).get_current_state.and_return(1).once()
decision, _ = c.mhod(state_config, otf, window_sizes, bruteforce_step,
learning_steps, time_step, migration_time, utilization, state)
self.assertFalse(decision)
with MockTransaction:
state['previous_utilization'] = []
expect(estimation).select_best_estimates.and_return([[0., 0.], [0., 0.]])
expect(c).get_current_state.and_return(0).once()
decision, _ = c.mhod(state_config, otf, window_sizes, bruteforce_step,
learning_steps, time_step, migration_time, utilization, state)
self.assertFalse(decision)
with MockTransaction:
state['previous_utilization'] = []
expect(estimation).select_best_estimates.and_return([[0., 1.], [0., 1.]])
expect(c).get_current_state.and_return(0).once()
decision, _ = c.mhod(state_config, otf, window_sizes, bruteforce_step,
learning_steps, time_step, migration_time, utilization, state)
self.assertFalse(decision)
with MockTransaction:
state['previous_utilization'] = []
expect(estimation).select_best_estimates.and_return([[0., 1.], [0., 1.]])
expect(c).get_current_state.and_return(1).once()
decision, _ = c.mhod(state_config, otf, window_sizes, bruteforce_step,
learning_steps, time_step, migration_time, utilization, state)
self.assertTrue(decision)
# with MockTransaction:
# utilization = [1.0, 1.0]
# state['previous_utilization'] = [1.0, 1.0]
# state['time_in_states'] = 2
# expect(estimation).select_best_estimates.never()
# decision, _ = c.mhod(state_config, otf, window_sizes, bruteforce_step,
# learning_steps, time_step, migration_time, utilization, state)
# self.assertFalse(decision)
def deque_maxlen(coll):
return int(re.sub("\)$", "", re.sub(".*=", "", coll.__repr__())))
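The assertions above fully determine the threshold-to-state mapping: a utilization value belongs to state k when it has reached k of the configured thresholds (boundaries are inclusive, so 0.4 already counts as state 1 for [0.4, 0.7]), and build_state_vector one-hot encodes the state of the most recent value. A compact reconstruction; the deleted module may have spelled it differently:

import bisect


def utilization_to_state(state_config, utilization):
    # Number of thresholds the value has reached or exceeded.
    return bisect.bisect_right(state_config, utilization)


def build_state_vector(state_config, utilization):
    # One-hot vector marking the state of the most recent utilization value.
    state = utilization_to_state(state_config, utilization[-1])
    return [int(i == state) for i in range(len(state_config) + 1)]


assert utilization_to_state([0.4, 0.7], 0.7) == 2
assert build_state_vector([0.4, 0.7], [0.0, 0.5]) == [0, 1, 0]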

File diff suppressed because one or more lines are too long

View File

@@ -1,40 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.overload.mhod.l_2_states as l
import logging
logging.disable(logging.CRITICAL)
class L2States(TestCase):
def test_l0(self):
p = [[0.4, 0.6],
[0.9, 0.1]]
p0 = [1, 0]
self.assertAlmostEqual(l.l0(p0, p, [0.2, 0.8]), 1.690, 3)
self.assertAlmostEqual(l.l0(p0, p, [0.62, 0.38]), 1.404, 3)
def test_l1(self):
p = [[0.4, 0.6],
[0.9, 0.1]]
p0 = [1, 0]
self.assertAlmostEqual(l.l1(p0, p, [0.2, 0.8]), 0.828, 3)
self.assertAlmostEqual(l.l1(p0, p, [0.62, 0.38]), 0.341, 3)

View File

@@ -1,867 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
from collections import deque
from copy import deepcopy
import re
import neat.locals.overload.mhod.multisize_estimation as m
import logging
logging.disable(logging.CRITICAL)
def c(data):
return deepcopy(data)
class Multisize(TestCase):
def test_mean(self):
self.assertEqual(m.mean([], 100), 0.0)
self.assertEqual(m.mean([0], 100), 0.0)
self.assertEqual(m.mean([0, 0], 100), 0.0)
self.assertEqual(m.mean([1, 1], 100), 0.02)
self.assertEqual(m.mean([0, 1], 100), 0.01)
self.assertEqual(m.mean([1, 2, 3, 4, 5], 100), 0.15)
def test_variance(self):
self.assertEqual(m.variance([], 100), 0.0)
self.assertEqual(m.variance([0], 100), 0.0)
self.assertEqual(m.variance([0, 0], 100), 0.0)
self.assertAlmostEqual(m.variance([1, 1], 100), 0.0194020202)
self.assertAlmostEqual(m.variance([0, 1], 100), 0.0099010101)
self.assertAlmostEqual(m.variance([1, 2, 3, 4, 5], 100), 0.511237373)
self.assertAlmostEqual(m.variance([0, 0, 0, 1], 100), 0.0099030303)
def test_acceptable_variance(self):
self.assertAlmostEqual(m.acceptable_variance(0.2, 5), 0.032, 3)
self.assertAlmostEqual(m.acceptable_variance(0.6, 15), 0.016, 3)
def test_estimate_probability(self):
self.assertEqual(
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 0),
0.08)
self.assertEqual(
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 1),
0.02)
self.assertEqual(
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 0),
0.01)
self.assertEqual(
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 1),
0.04)
def test_update_request_windows(self):
max_window_size = 4
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1, 1])])
max_window_size = 2
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1])])
max_window_size = 4
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size),
deque([2, 2], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2),
[deque([0, 0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2),
[deque([0, 0]),
deque([1, 1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 2])])
max_window_size = 2
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size),
deque([2, 2], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2),
[deque([0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2),
[deque([0, 0]),
deque([1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
def test_update_estimate_windows(self):
req_win = [deque([1, 0, 0, 0]),
deque([1, 0, 1, 0])]
est_win = [[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}],
[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}]]
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 1.0]),
4: deque([0, 0, 0.75])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])}]])
req_win = [deque([1, 0, 2, 0]),
deque([1, 0, 1, 0]),
deque([2, 2, 1, 0])]
est_win = [[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}],
[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}],
[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}]]
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 2),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.5])}]])
def test_update_variances(self):
est_win = [[{2: deque([0, 0.5], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1.0, 0.5], 2),
4: deque([0, 1, 1, 1], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.25, 0.5, 0.5], 4)},
{2: deque([0.5, 0.75], 2),
4: deque([0.75, 0.75, 0.5, 0.5], 4)}]]
variances = [[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_variances(c(variances), c(est_win), 0),
[[{2: 0.125,
4: 0.25},
{2: 0.125,
4: 0.25}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_variances(c(variances), c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.03125,
4: 0.020833333333333332},
{2: 0.03125,
4: 0.020833333333333332}]])
self.assertEqual(m.update_variances(
m.update_variances(c(variances), c(est_win), 0), c(est_win), 0),
[[{2: 0.125,
4: 0.25},
{2: 0.125,
4: 0.25}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
est_win = [[{2: deque([0, 0], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1, 1], 2),
4: deque([0, 0, 1, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.05, 0.5, 0.25], 4)},
{2: deque([0.25, 0.5], 2),
4: deque([0.4, 0.55, 0.25, 0.5], 4)},
{2: deque([0.25, 0.25], 2),
4: deque([0.35, 0.4, 0.25, 0.25], 4)}],
[{2: deque([1, 0], 2),
4: deque([1, 0, 1, 0], 4)},
{2: deque([0, 1], 2),
4: deque([0, 0, 0, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}]]
variances = [[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_variances(c(variances), c(est_win), 0),
[[{2: 0.0,
4: 0.25},
{2: 0.0,
4: 0.3333333333333333},
{2: 0.0,
4: 0.25}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_variances(c(variances), c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.03125,
4: 0.03395833333333333},
{2: 0.03125,
4: 0.0175},
{2: 0.0,
4: 0.005625000000000001}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_variances(c(variances), c(est_win), 2),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.5,
4: 0.3333333333333333},
{2: 0.5,
4: 0.25},
{2: 0.0,
4: 0.25}]])
def test_update_acceptable_variances(self):
est_win = [[{2: deque([0, 0.5], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1.0, 0.5], 2),
4: deque([0, 1, 1, 1], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.25, 0.5, 0.5], 4)},
{2: deque([0.5, 0.75], 2),
4: deque([0.75, 0.75, 0.5, 0.5], 4)}]]
acc_variances = [[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 0),
[[{2: 0.125,
4: 0.0},
{2: 0.125,
4: 0.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.09375,
4: 0.0625},
{2: 0.09375,
4: 0.0625}]])
self.assertEqual(m.update_acceptable_variances(
m.update_acceptable_variances(
c(acc_variances), c(est_win), 0), c(est_win), 0),
[[{2: 0.125,
4: 0.0},
{2: 0.125,
4: 0.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
est_win = [[{2: deque([0, 0], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1, 1], 2),
4: deque([0, 0, 1, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.05, 0.5, 0.25], 4)},
{2: deque([0.25, 0.5], 2),
4: deque([0.4, 0.55, 0.25, 0.5], 4)},
{2: deque([0.25, 0.25], 2),
4: deque([0.35, 0.4, 0.25, 0.25], 4)}],
[{2: deque([1, 0], 2),
4: deque([1, 0, 1, 0], 4)},
{2: deque([0, 1], 2),
4: deque([0, 0, 0, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}]]
acc_variances = [[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 0),
[[{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.09375,
4: 0.046875},
{2: 0.125,
4: 0.0625},
{2: 0.09375,
4: 0.046875}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 2),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0}]])
def test_select_window(self):
variances = [[{2: 0.2,
4: 0.9},
{2: 0.2,
4: 0.6}],
[{2: 0.2,
4: 0},
{2: 0.2,
4: 0.8}]]
acc_variances = [[{2: 0.1,
4: 0.5},
{2: 0.4,
4: 0.5}],
[{2: 0.4,
4: 0.5},
{2: 0.1,
4: 0.5}]]
window_sizes = [2, 4]
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[2, 2],
[4, 2]])
variances = [[{2: 0,
4: 0.9},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0.8}]]
acc_variances = [[{2: 0.5,
4: 0.5},
{2: 0.6,
4: 0.5}],
[{2: 0.7,
4: 0.5},
{2: 0.4,
4: 0.5}]]
window_sizes = [2, 4]
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[2, 4],
[4, 2]])
variances = [[{2: 0,
4: 0.9},
{2: 0,
4: 0},
{2: 0,
4: 1.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0.8},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0.8},
{2: 0.5,
4: 0}]]
acc_variances = [[{2: 0.5,
4: 0.9},
{2: 0.6,
4: 0.9},
{2: 0.6,
4: 0.9}],
[{2: 0.7,
4: 0.9},
{2: 0.4,
4: 0.9},
{2: 0.4,
4: 0.9}],
[{2: 0.7,
4: 0.9},
{2: 0.4,
4: 0.5},
{2: 0.4,
4: 0.9}]]
window_sizes = [2, 4]
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[4, 4, 2],
[4, 4, 4],
[4, 2, 2]])
def test_select_best_estimates(self):
est_win = [[{2: deque([0, 0], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1, 1], 2),
4: deque([0, 0, 1, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.05, 0.5, 0.25], 4)},
{2: deque([0.25, 0.5], 2),
4: deque([0.4, 0.55, 0.25, 0.6], 4)},
{2: deque([0.25, 0.25], 2),
4: deque([0.35, 0.4, 0.25, 0.15], 4)}],
[{2: deque([1, 0], 2),
4: deque([1, 0, 1, 0], 4)},
{2: deque([0, 1], 2),
4: deque([0, 0, 0, 0.2], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}]]
selected_windows1 = [[2, 4, 2],
[2, 2, 4],
[4, 2, 2]]
selected_windows2 = [[4, 4, 4],
[2, 2, 2],
[2, 4, 2]]
self.assertEqual(
m.select_best_estimates(c(est_win), selected_windows1),
[[0, 1, 0],
[0.25, 0.5, 0.15],
[0, 1, 0]])
self.assertEqual(
m.select_best_estimates(c(est_win), selected_windows2),
[[0, 1, 0],
[0.25, 0.5, 0.25],
[0, 0.2, 0]])
est_win = [[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}]]
self.assertEqual(
m.select_best_estimates(c(est_win), [[2, 4], [4, 2]]),
[[0.0, 0.0],
[0.0, 0.0]])
self.assertEqual(
m.select_best_estimates(c(est_win), [[2, 2], [4, 4]]),
[[0.0, 0.0],
[0.0, 0.0]])
def test_init_request_windows(self):
structure = m.init_request_windows(1, 4)
self.assertEqual(structure, [deque()])
self.assertEqual(deque_maxlen(structure[0]), 4)
structure = m.init_request_windows(2, 4)
self.assertEqual(structure, [deque(),
deque()])
self.assertEqual(deque_maxlen(structure[0]), 4)
self.assertEqual(deque_maxlen(structure[1]), 4)
structure = m.init_request_windows(3, 4)
self.assertEqual(structure, [deque(),
deque(),
deque()])
self.assertEqual(deque_maxlen(structure[0]), 4)
self.assertEqual(deque_maxlen(structure[1]), 4)
self.assertEqual(deque_maxlen(structure[2]), 4)
def test_init_variances(self):
self.assertEqual(m.init_variances([2, 4], 1), [[{2: 1.0,
4: 1.0}]])
self.assertEqual(m.init_variances([2, 4], 2), [[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}],
[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}]])
self.assertEqual(m.init_variances([2, 4], 3), [[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}],
[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}],
[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}]])
def test_init_3_level_structure(self):
structure = m.init_deque_structure([2, 4], 1)
self.assertEqual(structure, [[{2: deque(),
4: deque()}]])
self.assertEqual(deque_maxlen(structure[0][0][2]), 2)
self.assertEqual(deque_maxlen(structure[0][0][4]), 4)
structure = m.init_deque_structure([2, 4], 2)
self.assertEqual(structure, [[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}]])
self.assertEqual(deque_maxlen(structure[0][0][2]), 2)
self.assertEqual(deque_maxlen(structure[0][0][4]), 4)
self.assertEqual(deque_maxlen(structure[0][1][2]), 2)
self.assertEqual(deque_maxlen(structure[0][1][4]), 4)
self.assertEqual(deque_maxlen(structure[1][0][2]), 2)
self.assertEqual(deque_maxlen(structure[1][0][4]), 4)
self.assertEqual(deque_maxlen(structure[1][1][2]), 2)
self.assertEqual(deque_maxlen(structure[1][1][4]), 4)
structure = m.init_deque_structure([2, 4], 3)
self.assertEqual(structure, [[{2: deque(),
4: deque()},
{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()},
{2: deque(),
4: deque()}]])
self.assertEqual(deque_maxlen(structure[0][0][2]), 2)
self.assertEqual(deque_maxlen(structure[0][0][4]), 4)
self.assertEqual(deque_maxlen(structure[0][1][2]), 2)
self.assertEqual(deque_maxlen(structure[0][1][4]), 4)
self.assertEqual(deque_maxlen(structure[0][2][2]), 2)
self.assertEqual(deque_maxlen(structure[0][2][4]), 4)
self.assertEqual(deque_maxlen(structure[1][0][2]), 2)
self.assertEqual(deque_maxlen(structure[1][0][4]), 4)
self.assertEqual(deque_maxlen(structure[1][1][2]), 2)
self.assertEqual(deque_maxlen(structure[1][1][4]), 4)
self.assertEqual(deque_maxlen(structure[1][2][2]), 2)
self.assertEqual(deque_maxlen(structure[1][2][4]), 4)
self.assertEqual(deque_maxlen(structure[2][0][2]), 2)
self.assertEqual(deque_maxlen(structure[2][0][4]), 4)
self.assertEqual(deque_maxlen(structure[2][1][2]), 2)
self.assertEqual(deque_maxlen(structure[2][1][4]), 4)
self.assertEqual(deque_maxlen(structure[2][2][2]), 2)
self.assertEqual(deque_maxlen(structure[2][2][4]), 4)
def test_init_selected_window_sizes(self):
self.assertEqual(
m.init_selected_window_sizes([2, 4], 1), [[2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 2), [[2, 2],
[2, 2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 3), [[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
def deque_maxlen(coll):
    # deque exposes its bound directly, so there is no need to parse it
    # out of repr() with a regular expression.
    return coll.maxlen

View File

@@ -1,70 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import operator
import neat.locals.overload.mhod.nlp as nlp
import logging
logging.disable(logging.CRITICAL)
class Nlp(TestCase):
def test_build_objective(self):
with MockTransaction:
state_vector = [1, 0]
p = [[-0.1, 0.1],
[0.3, -0.3]]
m1 = mock('m1')
m2 = mock('m2')
m = [m1, m2]
container = mock('function container')
expect(container).l0(state_vector, p, m).and_return(2).once()
expect(container).l1(state_vector, p, m).and_return(3).once()
ls = [container.l0, container.l1]
objective = nlp.build_objective(ls, state_vector, p)
self.assertTrue(hasattr(objective, '__call__'))
self.assertEqual(objective(m1, m2), 5)
def test_build_constraint(self):
with MockTransaction:
otf = 0.05
migration_time = 20.
state_vector = [1, 0]
p = [[-0.1, 0.1],
[0.3, -0.3]]
m1 = mock('m1')
m2 = mock('m2')
m = [m1, m2]
container = mock('function container')
expect(container).l0(state_vector, p, m).and_return(2).once()
expect(container).l1(state_vector, p, m). \
and_return(3).exactly(2).times()
ls = [container.l0, container.l1]
constraint = nlp.build_constraint(otf, migration_time,
ls, state_vector, p, 0, 0)
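            # The constraint is a (function, operator, limit) triple; with
            # l0 = 2 and l1 = 3 the function evaluates to
            # (migration_time + l1) / (migration_time + l0 + l1),
            # which must be <= otf.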
self.assertTrue(hasattr(constraint[0], '__call__'))
assert constraint[1] is operator.le
self.assertEqual(constraint[2], otf)
self.assertEqual(constraint[0](m1, m2),
float(migration_time + 3) /
(migration_time + 5))

View File

@@ -1,113 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.overload.otf as otf
import logging
logging.disable(logging.CRITICAL)
class Otf(TestCase):
def test_otf(self):
state = {'overload': 0, 'total': 0}
decision, state = otf.otf(0.5, 1.0, 4, 1.,
[0.9], state)
self.assertEqual(state, {'overload': 0, 'total': 1})
self.assertFalse(decision)
decision, state = otf.otf(0.5, 1.0, 4, 1.,
[0.9, 1.3], state)
self.assertEqual(state, {'overload': 1, 'total': 2})
self.assertFalse(decision)
decision, state = otf.otf(0.5, 1.0, 4, 1.,
[0.9, 1.3, 1.1], state)
self.assertEqual(state, {'overload': 2, 'total': 3})
self.assertFalse(decision)
decision, state = otf.otf(0.5, 1.0, 4, 1.,
[0.9, 1.3, 1.1, 1.2], state)
self.assertEqual(state, {'overload': 3, 'total': 4})
self.assertTrue(decision)
decision, state = otf.otf(0.5, 1.0, 4, 100.,
[0.9, 1.3, 1.1, 1.2, 0.3], state)
self.assertEqual(state, {'overload': 3, 'total': 5})
self.assertFalse(decision)
decision, state = otf.otf(0.5, 1.0, 4, 1.,
[0.9, 1.3, 1.1, 1.2, 1.3], state)
self.assertEqual(state, {'overload': 4, 'total': 6})
self.assertTrue(decision)
decision, state = otf.otf(0.5, 1.0, 4, 1.,
[0.9, 1.3, 1.1, 1.2, 0.3, 0.2], state)
self.assertEqual(state, {'overload': 4, 'total': 7})
self.assertFalse(decision)
decision, state = otf.otf(0.5, 1.0, 4, 0.,
[0.9, 1.3, 1.1, 1.2, 0.3, 0.2, 0.1], state)
self.assertEqual(state, {'overload': 4, 'total': 8})
self.assertFalse(decision)
decision, state = otf.otf(0.5, 1.0, 4, 0.,
[0.9, 1.3, 1.1, 1.2, 0.3, 0.2, 0.1, 0.1], state)
self.assertEqual(state, {'overload': 4, 'total': 9})
self.assertFalse(decision)
def test_otf_factory(self):
alg = otf.otf_factory(30, 0.,
{'otf': 0.5, 'threshold': 1.0, 'limit': 4})
decision, state = alg([0.9], None)
self.assertEqual(state, {'overload': 0, 'total': 1})
self.assertFalse(decision)
decision, state = alg([0.9, 1.3], state)
self.assertEqual(state, {'overload': 1, 'total': 2})
self.assertFalse(decision)
decision, state = alg([0.9, 1.3, 1.1], state)
self.assertEqual(state, {'overload': 2, 'total': 3})
self.assertFalse(decision)
decision, state = alg([0.9, 1.3, 1.1, 1.2], state)
self.assertEqual(state, {'overload': 3, 'total': 4})
self.assertTrue(decision)
decision, state = alg([0.9, 1.3, 1.1, 1.2, 0.3], state)
self.assertEqual(state, {'overload': 3, 'total': 5})
self.assertFalse(decision)
decision, state = alg([0.9, 1.3, 1.1, 1.2, 1.3], state)
self.assertEqual(state, {'overload': 4, 'total': 6})
self.assertTrue(decision)
decision, state = alg([0.9, 1.3, 1.1, 1.2, 0.3, 0.2], state)
self.assertEqual(state, {'overload': 4, 'total': 7})
self.assertFalse(decision)
decision, state = alg([0.9, 1.3, 1.1, 1.2, 0.3, 0.2, 0.1], state)
self.assertEqual(state, {'overload': 4, 'total': 8})
self.assertFalse(decision)
decision, state = alg([0.9, 1.3, 1.1, 1.2, 0.3, 0.2, 0.1, 0.1], state)
self.assertEqual(state, {'overload': 4, 'total': 9})
self.assertFalse(decision)

View File

@@ -1,183 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.overload.statistics as stats
import logging
logging.disable(logging.CRITICAL)
class Statistics(TestCase):
def test_loess_factory(self):
alg = stats.loess_factory(
300, 20., {'threshold': 1.0, 'param': 1.2, 'length': 3})
self.assertEqual(alg([]), (False, {}))
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
self.assertEqual(alg(data), (True, {}))
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
self.assertEqual(alg(data), (False, {}))
def test_loess_robust_factory(self):
alg = stats.loess_robust_factory(
300, 20., {'threshold': 1.0, 'param': 1.2, 'length': 3})
self.assertEqual(alg([]), (False, {}))
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
self.assertEqual(alg(data), (True, {}))
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
self.assertEqual(alg(data), (False, {}))
def test_mad_threshold_factory(self):
with MockTransaction:
expect(stats).mad.and_return(0.125).exactly(6).times()
alg = stats.mad_threshold_factory(
300, 20., {'threshold': 1.6, 'limit': 3})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0., 0., 0.0]), (False, {}))
self.assertEqual(alg([0., 0., 0.5]), (False, {}))
self.assertEqual(alg([0., 0., 0.6]), (False, {}))
self.assertEqual(alg([0., 0., 0.8]), (True, {}))
self.assertEqual(alg([0., 0., 0.9]), (True, {}))
self.assertEqual(alg([0., 0., 1.0]), (True, {}))
def test_iqr_threshold_factory(self):
with MockTransaction:
expect(stats).iqr.and_return(0.125).exactly(6).times()
alg = stats.iqr_threshold_factory(
300, 20., {'threshold': 1.6, 'limit': 3})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0., 0., 0.0]), (False, {}))
self.assertEqual(alg([0., 0., 0.5]), (False, {}))
self.assertEqual(alg([0., 0., 0.6]), (False, {}))
self.assertEqual(alg([0., 0., 0.8]), (True, {}))
self.assertEqual(alg([0., 0., 0.9]), (True, {}))
self.assertEqual(alg([0., 0., 1.0]), (True, {}))
def test_loess(self):
assert not stats.loess(1.0, 1.2, 3, 0.5, [])
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
assert stats.loess(1.0, 1.2, 3, 0.5, data)
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
assert not stats.loess(1.0, 1.2, 3, 0.5, data)
def test_loess_robust(self):
assert not stats.loess_robust(1.0, 1.2, 3, 0.5, [])
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
assert stats.loess_robust(1.0, 1.2, 3, 0.5, data)
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
assert not stats.loess_robust(1.0, 1.2, 3, 0.5, data)
def test_mad_threshold(self):
with MockTransaction:
expect(stats).mad.and_return(0.125).exactly(6).times()
assert not stats.mad_threshold(1., 3, [])
assert not stats.mad_threshold(1., 3, [0., 0., 0.])
assert not stats.mad_threshold(1.6, 3, [0., 0., 0.5])
assert not stats.mad_threshold(1.6, 3, [0., 0., 0.6])
assert stats.mad_threshold(1.6, 3, [0., 0., 0.8])
assert stats.mad_threshold(1.6, 3, [0., 0., 0.9])
assert stats.mad_threshold(1.6, 3, [0., 0., 1.0])
def test_iqr_threshold(self):
with MockTransaction:
expect(stats).iqr.and_return(0.125).exactly(6).times()
assert not stats.iqr_threshold(1., 3, [])
assert not stats.iqr_threshold(1., 3, [0., 0., 0.])
assert not stats.iqr_threshold(1.6, 3, [0., 0., 0.5])
assert not stats.iqr_threshold(1.6, 3, [0., 0., 0.6])
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.8])
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.9])
assert stats.iqr_threshold(1.6, 3, [0., 0., 1.0])
def test_utilization_threshold_abstract(self):
f = lambda x: 0.8
assert not stats.utilization_threshold_abstract(f, 3, [])
assert not stats.utilization_threshold_abstract(f, 3, [0., 0., 0.])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 1.0])
assert not stats.utilization_threshold_abstract(
f, 3, [0., 0., 0., 0.])
assert not stats.utilization_threshold_abstract(
f, 3, [0., 0., 0., 0.5])
assert not stats.utilization_threshold_abstract(
f, 3, [0., 0., 0., 0.7])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.8])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.9])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 1.0])
def test_mad(self):
data = [1, 1, 2, 2, 4, 6, 9]
assert stats.mad(data) == 1.
def test_iqr(self):
data = [105, 109, 107, 112, 102, 118, 115, 104, 110, 116, 108]
assert stats.iqr(data) == 10.
data = [2., 4., 7., -20., 22., -1., 0., -1., 7., 15., 8., 4.,
-4., 11., 11., 12., 3., 12., 18., 1.]
assert stats.iqr(data) == 12.
def test_loess_parameter_estimates(self):
data = [2., 4., 7., -20., 22., -1., 0., -1., 7., 15., 8., 4.,
-4., 11., 11., 12., 3., 12., 18., 1.]
estimates = stats.loess_parameter_estimates(data)
self.assertAlmostEqual(estimates[0], 2.2639, 3)
self.assertAlmostEqual(estimates[1], 0.3724, 3)
def test_loess_robust_parameter_estimates(self):
data = [2., 4., 7., -20., 22., -1., 0., -1., 7., 15., 8., 4.,
-4., 11., 11., 12., 3., 12., 18., 1.]
estimates = stats.loess_robust_parameter_estimates(data)
self.assertAlmostEqual(estimates[0], 2.4547, 3)
self.assertAlmostEqual(estimates[1], 0.3901, 3)
def test_tricube_weights(self):
for actual, expected in zip(
stats.tricube_weights(5),
[0.669, 0.669, 0.669, 0.953, 1.0]):
self.assertAlmostEqual(actual, expected, 2)
for actual, expected in zip(
stats.tricube_weights(10),
[0.148, 0.148, 0.148, 0.348, 0.568, 0.759,
0.892, 0.967, 0.995, 1.0]):
self.assertAlmostEqual(actual, expected, 2)
def test_tricube_bisquare_weights(self):
for actual, expected in zip(
stats.tricube_bisquare_weights([1., 1., 2., 2., 4., 6., 9.]),
[0.329, 0.329, 0.329, 0.633, 0.705, 0.554, 0.191]):
self.assertAlmostEqual(actual, expected, 2)

View File

@@ -1,90 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.overload.trivial as trivial
import logging
logging.disable(logging.CRITICAL)
class Trivial(TestCase):
@qc(10)
def never_overloaded_factory(
time_step=int_(min=0, max=10),
migration_time=float_(min=0, max=10),
utilization=list_(of=float)
):
alg = trivial.never_overloaded_factory(time_step, migration_time, {})
assert alg(utilization) == (False, {})
def test_threshold_factory(self):
alg = trivial.threshold_factory(300, 20., {'threshold': 0.5})
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.6]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.5]), (False, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
self.assertEquals(alg([]), (False, {}))
def test_last_n_average_threshold_factory(self):
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5, 'n': 1})
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.6]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.5]), (False, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
self.assertEquals(alg([]), (False, {}))
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5, 'n': 2})
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.6]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 1.2, 0.4]), (True, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 0.4, 0.5]), (False, {}))
self.assertEquals(alg([0.9, 0.8, 1.1, 0.2, 0.3]), (False, {}))
self.assertEquals(alg([]), (False, {}))
def test_threshold(self):
self.assertTrue(trivial.threshold(0.5, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(trivial.threshold(0.5, [0.9, 0.8, 1.1, 1.2, 0.6]))
self.assertFalse(trivial.threshold(0.5, [0.9, 0.8, 1.1, 1.2, 0.5]))
self.assertFalse(trivial.threshold(0.5, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertFalse(trivial.threshold(0.5, []))
def test_last_n_average_threshold(self):
self.assertTrue(trivial.last_n_average_threshold(
0.5, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(trivial.last_n_average_threshold(
0.5, 1, [0.9, 0.8, 1.1, 1.2, 0.6]))
self.assertFalse(trivial.last_n_average_threshold(
0.5, 1, [0.9, 0.8, 1.1, 1.2, 0.5]))
self.assertFalse(trivial.last_n_average_threshold(
0.5, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertFalse(trivial.last_n_average_threshold(
0.5, 1, []))
self.assertTrue(trivial.last_n_average_threshold(
0.5, 2, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(trivial.last_n_average_threshold(
0.5, 2, [0.9, 0.8, 1.1, 1.2, 0.6]))
self.assertTrue(trivial.last_n_average_threshold(
0.5, 2, [0.9, 0.8, 1.1, 1.2, 0.4]))
self.assertFalse(trivial.last_n_average_threshold(
0.5, 2, [0.9, 0.8, 1.1, 0.4, 0.5]))
self.assertFalse(trivial.last_n_average_threshold(
0.5, 2, [0.9, 0.8, 1.1, 0.2, 0.3]))
self.assertFalse(trivial.last_n_average_threshold(0.5, 2, []))

View File

@@ -1,602 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import os
import shutil
import libvirt
import neat.common as common
import neat.locals.collector as collector
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class Collector(TestCase):
@qc(10)
def start(
iterations=int_(min=0, max=10),
time_interval=int_(min=0)
):
with MockTransaction:
state = {'property': 'value'}
config = {
'log_directory': 'dir',
'log_level': 2,
'local_data_directory': 'data_dir',
'data_collector_interval': str(time_interval)}
paths = [collector.DEFAULT_CONFIG_PATH, collector.CONFIG_PATH]
fields = collector.REQUIRED_FIELDS
expect(collector).read_and_validate_config(paths, fields). \
and_return(config).once()
expect(common).init_logging('dir', 'data-collector.log', 2).once()
expect(common).start(collector.init_state,
collector.execute,
config,
time_interval).and_return(state).once()
assert collector.start() == state
def test_init_state(self):
with MockTransaction:
vir_connection = mock('virConnect')
expect(libvirt).openReadOnly(None). \
and_return(vir_connection).once()
physical_cpus = 13
expect(common).physical_cpu_count(vir_connection). \
and_return(physical_cpus).once()
config = {'sql_connection': 'db',
'host_cpu_overload_threshold': '0.95',
'host_cpu_usable_by_vms': '0.75',
'data_collector_data_length': '5'}
hostname = 'host1'
mhz = 13540
ram = 8192
expect(vir_connection).getHostname().and_return(hostname).once()
expect(collector).get_host_characteristics(vir_connection). \
and_return((mhz, ram)).once()
db = mock('db')
expect(collector).init_db('db').and_return(db).once()
expect(db).update_host(hostname,
int(mhz * 0.75),
physical_cpus,
ram).once()
state = collector.init_state(config)
assert state['previous_time'] == 0
assert isinstance(state['previous_cpu_time'], dict)
assert state['previous_host_cpu_time_total'] == 0.
assert state['previous_host_cpu_time_busy'] == 0.
assert state['previous_overload'] == -1
assert state['vir_connection'] == vir_connection
assert state['hostname'] == hostname
self.assertAlmostEqual(state['host_cpu_overload_threshold'],
0.7125, 3)
assert state['physical_cpus'] == physical_cpus
assert state['physical_cpu_mhz'] == mhz
assert state['physical_core_mhz'] == mhz / physical_cpus
assert state['db'] == db
@qc(1)
def get_previous_vms():
local_data_directory = os.path.join(
os.path.dirname(__file__), '..', 'resources', 'vms')
previous_vms = collector.get_previous_vms(local_data_directory)
assert 'ec452be0-e5d0-11e1-aff1-0800200c9a66' in previous_vms
assert 'e615c450-e5d0-11e1-aff1-0800200c9a66' in previous_vms
assert 'f3e142d0-e5d0-11e1-aff1-0800200c9a66' in previous_vms
@qc
def get_current_vms(
ids=dict_(
keys=int_(min=0, max=1000),
values=str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=10
)
):
with MockTransaction:
def init_vm(id):
vm = mock('vm')
expect(vm).UUIDString().and_return(ids[id]).once()
expect(vm).state(0).and_return([id * 13, id]).once()
return vm
connection = libvirt.virConnect()
expect(connection).listDomainsID().and_return(ids.keys()).once()
if ids:
expect(connection).lookupByID(any_int) \
.and_call(lambda id: init_vm(id))
expected = dict((v, k * 13) for k, v in ids.items())
assert collector.get_current_vms(connection) == expected
@qc
def get_added_vms(
x=list_(
of=str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=5
),
y=list_(
of=str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=5
)
):
previous_vms = list(x)
if x:
x.pop(random.randrange(len(x)))
x.extend(y)
assert set(collector.get_added_vms(previous_vms, x)) == set(y)
@qc
def get_removed_vms(
x=list_(
of=str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=5
),
y=list_(
of=str_(of='abc123-', min_length=36, max_length=36),
min_length=0, max_length=5
)
):
prev_vms = list(x)
removed = []
if x:
to_remove = random.randrange(len(x))
for _ in xrange(to_remove):
removed.append(x.pop(random.randrange(len(x))))
x.extend(y)
assert set(collector.get_removed_vms(prev_vms, x)) == set(removed)
@qc
def substract_lists(
x=list_(of=int_(min=0, max=20), max_length=10),
y=list_(of=int_(min=0, max=20), max_length=10)
):
assert set(collector.substract_lists(x, y)) == \
set([item for item in x if item not in y])
@qc(1)
def cleanup_local_vm_data():
local_data_directory = os.path.join(
os.path.dirname(__file__), '..', 'resources', 'vms')
local_data_directory_tmp = os.path.join(
local_data_directory, 'tmp')
shutil.rmtree(local_data_directory_tmp, True)
os.mkdir(local_data_directory_tmp)
vm1 = 'ec452be0-e5d0-11e1-aff1-0800200c9a66'
vm2 = 'e615c450-e5d0-11e1-aff1-0800200c9a66'
vm3 = 'f3e142d0-e5d0-11e1-aff1-0800200c9a66'
shutil.copy(os.path.join(local_data_directory, vm1),
local_data_directory_tmp)
shutil.copy(os.path.join(local_data_directory, vm2),
local_data_directory_tmp)
shutil.copy(os.path.join(local_data_directory, vm3),
local_data_directory_tmp)
assert len(os.listdir(local_data_directory_tmp)) == 3
collector.cleanup_local_vm_data(local_data_directory_tmp,
[vm1, vm2, vm3])
assert len(os.listdir(local_data_directory_tmp)) == 0
os.rmdir(local_data_directory_tmp)
@qc(1)
def cleanup_all_local_data():
local_data_directory = os.path.join(
os.path.dirname(__file__), '..', 'resources', 'vms')
local_data_directory_tmp = os.path.join(
local_data_directory, 'tmp')
local_data_directory_tmp_vms = os.path.join(
local_data_directory_tmp, 'vms')
local_data_directory_tmp_host = os.path.join(
local_data_directory_tmp, 'host')
shutil.rmtree(local_data_directory_tmp, True)
os.mkdir(local_data_directory_tmp)
os.mkdir(local_data_directory_tmp_vms)
vm1 = 'ec452be0-e5d0-11e1-aff1-0800200c9a66'
vm2 = 'e615c450-e5d0-11e1-aff1-0800200c9a66'
vm3 = 'f3e142d0-e5d0-11e1-aff1-0800200c9a66'
shutil.copy(os.path.join(local_data_directory, vm1),
local_data_directory_tmp_vms)
shutil.copy(os.path.join(local_data_directory, vm2),
local_data_directory_tmp_vms)
shutil.copy(os.path.join(local_data_directory, vm3),
local_data_directory_tmp_vms)
shutil.copyfile(os.path.join(local_data_directory, vm1),
local_data_directory_tmp_host)
assert len(os.listdir(local_data_directory_tmp)) == 2
assert len(os.listdir(local_data_directory_tmp_vms)) == 3
collector.cleanup_all_local_data(local_data_directory_tmp)
assert len(os.listdir(local_data_directory_tmp)) == 1
assert len(os.listdir(local_data_directory_tmp_vms)) == 0
shutil.rmtree(local_data_directory_tmp, True)
@qc
def fetch_remote_data(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
),
data_length=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
if x:
for uuid, data in x.items():
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
for mhz in data:
db.vm_resource_usage.insert().execute(
vm_id=vm_id,
cpu_mhz=mhz)
x[uuid] = data[-data_length:]
assert collector.fetch_remote_data(db, data_length, x.keys()) == x
@qc
def write_vm_data_locally(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
),
data_length=int_(min=0, max=10)
):
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'vms', 'tmp')
shutil.rmtree(path, True)
os.mkdir(path)
collector.write_vm_data_locally(path, x, data_length)
files = os.listdir(path)
result = {}
for uuid in x.keys():
file = os.path.join(path, uuid)
with open(file, 'r') as f:
result[uuid] = [int(a)
for a in f.read().strip().splitlines()]
shutil.rmtree(path)
assert set(files) == set(x.keys())
for uuid, values in x.items():
if data_length > 0:
assert result[uuid] == values[-data_length:]
else:
assert result[uuid] == []
@qc
def append_vm_data_locally(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
int_(min=0, max=3000)),
min_length=0, max_length=3
),
data_length=int_(min=0, max=10)
):
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'vms', 'tmp')
shutil.rmtree(path, True)
os.mkdir(path)
original_data = {}
to_append = {}
after_appending = {}
for uuid, data in x.items():
original_data[uuid] = data[0]
to_append[uuid] = data[1]
if data_length > 0:
after_appending[uuid] = list(data[0])
after_appending[uuid].append(data[1])
after_appending[uuid] = after_appending[uuid][-data_length:]
else:
after_appending[uuid] = []
collector.write_vm_data_locally(path, original_data, data_length)
collector.append_vm_data_locally(path, to_append, data_length)
files = os.listdir(path)
result = {}
for uuid in x.keys():
file = os.path.join(path, uuid)
with open(file, 'r') as f:
result[uuid] = [int(a)
for a in f.read().strip().splitlines()]
shutil.rmtree(path)
assert set(files) == set(x.keys())
for uuid in x.keys():
assert result[uuid] == after_appending[uuid]
@qc(10)
def append_vm_data_remotely(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(int_(min=1, max=3000),
list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)),
min_length=0, max_length=5
)
):
db = db_utils.init_db('sqlite:///:memory:')
initial_data = []
data_to_submit = {}
final_data = {}
for uuid, data in vms.items():
vm_id = db.select_vm_id(uuid)
data_to_submit[uuid] = data[0]
final_data[uuid] = list(data[1])
final_data[uuid].append(data[0])
for cpu_mhz in data[1]:
initial_data.append({'vm_id': vm_id,
'cpu_mhz': cpu_mhz})
if initial_data:
db.vm_resource_usage.insert().execute(initial_data)
collector.append_vm_data_remotely(db, data_to_submit)
for uuid, data in final_data.items():
assert db.select_cpu_mhz_for_vm(uuid, 11) == data
@qc
def append_host_data_locally(
data=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
x=int_(min=0, max=3000),
data_length=int_(min=0, max=10)
):
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'host')
with open(path, 'w') as f:
f.write('\n'.join([str(x)
for x in data]) + '\n')
collector.append_host_data_locally(path, x, data_length)
if data_length > 0:
data.append(x)
expected = data[-data_length:]
else:
expected = []
with open(path, 'r') as f:
actual = [int(x)
for x in f.read().strip().splitlines()]
os.remove(path)
assert actual == expected
@qc(10)
def append_host_data_remotely(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=int_(min=0, max=3000)
):
db = db_utils.init_db('sqlite:///:memory:')
db.update_host(hostname, 1, 1, 1)
collector.append_host_data_remotely(db, hostname, cpu_mhz)
assert db.select_cpu_mhz_for_host(hostname, 1) == [cpu_mhz]
@qc
def get_cpu_mhz(
cpus=int_(min=1, max=8),
current_time=float_(min=100, max=1000),
time_period=float_(min=1, max=100),
vm_data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=two(of=int_(min=1, max=100)),
min_length=0, max_length=10
),
added_vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(int_(min=1, max=100),
list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)),
min_length=0, max_length=5
)
):
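        # One collection cycle: VMs randomly dropped from the current list
        # stop contributing, newly added VMs take the last value from their
        # local history (if any), and continuing VMs get calculate_cpu_mhz
        # over the elapsed period.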
with MockTransaction:
def mock_get_cpu_time(vir_connection, uuid):
if uuid in original_vm_data:
return original_vm_data[uuid][0] + \
original_vm_data[uuid][1]
else:
return added_vms[uuid][0]
original_vm_data = dict(vm_data)
previous_time = current_time - time_period
connection = libvirt.virConnect()
when(collector).get_cpu_time(connection, any_string). \
then_call(mock_get_cpu_time)
previous_cpu_time = {}
cpu_mhz = {}
for uuid, data in vm_data.items():
previous_cpu_time[uuid] = data[0]
if vm_data:
to_remove = random.randrange(len(vm_data))
for _ in xrange(to_remove):
tmp = random.choice(vm_data.keys())
del vm_data[tmp]
vms = vm_data.keys()
current_cpu_time = {}
for uuid in vms:
current_cpu_time[uuid] = vm_data[uuid][0] + vm_data[uuid][1]
cpu_mhz[uuid] = collector.calculate_cpu_mhz(
cpus, previous_time, current_time,
vm_data[uuid][0], vm_data[uuid][0] + vm_data[uuid][1])
added_vm_data = {}
if added_vms:
for uuid, data in added_vms.items():
current_cpu_time[uuid] = data[0]
added_vm_data[uuid] = data[1]
if data[1]:
cpu_mhz[uuid] = data[1][-1]
vms.extend(added_vms.keys())
result = collector.get_cpu_mhz(
connection, cpus, previous_cpu_time,
previous_time, current_time, vms,
{}, added_vm_data)
assert result[0] == current_cpu_time
assert result[1] == cpu_mhz
@qc(10)
def get_cpu_time(
uuid=str_(of='abc123-', min_length=36, max_length=36),
x=int_(min=0)
):
with MockTransaction:
connection = libvirt.virConnect()
domain = mock('domain')
expect(connection).lookupByUUIDString(uuid). \
and_return(domain).once()
expect(domain).getCPUStats(True, 0). \
and_return([{'cpu_time': x}]).once()
assert collector.get_cpu_time(connection, uuid) == x
@qc
def calculate_cpu_mhz(
current_time=float_(min=100, max=1000),
time_period=float_(min=1, max=100),
current_cpu_time=int_(min=100),
cpu_time=int_(min=0, max=100),
mhz=int_(min=1, max=3000)
):
previous_time = current_time - time_period
previous_cpu_time = current_cpu_time - cpu_time
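        # cpu_time is the consumed CPU time in nanoseconds, so the expected
        # average usage over the period is
        # mhz * cpu_time / (time_period * 1e9), truncated to an int.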
assert collector. \
calculate_cpu_mhz(mhz, previous_time, current_time,
previous_cpu_time, current_cpu_time) == \
int((mhz * cpu_time / (time_period * 1000000000)))
@qc
def get_host_cpu_mhz(
cpu_mhz=int_(min=1, max=1000),
prev_total=float_(min=100, max=1000),
prev_busy=float_(min=1, max=100),
diff_total=float_(min=100, max=1000),
diff_busy=float_(min=1, max=100)
):
with MockTransaction:
total = prev_total + diff_total
busy = prev_busy + diff_busy
expect(collector).get_host_cpu_time(). \
and_return((total, busy)).once()
assert collector.get_host_cpu_mhz(cpu_mhz, prev_total, prev_busy) == \
(total,
busy,
int(cpu_mhz * diff_busy / diff_total))
@qc(1)
def get_host_cpu_mhz_exception():
cpu_mhz = 1
total = 1.
prev_total = 0.
busy = 1.
prev_busy = 2.
with MockTransaction:
expect(collector).get_host_cpu_time(). \
and_return((total, busy)).once()
try:
collector.get_host_cpu_mhz(cpu_mhz, prev_total, prev_busy)
assert False
except ValueError:
assert True
@qc(10)
def get_host_cpu_time(
x=list_(of=int_(min=1, max=1000), min_length=7, max_length=7)
):
with MockTransaction:
context = mock('context')
f = mock('file')
expect(context).__enter__().and_return(f).once()
when(context).__exit__.and_return(True)
expect(collector).open('/proc/stat', 'r').and_return(context).once()
expect(f).readline().and_return(
'1 ' + ' '.join([str(v) for v in x]) + ' 2 3').once()
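            # Mocked /proc/stat 'cpu' line: the seven parsed fields sum to
            # the total CPU time, and the first three (user, nice, system)
            # count as busy time.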
assert collector.get_host_cpu_time() == (float(sum(x)),
float(sum(x[0:3])))
@qc(10)
def get_host_characteristics(
ram=int_(min=1, max=4000),
cores=int_(min=1, max=8),
mhz=int_(min=1, max=3000)
):
with MockTransaction:
connection = libvirt.virConnect()
expect(connection).getInfo().and_return(
['x86_64', ram, cores, mhz, 1, 1, 4, 2]).once()
assert collector.get_host_characteristics(connection) == \
(cores * mhz, ram)
@qc(10)
def get_host_characteristics_long(
ram=int_(min=1, max=4000),
cores=int_(min=1, max=8),
mhz=int_(min=1, max=3000)
):
with MockTransaction:
connection = libvirt.virConnect()
expect(connection).getInfo().and_return(
['x86_64', long(ram), cores, mhz, 1, 1, 4, 2]).once()
assert collector.get_host_characteristics(connection) == \
(cores * mhz, long(ram))
@qc(1)
def log_host_overload():
db = db_utils.init_db('sqlite:///:memory:')
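        # insert_host_overload is expected only when the overload state
        # changes: 2800 of 3000 MHz exceeds the 0.9 threshold (overload),
        # 2600 does not, and no record is written when the previous state
        # already matches.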
with MockTransaction:
expect(db).insert_host_overload('host', 1).once()
assert collector.log_host_overload(db, 0.9, 'host', -1, 3000, 2800)
with MockTransaction:
expect(db).insert_host_overload('host', 0).once()
assert not collector.log_host_overload(db, 0.9, 'host', -1, 3000, 2600)
with MockTransaction:
expect(db).insert_host_overload('host', 1).once()
assert collector.log_host_overload(db, 0.9, 'host', 0, 3000, 2800)
with MockTransaction:
expect(db).insert_host_overload('host', 0).once()
assert not collector.log_host_overload(db, 0.9, 'host', 1, 3000, 2600)
with MockTransaction:
expect(db).insert_host_overload.never()
assert collector.log_host_overload(db, 0.9, 'host', 1, 3000, 2800)
with MockTransaction:
expect(db).insert_host_overload.never()
assert not collector.log_host_overload(db, 0.9, 'host', 0, 3000, 2600)
def deque_maxlen(coll):
    # deque exposes its bound directly; parsing repr() with re would also
    # require an "import re" that this module does not have.
    return coll.maxlen

View File

@@ -1,227 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import shutil
import libvirt
from hashlib import sha1
import neat.locals.manager as manager
import neat.common as common
import neat.locals.collector as collector
import logging
logging.disable(logging.CRITICAL)
class LocalManager(TestCase):
@qc(10)
def start(
iterations=int_(min=0, max=10),
time_interval=int_(min=0)
):
with MockTransaction:
state = {'property': 'value'}
config = {
'log_directory': 'dir',
'log_level': 2,
'local_manager_interval': str(time_interval)}
paths = [manager.DEFAULT_CONFIG_PATH, manager.CONFIG_PATH]
fields = manager.REQUIRED_FIELDS
expect(manager).read_and_validate_config(paths, fields). \
and_return(config).once()
expect(common).init_logging('dir', 'local-manager.log', 2).once()
expect(common).start(manager.init_state,
manager.execute,
config,
time_interval).and_return(state).once()
assert manager.start() == state
@qc(1)
def init_state():
with MockTransaction:
vir_connection = mock('virConnect')
db = mock('db')
mhz = 3000
expect(libvirt).openReadOnly(None). \
and_return(vir_connection).once()
expect(manager).init_db('db'). \
and_return(db).once()
expect(common).physical_cpu_mhz_total(vir_connection). \
and_return(mhz)
expect(vir_connection).getHostname().and_return('host').once()
config = {'sql_connection': 'db',
'os_admin_user': 'user',
'os_admin_password': 'password',
'host_cpu_usable_by_vms': 0.75}
state = manager.init_state(config)
assert state['previous_time'] == 0
assert state['vir_connection'] == vir_connection
assert state['db'] == db
assert state['physical_cpu_mhz_total'] == mhz * 0.75
assert state['hostname'] == 'host'
assert state['hashed_username'] == sha1('user').hexdigest()
assert state['hashed_password'] == sha1('password').hexdigest()
@qc(1)
def get_local_vm_data(
data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=5
)
):
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'vms', 'tmp')
shutil.rmtree(path, True)
os.mkdir(path)
collector.write_vm_data_locally(path, data, 10)
assert manager.get_local_vm_data(path) == data
shutil.rmtree(path)
@qc(1)
def get_local_host_data(
data=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)
):
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'host')
assert manager.get_local_host_data(path) == []
with open(path, 'w') as f:
f.write('\n'.join([str(x)
for x in data]) + '\n')
assert manager.get_local_host_data(path) == data
os.remove(path)
@qc(10)
def cleanup_vm_data(
data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=5
)
):
original_data = dict(data)
uuids = data.keys()
if data:
n = random.randrange(len(data))
for _ in range(n):
uuid = random.choice(uuids)
del data[uuid]
uuids.remove(uuid)
assert manager.cleanup_vm_data(original_data, uuids) == data
@qc(10)
def get_ram(
data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=int_(min=1, max=100),
min_length=0, max_length=10
)
):
with MockTransaction:
def mock_get_max_ram(vir_connection, uuid):
return data[uuid]
connection = libvirt.virConnect()
when(manager).get_max_ram(connection, any_string). \
then_call(mock_get_max_ram)
assert manager.get_ram(connection, data.keys()) == data
@qc(10)
def get_ram_long(
data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=int_(min=1, max=100),
min_length=0, max_length=10
)
):
data = dict([(k, long(v)) for (k, v) in data.iteritems()])
with MockTransaction:
def mock_get_max_ram(vir_connection, uuid):
return data[uuid]
connection = libvirt.virConnect()
when(manager).get_max_ram(connection, any_string). \
then_call(mock_get_max_ram)
assert manager.get_ram(connection, data.keys()) == data
@qc(10)
def get_max_ram(
uuid=str_(of='abc123-', min_length=36, max_length=36),
x=int_(min=0)
):
with MockTransaction:
connection = libvirt.virConnect()
domain = mock('domain')
expect(connection).lookupByUUIDString(uuid). \
and_return(domain).once()
expect(domain).maxMemory().and_return(x).once()
assert manager.get_max_ram(connection, uuid) == int(x) / 1024
@qc(10)
def get_max_ram_long(
uuid=str_(of='abc123-', min_length=36, max_length=36),
x=int_(min=0)
):
with MockTransaction:
connection = libvirt.virConnect()
domain = mock('domain')
expect(connection).lookupByUUIDString(uuid). \
and_return(domain).once()
expect(domain).maxMemory().and_return(long(x)).once()
assert manager.get_max_ram(connection, uuid) == long(x) / 1024
@qc(1)
def get_max_ram_none(
uuid=str_(of='abc123-', min_length=36, max_length=36)
):
with MockTransaction:
def raise_libvirt_error():
raise libvirt.libvirtError(None)
connection = libvirt.virConnect()
expect(connection).lookupByUUIDString(uuid). \
and_call(lambda _: raise_libvirt_error())
assert manager.get_max_ram(connection, uuid) is None
def test_vm_mhz_to_percentage(self):
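        # As the cases below show: vm_mhz_to_percentage aligns the VM and
        # host MHz histories at their most recent entries, sums them per
        # time step, and divides by the 3000 MHz total; host entries older
        # than the longest VM history are ignored.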
self.assertEqual(manager.vm_mhz_to_percentage(
[[100, 200, 300],
[300, 100, 300, 200],
[100, 100, 700]],
[300, 0, 300],
3000),
[0.1, 0.2, 0.2, 0.5])
self.assertEqual(manager.vm_mhz_to_percentage(
[[100, 200, 300],
[100, 300, 200],
[100, 100, 700]],
[0, 300],
3000),
[0.1, 0.2, 0.5])
self.assertEqual(manager.vm_mhz_to_percentage(
[[100, 200, 300],
[300, 100, 300, 200],
[100, 100, 700]],
[300, 0, 300, 0, 300],
3000),
[0.1, 0.2, 0.2, 0.5])

View File

@@ -1,94 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.underload.trivial as trivial
import logging
logging.disable(logging.CRITICAL)
class Trivial(TestCase):
@qc(10)
def always_underloaded_factory(
time_step=int_(min=0, max=10),
migration_time=float_(min=0, max=10),
utilization=list_(of=float)
):
alg = trivial.always_underloaded_factory(time_step, migration_time, {})
assert alg(utilization) == (True, {})
def test_threshold_factory(self):
alg = trivial.threshold_factory(300, 20., {'threshold': 0.5})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (False, {}))
self.assertEqual(alg([0.0, 1.0]), (False, {}))
def test_last_n_average_threshold_factory(self):
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 2})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (True, {}))
self.assertEqual(alg([0.0, 1.0]), (True, {}))
self.assertEqual(alg([0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 1.0, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.6, 0.6]), (False, {}))
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 3})
self.assertEqual(alg([0.0, 0.6, 0.6]), (True, {}))
def test_threshold(self):
self.assertEqual(trivial.threshold(0.5, []), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.0]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.4]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.5]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.6]), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 1.0]), False)
def test_last_n_average_threshold(self):
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, []), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.4]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.5]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6, 0.6]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 3, [0.0, 0.6, 0.6]), True)

View File

@@ -1,205 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.vm_selection.algorithms as selection
import logging
logging.disable(logging.CRITICAL)
class Selection(TestCase):
@qc(10)
def minimum_migration_time_factory(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=int_(min=0, max=3000),
min_length=1, max_length=5
)
):
alg = selection.minimum_migration_time_factory(300, 20., dict())
values = x.values()
vm_index = values.index(min(values))
vm = x.keys()[vm_index]
assert alg(dict(), x) == ([vm], {})
@qc(10)
def minimum_utilization_factory(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
min_length=1, max_length=5
)
):
alg = selection.minimum_utilization_factory(300, 20., dict())
last_utilization = []
for utilization in x.values():
last_utilization.append(utilization[-1])
vm_index = last_utilization.index(min(last_utilization))
vm = x.keys()[vm_index]
assert alg(x, dict()) == ([vm], {})
@qc(10)
def random_factory(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=1, max_length=3
)
):
with MockTransaction:
alg = selection.random_factory(300, 20., dict())
vm = x.keys()[random.randrange(len(x))]
expect(selection).choice(x.keys()).and_return(vm).once()
assert alg(x, dict()) == ([vm], {})
@qc(10)
def minimum_migration_time_max_cpu_factory(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
int_(min=0, max=3000)),
min_length=1, max_length=5
),
last_n=int_(min=1, max=10)
):
alg = selection.minimum_migration_time_max_cpu_factory(
300, 20., {'last_n': last_n})
vms_cpu = dict((k, v[0]) for k, v in x.items())
vms_ram = dict((k, v[1]) for k, v in x.items())
min_ram = min(vms_ram.values())
min_ram_vms_cpu = dict((k, float(sum(v[-last_n:])) / len(v[-last_n:]))
for k, v in vms_cpu.items()
if vms_ram[k] == min_ram and len(v[-last_n:]) > 0)
values = min_ram_vms_cpu.values()
vm_index = values.index(max(values))
vm = min_ram_vms_cpu.keys()[vm_index]
assert alg(vms_cpu, vms_ram) == ([vm], {})
@qc(10)
def minimum_migration_time_max_cpu_factory_equal_ram(
vms_cpu=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
min_length=1, max_length=5
),
ram=int_(min=1000, max=3000),
last_n=int_(min=1, max=10)
):
alg = selection.minimum_migration_time_max_cpu_factory(
300, 20., {'last_n': last_n})
vms_ram = dict((k, ram) for k, _ in vms_cpu.items())
min_ram = min(vms_ram.values())
min_ram_vms_cpu = dict((k, float(sum(v[-last_n:])) / len(v[-last_n:]))
for k, v in vms_cpu.items()
if vms_ram[k] == min_ram and len(v[-last_n:]) > 0)
values = min_ram_vms_cpu.values()
vm_index = values.index(max(values))
vm = min_ram_vms_cpu.keys()[vm_index]
assert alg(vms_cpu, vms_ram) == ([vm], {})
@qc(10)
def minimum_migration_time(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=int_(min=0, max=3000),
min_length=1, max_length=5
)
):
values = x.values()
vm_index = values.index(min(values))
vm = x.keys()[vm_index]
assert selection.minimum_migration_time(x) == vm
@qc(10)
def minimum_utilization(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
min_length=1, max_length=5
)
):
last_utilization = []
for utilization in x.values():
last_utilization.append(utilization[-1])
vm_index = last_utilization.index(min(last_utilization))
vm = x.keys()[vm_index]
assert selection.minimum_utilization(x) == vm
@qc(10)
def random(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=1, max_length=3
)
):
with MockTransaction:
vm = x.keys()[random.randrange(len(x))]
expect(selection).choice(x.keys()).and_return(vm).once()
assert selection.random(x) == vm
@qc(10)
def minimum_migration_time_max_cpu(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
int_(min=0, max=3000)),
min_length=1, max_length=5
),
last_n=int_(min=1, max=10)
):
vms_cpu = dict((k, v[0]) for k, v in x.items())
vms_ram = dict((k, v[1]) for k, v in x.items())
min_ram = min(vms_ram.values())
min_ram_vms_cpu = dict((k, float(sum(v[-last_n:])) / len(v[-last_n:]))
for k, v in vms_cpu.items()
if vms_ram[k] == min_ram and len(v[-last_n:]) > 0)
values = min_ram_vms_cpu.values()
vm_index = values.index(max(values))
vm = min_ram_vms_cpu.keys()[vm_index]
assert selection.minimum_migration_time_max_cpu(
last_n, vms_cpu, vms_ram) == vm
@qc(10)
def minimum_migration_time_max_cpu_equal_ram(
vms_cpu=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
min_length=1, max_length=5
),
ram=int_(min=1000, max=3000),
last_n=int_(min=1, max=10)
):
vms_ram = dict((k, ram) for k, _ in vms_cpu.items())
min_ram = min(vms_ram.values())
min_ram_vms_cpu = dict((k, float(sum(v[-last_n:])) / len(v[-last_n:]))
for k, v in vms_cpu.items()
if vms_ram[k] == min_ram and len(v[-last_n:]) > 0)
values = min_ram_vms_cpu.values()
vm_index = values.index(max(values))
vm = min_ram_vms_cpu.keys()[vm_index]
assert selection.minimum_migration_time_max_cpu(
last_n, vms_cpu, vms_ram) == vm

View File

@@ -1,191 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import os
import shutil
import libvirt
import neat.common as common
import logging
logging.disable(logging.CRITICAL)
class Common(TestCase):
@qc(10)
def start(iterations=int_(0, 10)):
with MockTransaction:
config = {'option': 'value'}
state = {'property': 'value'}
fn = mock('function container')
expect(fn).init_state(any_dict).and_return(state).once()
expect(fn).execute(any_dict, any_dict). \
and_return(state).exactly(iterations).times()
assert common.start(fn.init_state,
fn.execute,
config,
0,
iterations) == state
@qc(10)
def build_local_vm_path(
x=str_(of='abc123_-/')
):
assert common.build_local_vm_path(x) == os.path.join(x, 'vms')
@qc(10)
def build_local_host_path(
x=str_(of='abc123_-/')
):
assert common.build_local_host_path(x) == os.path.join(x, 'host')
@qc(10)
def physical_cpu_count(x=int_(min=0, max=8)):
with MockTransaction:
connection = libvirt.virConnect()
expect(connection).getInfo().and_return([0, 0, x]).once()
assert common.physical_cpu_count(connection) == x
@qc(10)
def physical_cpu_mhz(x=int_(min=0, max=8)):
with MockTransaction:
connection = libvirt.virConnect()
expect(connection).getInfo().and_return([0, 0, 0, x]).once()
assert common.physical_cpu_mhz(connection) == x
@qc(10)
def physical_cpu_mhz_total(x=int_(min=0, max=8), y=int_(min=0, max=8)):
with MockTransaction:
connection = libvirt.virConnect()
expect(common).physical_cpu_count(connection). \
and_return(x).once()
expect(common).physical_cpu_mhz(connection). \
and_return(y).once()
assert common.physical_cpu_mhz_total(connection) == x * y
def test_frange(self):
self.assertEqual([round(x, 1) for x in common.frange(0, 1.0, 0.5)],
[0.0, 0.5, 1.0])
self.assertEqual([round(x, 1) for x in common.frange(0, 1.0, 0.2)],
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
def test_init_logging(self):
log_dir = os.path.join(
os.path.dirname(__file__), 'resources', 'log')
log_file = 'test.log'
log_path = os.path.join(log_dir, log_file)
with MockTransaction:
logging.root = mock('root')
expect(logging).disable(logging.CRITICAL).once()
expect(logging.root).setLevel.never()
expect(logging.root).addHandler.never()
assert common.init_logging(log_dir, log_file, 0)
with MockTransaction:
shutil.rmtree(log_dir, True)
logging.root = mock('root')
expect(logging).disable.never()
expect(logging.root).setLevel(logging.WARNING).once()
handler = mock('handler')
expect(logging).FileHandler(log_path).and_return(handler).once()
expect(handler).setFormatter.and_return(True).once()
expect(logging).Formatter(
'%(asctime)s %(levelname)-8s %(name)s %(message)s').once()
expect(logging.root).addHandler.once()
assert common.init_logging(log_dir, log_file, 1)
assert os.access(log_dir, os.W_OK)
with MockTransaction:
logging.root = mock('root')
expect(logging).disable.never()
expect(logging.root).setLevel(logging.INFO).once()
handler = mock('handler')
expect(logging).FileHandler(log_path).and_return(handler).once()
expect(handler).setFormatter.and_return(True).once()
expect(logging).Formatter(
'%(asctime)s %(levelname)-8s %(name)s %(message)s').once()
expect(logging.root).addHandler.once()
assert common.init_logging(log_dir, log_file, 2)
assert os.access(log_dir, os.W_OK)
with MockTransaction:
logging.root = mock('root')
expect(logging).disable.never()
expect(logging.root).setLevel(logging.DEBUG).once()
handler = mock('handler')
expect(logging).FileHandler(log_path).and_return(handler).once()
expect(handler).setFormatter.and_return(True).once()
expect(logging).Formatter(
'%(asctime)s %(levelname)-8s %(name)s %(message)s').once()
expect(logging.root).addHandler.once()
assert common.init_logging(log_dir, log_file, 3)
assert os.access(log_dir, os.W_OK)
shutil.rmtree(log_dir, True)
def test_call_function_by_name(self):
with MockTransaction:
arg1 = 'a'
arg2 = 'b'
expect(common).func_to_call(arg1, arg2).and_return('res').once()
assert common.call_function_by_name('neat.common.func_to_call',
[arg1, arg2]) == 'res'
def test_parse_parameters(self):
params = '{"param1": 0.56, "param2": "abc"}'
self.assertEqual(common.parse_parameters(params), {'param1': 0.56,
'param2': 'abc'})
def test_parse_compute_hosts(self):
assert common.parse_compute_hosts('') == []
assert common.parse_compute_hosts('test1, test2') == \
['test1', 'test2']
assert common.parse_compute_hosts('test-1, test_2') == \
['test-1', 'test_2']
assert common.parse_compute_hosts('t1,, t2 , t3') == \
['t1', 't2', 't3']
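# The migration time is expected to be the mean VM RAM size divided by the
# available bandwidth.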
@qc(10)
def calculate_migration_time(
data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=int_(min=1, max=1000),
min_length=1, max_length=10
),
bandwidth=float_(min=1., max=100.)
):
ram = data.values()
migration_time = float(sum(ram)) / len(ram) / bandwidth
assert common.calculate_migration_time(data, bandwidth) == \
migration_time
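# Same property as above, but with Python 2 long values, to check that the
# integer type does not affect the result.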
@qc(10)
def calculate_migration_time_long(
data=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=int_(min=1, max=1000),
min_length=1, max_length=10
),
bandwidth=float_(min=1., max=100.)
):
data = dict([(k, long(v)) for (k, v) in data.iteritems()])
ram = data.values()
migration_time = float(sum(ram)) / len(ram) / bandwidth
assert common.calculate_migration_time(data, bandwidth) == \
migration_time

View File

@ -1,87 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.config as config
import logging
logging.disable(logging.CRITICAL)
class Config(TestCase):
@qc
def read_default_config():
paths = [config.DEFAULT_CONFIG_PATH]
test_config = config.read_config(paths)
assert config.validate_config(test_config, config.REQUIRED_FIELDS)
@qc
def read_config():
paths = [config.DEFAULT_CONFIG_PATH, config.CONFIG_PATH]
test_config = config.read_config(paths)
assert config.validate_config(test_config, config.REQUIRED_FIELDS)
@qc
def validate_valid_config(
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=10)
):
test_config = dict(zip(x, x))
assert config.validate_config(test_config, x)
@qc
def validate_invalid_config(
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=5),
y=list_(of=str_(of='abc123_', max_length=20),
min_length=6, max_length=10)
):
test_config = dict(zip(x, x))
assert not config.validate_config(test_config, y)
@qc(10)
def read_and_validate_valid_config(
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=10)
):
with MockTransaction:
test_config = dict(zip(x, x))
paths = ['path1', 'path2']
expect(config).read_config(paths).and_return(test_config).once()
expect(config).validate_config(test_config, x). \
and_return(True).once()
assert config.read_and_validate_config(paths, x) == test_config
@qc(10)
def read_and_validate_invalid_config(
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=5),
y=list_(of=str_(of='abc123_', max_length=20),
min_length=6, max_length=10)
):
with MockTransaction:
test_config = dict(zip(x, x))
paths = [config.DEFAULT_CONFIG_PATH, config.CONFIG_PATH]
expect(config).read_config(paths).and_return(test_config).once()
expect(config).validate_config(test_config, y). \
and_return(False).once()
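# A configuration that fails validation is expected to surface as a KeyError.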
try:
config.read_and_validate_config(paths, y)
except KeyError:
assert True
else:
assert False

View File

@ -1,355 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import datetime
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class Db(TestCase):
@qc(1)
def insert_select():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='test')
assert db.vms.select().execute().first()['uuid'] == 'test'
db.vm_resource_usage.insert().execute(vm_id=1, cpu_mhz=1000)
assert db.vm_resource_usage.select(). \
execute().first()['cpu_mhz'] == 1000
@qc(10)
def select_cpu_mhz_for_vm(
uuid=str_(of='abc123-', min_length=36, max_length=36),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
for mhz in cpu_mhz:
db.vm_resource_usage.insert().execute(
vm_id=vm_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_vm(uuid, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_vms(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for uuid, data in vms.items():
for value in data:
db.insert_vm_cpu_mhz({uuid: value})
if data:
res[uuid] = data[-1]
assert db.select_last_cpu_mhz_for_vms() == res
@qc(10)
def select_vm_id(
uuid1=str_(of='abc123-', min_length=36, max_length=36),
uuid2=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid1)
vm_id = result.inserted_primary_key[0]
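# An unknown UUID is expected to be inserted on demand, so the second UUID
# receives the next primary key.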
assert db.select_vm_id(uuid1) == vm_id
assert db.select_vm_id(uuid2) == vm_id + 1
@qc(10)
def insert_vm_cpu_mhz(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(int_(min=1, max=3000),
list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)),
min_length=0, max_length=5
)
):
db = db_utils.init_db('sqlite:///:memory:')
initial_data = []
data_to_submit = {}
final_data = {}
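# final_data collects, per VM, the pre-existing samples followed by the newly
# submitted value.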
for uuid, data in vms.items():
vm_id = db.select_vm_id(uuid)
data_to_submit[uuid] = data[0]
final_data[uuid] = list(data[1])
final_data[uuid].append(data[0])
for cpu_mhz in data[1]:
initial_data.append({'vm_id': vm_id,
'cpu_mhz': cpu_mhz})
if initial_data:
db.vm_resource_usage.insert().execute(initial_data)
db.insert_vm_cpu_mhz(data_to_submit)
for uuid, data in final_data.items():
assert db.select_cpu_mhz_for_vm(uuid, 11) == data
@qc(1)
def update_host():
db = db_utils.init_db('sqlite:///:memory:')
db.update_host('host1', 3000, 4, 4000)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3000
assert host['cpu_cores'] == 4
assert host['ram'] == 4000
db.update_host('host1', 3500, 8, 8000L)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3500
assert host['cpu_cores'] == 8
assert host['ram'] == 8000L
@qc(10)
def select_cpu_mhz_for_host(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
for mhz in cpu_mhz:
db.host_resource_usage.insert().execute(
host_id=host_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_host(hostname, n) == cpu_mhz[-n:]
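# Hosts that have no usage samples are expected to be reported with a value of 0.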
@qc(10)
def select_last_cpu_mhz_for_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=5, max_length=10),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for hostname, data in hosts.items():
db.update_host(hostname, 1, 1, 1)
for value in data:
db.insert_host_cpu_mhz(hostname, value)
if data:
res[hostname] = data[-1]
else:
res[hostname] = 0
assert db.select_last_cpu_mhz_for_hosts() == res
@qc(10)
def insert_host_cpu_mhz(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=1, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
db.update_host(hostname, 1, 1, 1)
for value in cpu_mhz:
db.insert_host_cpu_mhz(hostname, value)
assert db.select_cpu_mhz_for_host(hostname, len(cpu_mhz)) == cpu_mhz
@qc(1)
def select_host_characteristics():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_characteristics() == ({}, {}, {})
db.update_host('host1', 3000, 4, 4000)
db.update_host('host2', 3500, 8, 8000)
assert db.select_host_characteristics() == \
({'host1': 3000, 'host2': 3500},
{'host1': 4, 'host2': 8},
{'host1': 4000, 'host2': 8000})
@qc(1)
def select_host_id():
db = db_utils.init_db('sqlite:///:memory:')
host1_id = db.hosts.insert().execute(
hostname='host1',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
host2_id = db.hosts.insert().execute(
hostname='host2',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
assert db.select_host_id('host1') == host1_id
assert db.select_host_id('host2') == host2_id
@qc(1)
def select_host_ids():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_ids() == {}
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
assert db.select_host_ids() == hosts
@qc(1)
def cleanup_vm_resource_usage(
uuid=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
time = datetime.datetime.today()
for i in range(10):
db.vm_resource_usage.insert().execute(
vm_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(10)
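# Cleaning up at second=5 should drop the five oldest samples and keep the rest.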
db.cleanup_vm_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(5, 10)
@qc(1)
def cleanup_host_resource_usage(
hostname=str_(of='abc123', min_length=5, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
time = datetime.datetime.today()
for i in range(10):
db.host_resource_usage.insert().execute(
host_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(10)
db.cleanup_host_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(5, 10)
def test_insert_host_states(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_states({'host1': 0, 'host2': 1})
db.insert_host_states({'host1': 0, 'host2': 0})
db.insert_host_states({'host1': 1, 'host2': 1})
result = db.host_states.select().execute().fetchall()
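# Extract the state values (column index 3) for each host, ordered by row id.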
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [0, 0, 1])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [1, 0, 1])
@qc(10)
def select_host_states(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data:
res[host] = data[-1]
else:
res[host] = 1
assert db.select_host_states() == res
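# Hosts with no recorded state are treated as active by default.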
@qc(10)
def select_active_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 1 or not data:
res.append(host)
assert set(db.select_active_hosts()) == set(res)
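# Only hosts whose most recent recorded state is 0 should be reported as inactive.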
@qc(10)
def select_inactive_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 0:
res.append(host)
assert set(db.select_inactive_hosts()) == set(res)
def test_insert_host_overload(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_overload('host2', False)
db.insert_host_overload('host1', True)
db.insert_host_overload('host1', False)
db.insert_host_overload('host2', True)
result = db.host_overload.select().execute().fetchall()
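# host_overload rows follow the (id, host_id, timestamp, overload) layout,
# so index 3 holds the overload flag.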
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [1, 0])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [0, 1])
@qc(1)
def insert_vm_migration():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='x' * 36).inserted_primary_key[0]
vm_id = db.vms.insert().execute(uuid='vm' * 18).inserted_primary_key[0]
host_id = db.update_host('host', 1, 1, 1)
db.insert_vm_migration('vm' * 18, 'host')
result = db.vm_migrations.select().execute().first()
assert result[1] == vm_id
assert result[2] == host_id

View File

@ -1,61 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
from sqlalchemy import *
import neat.db
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class DbUtils(TestCase):
@qc(1)
def init_db():
db = db_utils.init_db('sqlite:///:memory:')
assert type(db) is neat.db.Database
assert isinstance(db.hosts, Table)
assert isinstance(db.vms, Table)
assert isinstance(db.vm_resource_usage, Table)
assert isinstance(db.host_states, Table)
assert db.hosts.c.keys() == \
['id', 'hostname', 'cpu_mhz', 'cpu_cores', 'ram']
assert db.host_resource_usage.c.keys() == \
['id', 'host_id', 'timestamp', 'cpu_mhz']
assert list(db.host_resource_usage.foreign_keys)[0].target_fullname \
== 'hosts.id'
assert db.vms.c.keys() == \
['id', 'uuid']
assert db.vm_resource_usage.c.keys() == \
['id', 'vm_id', 'timestamp', 'cpu_mhz']
assert list(db.vm_resource_usage.foreign_keys)[0].target_fullname \
== 'vms.id'
assert db.vm_migrations.c.keys() == \
['id', 'vm_id', 'host_id', 'timestamp']
keys = set([list(db.vm_migrations.foreign_keys)[0].target_fullname,
list(db.vm_migrations.foreign_keys)[1].target_fullname])
assert keys == set(['vms.id', 'hosts.id'])
assert db.host_states.c.keys() == \
['id', 'host_id', 'timestamp', 'state']
assert list(db.host_states.foreign_keys)[0].target_fullname \
== 'hosts.id'
assert db.host_overload.c.keys() == \
['id', 'host_id', 'timestamp', 'overload']
assert list(db.host_overload.foreign_keys)[0].target_fullname \
== 'hosts.id'