Container flavor settings and Vswap

* This allows defining exact vz configs for a specific flavor, instead
  of the driver calculating them.  Future flavor-specific vz configs
  can be defined through nova flavor extra_specs (metadata) instead of
  adding configs in the driver.

  Current vz configs that can be defined through flavor extra specs
  (see the sketch below):
  - 'vz_config_file': vz config file to initially apply
  - 'vz_cpulimit': vz cpulimit
  - 'vz_bandwidth': tc bandwidth limit
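
  A minimal sketch of setting these keys with python-novaclient (flavor
  name, credentials, and values below are hypothetical):

      from novaclient.v1_1 import client

      nova = client.Client('user', 'password', 'project',
                           'http://keystone:5000/v2.0')
      flavor = nova.flavors.find(name='m1.small')
      flavor.set_keys({
          'vz_config_file': 've-vswap-sample',  # applied via --applyconfig
          'vz_cpulimit': '200',                 # overrides calculated limit
          'vz_bandwidth': '40',                 # tc bandwidth in Mbit
      })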

* Vswap is now the default memory management model; User Beancounters
  (UBC) remain available via the new ovz_use_ubc config option.

* Refactored to use the new Container class.  A new resource_manager
  class (VZResourceManager) now centralizes most vz calculations and
  config logic (vz networking has been left alone); see the call-flow
  sketch below.
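
  A rough sketch of the new spawn-time call flow (names taken from this
  change; error handling omitted):

      # inside OpenVzDriver.spawn, after OvzContainer.create(...)
      container.set_vz_os_hint(ostemplate='ubuntu')
      # apply a base config first; individual parameters may be
      # overwritten by the resource calls that follow
      self.resource_manager.apply_config(
          context, container, instance['instance_type_id'])
      self.resource_manager.configure_container_resources(
          context, container, instance['instance_type_id'])
      self.resource_manager.configure_container_network(
          container, network_info)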

* Got rid of reset_instance_size since it's not used by the
  nova-compute manager.

* The vz config that is initially applied is now a fresh container
  config, driven by flavor extra_specs.

Change-Id: I125682bbe89cedec08f328394ebb402a5c079b6d
Author: Joe Cruz  2014-03-20 17:56:56 -05:00 (committed by Joe Cruz)
parent fbcaf28eee
commit e62a053881
4 changed files with 713 additions and 553 deletions

ovznovadriver/openvz/container.py

@ -16,6 +16,7 @@
import json
from nova import exception
from nova.compute import flavors
from nova.openstack.common import log as logging
from oslo.config import cfg
from ovznovadriver.localization import _
@ -221,6 +222,263 @@ class OvzContainer(object):
"""
ovz_utils.execute('vzctl', 'destroy', self.ovz_id, run_as_root=True)
def apply_config(self, config):
"""
This adds the container root into the vz meta data so that
OpenVz acknowledges it as a container.
Run the command:
vzctl set <ctid> --save --applyconfig <config>
This sets the default configuration file for openvz containers. This
is a requisite step in making a container from an image tarball.
If this fails to run successfully an exception is raised because the
container this executes against requires a base config to start.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--applyconfig', config, run_as_root=True)
def set_vz_os_hint(self, ostemplate='ubuntu'):
"""
This exists as a stopgap because currently there are no os hints
in the image management of nova. There are ways of hacking it in
via image_properties but this requires special case code just for
this driver.
Run the command:
vzctl set <ctid> --save --ostemplate <ostemplate>
Currently ostemplate defaults to ubuntu. This facilitates setting
the ostemplate setting in OpenVZ to allow the OpenVz helper scripts
to setup networking, nameserver and hostnames. Because of this, the
openvz driver only works with debian based distros.
If this fails to run an exception is raised as this is a critical piece
in making openvz run a container.
"""
# This sets the distro hint for OpenVZ to later use for the setting
# of resolver, hostname and the like
# TODO(imsplitbit): change the ostemplate default value to a flag
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--ostemplate', ostemplate, run_as_root=True)
def set_numflock(self, max_file_descriptors):
"""
Run the command:
vzctl set <ctid> --save --numflock <number>
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--numflock', max_file_descriptors,
run_as_root=True)
def set_numfiles(self, max_file_descriptors):
"""
Run the command:
vzctl set <ctid> --save --numfile <number>
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--numfile', max_file_descriptors,
run_as_root=True)
# TODO(jcru) looks like this method is not being used anywhere, delete?
# TODO(jcru) extract calculations from here and only pass tcp_sockets to
# function.
def set_numtcpsock(self, memory_mb):
"""
Run the command:
vzctl set <ctid> --save --numtcpsock <number>
:param memory_mb: container memory in MB, used to look up the tcp
    socket count in CONF.ovz_numtcpsock_map
"""
try:
tcp_sockets = CONF.ovz_numtcpsock_map[str(memory_mb)]
except (ValueError, TypeError, KeyError, cfg.NoSuchOptError):
LOG.error(_('There was no acceptable tcpsocket number found '
'defaulting to %s') % CONF.ovz_numtcpsock_default)
tcp_sockets = CONF.ovz_numtcpsock_default
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--numtcpsock', tcp_sockets, run_as_root=True)
def set_vmguarpages(self, num_pages):
"""
Set the vmguarpages attribute for a container. This number represents
the number of 4k blocks of memory that are guaranteed to the container.
This is what shows up when you run the command 'free' in the container.
Run the command:
vzctl set <ctid> --save --vmguarpages <num_pages>
If this fails to run then an exception is raised because this affects
the memory allocation for the container.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--vmguarpages', num_pages, run_as_root=True)
def set_privvmpages(self, num_pages):
"""
Set the privvmpages attribute for a container. This represents the
memory allocation limit. Think of this as a bursting limit. For now
we are setting it to the same as vmguarpages but in the future this can
used to thin provision a box.
Run the command:
vzctl set <ctid> --save --privvmpages <num_pages>
If this fails to run an exception is raised as this is essential for
the running container to operate properly within its memory
constraints.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--privvmpages', num_pages, run_as_root=True)
def set_kmemsize(self, kmem_barrier, kmem_limit):
"""
Set the kmemsize attribute for a container. This represents the
amount of the container's memory allocation that will be made
available to the kernel. This is used for tcp connections, unix
sockets and the like.
This runs the command:
vzctl set <ctid> --save --kmemsize <barrier>:<limit>
If this fails to run an exception is raised as this is essential for
the container to operate under a normal load. Defaults for this
setting are completely inadequate for any normal workload.
"""
kmemsize = '%d:%d' % (kmem_barrier, kmem_limit)
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--kmemsize', kmemsize, run_as_root=True)
def set_cpuunits(self, units):
"""
Set the cpuunits setting for the container. This is an integer
representing the number of cpu fair scheduling counters that the
container has access to during one complete cycle.
Run the command:
vzctl set <ctid> --save --cpuunits <units>
If this fails to run an exception is raised because this is the secret
sauce to constraining each container within its subscribed slice of
the host node.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--cpuunits', units, run_as_root=True)
def set_cpulimit(self, cpulimit):
"""
This is a number in % equal to the amount of cpu processing power
the container gets. NOTE: 100% is 1 logical cpu so if you have 12
cores with hyperthreading enabled then 100% of the whole host machine
would be 2400% or --cpulimit 2400.
Run the command:
vzctl set <ctid> --save --cpulimit <cpulimit>
If this fails to run an exception is raised because this is the secret
sauce to constraining each container within its subscribed slice of
the host node.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--cpulimit', cpulimit, run_as_root=True)
def set_cpus(self, vcpus):
"""
The number of logical cpus that are made available to the container.
Default to showing 2 cpus to each container at a minimum.
Run the command:
vzctl set <ctid> --save --cpus <num_cpus>
If this fails to run an exception is raised because this limits the
number of cores that are presented to each container and, if this fails
to set, *ALL* cores will be presented to every container, which would
be bad.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save', '--cpus',
vcpus, run_as_root=True)
def set_ioprio(self, ioprio):
"""
Set the IO priority setting for a given container. This is represented
by an integer between 0 and 7.
Run the command:
vzctl set <ctid> --save --ioprio <iopriority>
If this fails to run an exception is raised because all containers are
given the same weight by default which will cause bad performance
across all containers when there is input/output contention.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--ioprio', ioprio, run_as_root=True)
def set_diskspace(self, soft_limit, hard_limit):
"""
Implement OpenVz disk quotas for local disk space usage.
This method takes a soft and hard limit. This is also the amount
of diskspace that is reported by system tools such as du and df inside
the container. If no argument is given then one will be calculated
based on the values in the instance_types table within the database.
Run the command:
vzctl set <ctid> --save --diskspace <soft_limit:hard_limit>
If this fails to run an exception is raised because this command
limits a container's ability to hijack all available disk space.
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--diskspace', '%s:%s' % (soft_limit, hard_limit),
run_as_root=True)
def set_vswap(self, ram, swap):
"""
Implement OpenVz vswap memory management model (The other being user
beancounters).
The sum of physpages_limit and swappages_limit limits the maximum
amount of memory which can be used by a container. When physpages limit
is reached, memory pages belonging to the container are pushed out to
so-called virtual swap (vswap). The difference between normal swap and
vswap is that with vswap no actual disk I/O usually occurs. Instead, a
container is artificially slowed down, to emulate the effect of the
real swapping. Actual swap out occurs only if there is a global memory
shortage on the system.
Run the command:
vzctl set <ctid> --save --ram <physpages_limit> --swap <swappages_limit>
"""
ovz_utils.execute('vzctl', 'set', self.ovz_id, '--save',
'--ram', ram, '--swap', swap, run_as_root=True)
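# Example (hypothetical CTID and flavor): a 512MB flavor with 1GB of
# swap results in:  vzctl set <ctid> --save --ram 512M --swap 1G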
class OvzContainers(object):
@classmethod

ovznovadriver/openvz/driver.py

@ -22,7 +22,6 @@ is sketchy at best.
import base64
import fnmatch
import json
import math
from nova.compute import power_state
from nova import context
from nova import exception
@ -44,6 +43,7 @@ from ovznovadriver.openvz import network as ovznetwork
from ovznovadriver.openvz.network_drivers import tc as ovztc
from ovznovadriver.openvz import utils as ovz_utils
from ovznovadriver.openvz.volume_drivers import iscsi as ovziscsi
from ovznovadriver.openvz.resources import VZResourceManager
import os
from oslo.config import cfg
import socket
@ -68,9 +68,6 @@ openvz_conn_opts = [
cfg.StrOpt('ovz_bridge_device',
default='br100',
help='Bridge device to map veth devices to'),
cfg.StrOpt('ovz_disk_space_increment',
default='G',
help='Disk subscription increment'),
cfg.StrOpt('ovz_vif_driver',
default='ovznovadriver.openvz.network_drivers'
'.network_bridge.OVZNetworkBridgeDriver',
@ -113,25 +110,9 @@ openvz_conn_opts = [
cfg.BoolOpt('ovz_vzmigrate_verbose_migration_logging',
default=True,
help='Log verbose messages from vzmigrate command'),
cfg.BoolOpt('ovz_use_cpuunit',
default=True,
help='Use OpenVz cpuunits for guaranteed minimums'),
cfg.BoolOpt('ovz_use_cpulimit',
default=True,
help='Use OpenVz cpulimit for maximum cpu limits'),
cfg.BoolOpt('ovz_use_cpus',
default=True,
help='Use OpenVz cpus for max cpus '
'available to the container'),
cfg.BoolOpt('ovz_use_ioprio',
default=True,
help='Use IO fair scheduling'),
cfg.BoolOpt('ovz_disk_space_oversub',
default=True,
help='Allow over subscription of local disk'),
cfg.BoolOpt('ovz_use_disk_quotas',
default=True,
help='Use disk quotas to contain disk usage'),
cfg.BoolOpt('ovz_use_veth_devs',
default=True,
help='Use veth devices rather than venet'),
@ -141,23 +122,10 @@ openvz_conn_opts = [
cfg.BoolOpt('ovz_use_bind_mount',
default=False,
help='Use bind mounting instead of simfs'),
cfg.IntOpt('ovz_ioprio_limit',
default=7,
help='Limit for IO priority weighting'),
cfg.IntOpt('ovz_system_num_tries',
default=3,
help='Number of attempts to make when '
'running a system command'),
cfg.IntOpt('ovz_kmemsize_percent_of_memory',
default=20,
help='Percent of memory of the container to allow to be used '
'by the kernel'),
cfg.IntOpt('ovz_kmemsize_barrier_differential',
default=10,
help='Difference of kmemsize barrier vs limit'),
cfg.IntOpt('ovz_memory_unit_size',
default=512,
help='Unit size in MB'),
cfg.IntOpt('ovz_tc_id_max',
default=9999,
help='Max TC id to be used in generating a new id'),
@ -167,22 +135,12 @@ openvz_conn_opts = [
cfg.IntOpt('ovz_tc_max_line_speed',
default=1000,
help='Line speed in Mbit'),
cfg.IntOpt('ovz_file_descriptors_per_unit',
default=4096,
help='Max open file descriptors per memory unit'),
cfg.IntOpt('ovz_rsync_iterations',
default=1,
help='Number of times to rsync a container when migrating'),
cfg.IntOpt('ovz_numtcpsock_default',
default=2000,
help='Default number of tcp sockets to give each container'),
cfg.FloatOpt('ovz_disk_space_oversub_percent',
default=1.10,
help='Local disk over subscription percentage'),
cfg.FloatOpt('ovz_cpulimit_overcommit_multiplier',
default=1.0,
help='Multiplier for cpulimit to facilitate over '
'committing cpu resources'),
cfg.DictOpt('ovz_numtcpsock_map',
default={"8192": 3000, "1024": 2000, "4096": 2000,
"2048": 2000, "16384": 4000, "512": 2000},
@ -197,15 +155,13 @@ LOG = logging.getLogger('ovznovadriver.openvz.driver')
class OpenVzDriver(driver.ComputeDriver):
# OpenVz sets the upper limit of cpuunits to 500000
MAX_CPUUNITS = 500000
def __init__(self, virtapi, read_only=False):
"""
Create an instance of the openvz connection.
"""
super(OpenVzDriver, self).__init__(virtapi)
self.utility = dict()
self.resource_manager = VZResourceManager(virtapi)
self.host_stats = dict()
self._initiator = None
self.host = None
@ -224,7 +180,7 @@ class OpenVzDriver(driver.ComputeDriver):
self.host = host
LOG.debug(_('Determining the computing power of the host'))
self._get_cpulimit()
self.resource_manager.get_cpulimit()
self._refresh_host_stats()
LOG.debug(_('Flushing host TC rules if there are any'))
@ -345,7 +301,7 @@ class OpenVzDriver(driver.ComputeDriver):
LOG.debug(_('instance %s: is building') % instance['name'])
# Get current usages and resource availability.
self._get_cpuunits_usage()
self.resource_manager.get_cpuunits_usage()
# Go through the steps of creating a container
# TODO(imsplitbit): Need to add conditionals around this stuff to make
@ -358,9 +314,13 @@ class OpenVzDriver(driver.ComputeDriver):
name=instance['name'],
nova_id=instance['id'],
)
# TODO(jimbobhickville) - move this stuff to OvzContainer
self._set_vz_os_hint(container)
self._configure_vz(container)
# TODO(jcru) change ostemplate='ubuntu' to config
container.set_vz_os_hint(ostemplate='ubuntu')
# A config file may be applied here, but individual config
# parameters may be overwritten afterwards
self.resource_manager.apply_config(context, container,
instance['instance_type_id'])
# instance.system_metadata will be saved by nova.compute.manager
# after this method returns (driver.spawn)
@ -383,7 +343,8 @@ class OpenVzDriver(driver.ComputeDriver):
# TODO(jimbobhickville) - move this stuff to OvzContainer
self._set_hostname(container, hostname=instance['hostname'])
self._set_instance_size(instance)
self.resource_manager.configure_container_resources(context, container,
instance['instance_type_id'])
self._set_onboot(container)
if block_device_info:
@ -422,32 +383,7 @@ class OpenVzDriver(driver.ComputeDriver):
timer.f = _wait_for_boot
return timer.start(interval=0.5)
def _set_vz_os_hint(self, container, ostemplate='ubuntu'):
"""
This exists as a stopgap because currently there are no os hints
in the image management of nova. There are ways of hacking it in
via image_properties but this requires special case code just for
this driver.
Run the command:
vzctl set <ctid> --save --ostemplate <ostemplate>
Currently ostemplate defaults to ubuntu. This facilitates setting
the ostemplate setting in OpenVZ to allow the OpenVz helper scripts
to setup networking, nameserver and hostnames. Because of this, the
openvz driver only works with debian based distros.
If this fails to run an exception is raised as this is a critical piece
in making openvz run a container.
"""
# This sets the distro hint for OpenVZ to later use for the setting
# of resolver, hostname and the like
# TODO(imsplitbit): change the ostemplate default value to a flag
ovz_utils.execute('vzctl', 'set', container.ovz_id, '--save',
'--ostemplate', ostemplate, run_as_root=True)
def _cache_image(self, context, instance):
"""
@ -468,25 +404,6 @@ class OpenVzDriver(driver.ComputeDriver):
else:
return False
def _configure_vz(self, container, config='basic'):
"""
This adds the container root into the vz meta data so that
OpenVz acknowledges it as a container. Punting to a basic
config for now.
Run the command:
vzctl set <ctid> --save --applyconfig <config>
This sets the default configuration file for openvz containers. This
is a requisite step in making a container from an image tarball.
If this fails to run successfully an exception is raised because the
container this executes against requires a base config to start.
"""
ovz_utils.execute('vzctl', 'set', container.ovz_id, '--save',
'--applyconfig', config, run_as_root=True)
def _set_onboot(self, container):
"""
Method to set the onboot status of the instance. This is done
@ -699,385 +616,6 @@ class OpenVzDriver(driver.ComputeDriver):
self._attach_volumes(instance, block_device_info)
self._start(instance)
def reset_instance_size(self, instance, restart_instance=False):
"""
Public method for changing an instance back to its original
flavor spec. If this fails an exception is raised because this
means that the instance flavor setting couldn't be rescued.
"""
try:
self._set_instance_size(instance)
if restart_instance:
self.reboot(instance, None, None, None, None)
return True
except exception.InstanceUnacceptable:
raise exception.InstanceUnacceptable(
_("Instance size reset FAILED"))
def _set_numflock(self, instance, max_file_descriptors):
"""
Run the command:
vzctl set <ctid> --save --numflock <number>
"""
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--numflock', max_file_descriptors,
run_as_root=True)
def _set_numfiles(self, instance, max_file_descriptors):
"""
Run the command:
vzctl set <ctid> --save --numfile <number>
"""
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--numfile', max_file_descriptors,
run_as_root=True)
def _set_numtcpsock(self, instance, memory_mb):
"""
Run the command:
vzctl set <ctid> --save --numtcpsock <number>
:param instance:
:return:
"""
try:
tcp_sockets = CONF.ovz_numtcpsock_map[str(memory_mb)]
except (ValueError, TypeError, KeyError, cfg.NoSuchOptError):
LOG.error(_('There was no acceptable tcpsocket number found '
'defaulting to %s') % CONF.ovz_numtcpsock_default)
tcp_sockets = CONF.ovz_numtcpsock_default
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--numtcpsock', tcp_sockets, run_as_root=True)
def _set_instance_size(self, instance, network_info=None,
is_migration=False):
"""
Given that these parameters make up an instance's 'size' we are
bundling them together to make resizing an instance on the host
an easier task.
"""
instance_size = ovz_utils.format_system_metadata(
instance['system_metadata'])
LOG.debug(_('Instance system metadata: %s') % instance_size)
if is_migration:
instance_memory_mb = instance_size.get(
'new_instance_type_memory_mb', None)
if not instance_memory_mb:
instance_memory_mb = instance_size.get(
'instance_type_memory_mb')
instance_vcpus = instance_size.get('new_instance_type_vcpus', None)
if not instance_vcpus:
instance_vcpus = instance_size.get('instance_type_vcpus')
instance_root_gb = instance_size.get(
'new_instance_type_root_gb', None)
if not instance_root_gb:
instance_root_gb = instance_size.get('instance_type_root_gb')
else:
instance_memory_mb = instance_size.get('instance_type_memory_mb')
instance_vcpus = instance_size.get('instance_type_vcpus')
instance_root_gb = instance_size.get('instance_type_root_gb')
instance_memory_mb = int(instance_memory_mb)
instance_vcpus = int(instance_vcpus)
instance_root_gb = int(instance_root_gb)
instance_memory_bytes = ((instance_memory_mb * 1024) * 1024)
instance_memory_pages = self._calc_pages(instance_memory_mb)
percent_of_resource = self._percent_of_resource(instance_memory_mb)
memory_unit_size = int(CONF.ovz_memory_unit_size)
max_fd_per_unit = int(CONF.ovz_file_descriptors_per_unit)
max_fd = int(instance_memory_mb / memory_unit_size) * max_fd_per_unit
self._set_vmguarpages(instance, instance_memory_pages)
self._set_privvmpages(instance, instance_memory_pages)
self._set_kmemsize(instance, instance_memory_bytes)
self._set_numfiles(instance, max_fd)
self._set_numflock(instance, max_fd)
if CONF.ovz_use_cpuunit:
self._set_cpuunits(instance, percent_of_resource)
if CONF.ovz_use_cpulimit:
self._set_cpulimit(instance, percent_of_resource)
if CONF.ovz_use_cpus:
self._set_cpus(instance, instance_vcpus)
if CONF.ovz_use_ioprio:
self._set_ioprio(instance, instance_memory_mb)
if CONF.ovz_use_disk_quotas:
self._set_diskspace(instance, instance_root_gb)
if network_info:
self._generate_tc_rules(instance, network_info, is_migration)
def _generate_tc_rules(self, instance, network_info, is_migration=False):
"""
Utility method to generate tc info for instances that have been
resized and/or migrated
"""
LOG.debug(_('Setting network sizing'))
container = OvzContainer.find(uuid=instance['uuid'])
bf = ovzboot.OVZBootFile(container.ovz_id, 755)
sf = ovzshutdown.OVZShutdownFile(container.ovz_id, 755)
if not is_migration:
with sf:
LOG.debug(_('Cleaning TC rules for %s') % instance['id'])
sf.read()
sf.run_contents(raise_on_error=False)
# On resize we throw away existing tc_id and make a new one
# because the resize *could* have taken place on a different host
# where the tc_id is already in use.
meta = ovz_utils.read_instance_metadata(instance['id'])
tc_id = meta.get('tc_id', None)
if tc_id:
ovz_utils.remove_instance_metadata_key(instance['id'], 'tc_id')
with sf:
sf.set_contents(list())
with bf:
bf.set_contents(list())
LOG.debug(_('Getting network dict for: %s') % container.uuid)
interfaces = ovz_utils.generate_network_dict(container,
network_info)
for net_dev in interfaces:
LOG.debug(_('Adding tc rules for: %s') %
net_dev['vz_host_if'])
tc = ovztc.OVZTcRules()
tc.instance_info(instance['id'], net_dev['address'],
net_dev['vz_host_if'])
with bf:
bf.append(tc.container_start())
with sf:
sf.append(tc.container_stop())
with bf:
if not is_migration:
# during migration, the instance isn't yet running, so applying
# these rules before then would just spew errors
LOG.debug(_('Running TC rules for: %s') % instance['uuid'])
bf.run_contents()
LOG.debug(_('Saving TC rules for: %s') % instance['uuid'])
bf.write()
with sf:
sf.write()
def _set_vmguarpages(self, instance, num_pages):
"""
Set the vmguarpages attribute for a container. This number represents
the number of 4k blocks of memory that are guaranteed to the container.
This is what shows up when you run the command 'free' in the container.
Run the command:
vzctl set <ctid> --save --vmguarpages <num_pages>
If this fails to run then an exception is raised because this affects
the memory allocation for the container.
"""
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--vmguarpages', num_pages, run_as_root=True)
def _set_privvmpages(self, instance, num_pages):
"""
Set the privvmpages attribute for a container. This represents the
memory allocation limit. Think of this as a bursting limit. For now
we are setting it to the same as vmguarpages but in the future this can
used to thin provision a box.
Run the command:
vzctl set <ctid> --save --privvmpages <num_pages>
If this fails to run an exception is raised as this is essential for
the running container to operate properly within its memory
constraints.
"""
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--privvmpages', num_pages, run_as_root=True)
def _set_kmemsize(self, instance, instance_memory):
"""
Set the kmemsize attribute for a container. This represents the
amount of the container's memory allocation that will be made
available to the kernel. This is used for tcp connections, unix
sockets and the like.
This runs the command:
vzctl set <ctid> --save --kmemsize <barrier>:<limit>
If this fails to run an exception is raised as this is essential for
the container to operate under a normal load. Defaults for this
setting are completely inadequate for any normal workload.
"""
# Now use the configuration CONF to calculate the appropriate
# values for both barrier and limit.
kmem_limit = int(instance_memory * (
float(CONF.ovz_kmemsize_percent_of_memory) / 100.0))
kmem_barrier = int(kmem_limit * (
float(CONF.ovz_kmemsize_barrier_differential) / 100.0))
kmemsize = '%d:%d' % (kmem_barrier, kmem_limit)
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--kmemsize', kmemsize, run_as_root=True)
def _set_cpuunits(self, instance, percent_of_resource):
"""
Set the cpuunits setting for the container. This is an integer
representing the number of cpu fair scheduling counters that the
container has access to during one complete cycle.
Run the command:
vzctl set <ctid> --save --cpuunits <units>
If this fails to run an exception is raised because this is the secret
sauce to constraining each container within its subscribed slice of
the host node.
"""
LOG.debug(_('Reported cpuunits %s') % self.MAX_CPUUNITS)
LOG.debug(_('Reported percent of resource: %s') % percent_of_resource)
units = int(round(self.MAX_CPUUNITS * percent_of_resource))
if units > self.MAX_CPUUNITS:
units = self.MAX_CPUUNITS
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--cpuunits', units, run_as_root=True)
def _set_cpulimit(self, instance, percent_of_resource):
"""
This is a number in % equal to the amount of cpu processing power
the container gets. NOTE: 100% is 1 logical cpu so if you have 12
cores with hyperthreading enabled then 100% of the whole host machine
would be 2400% or --cpulimit 2400.
Run the command:
vzctl set <ctid> --save --cpulimit <cpulimit>
If this fails to run an exception is raised because this is the secret
sauce to constraining each container within its subscribed slice of
the host node.
"""
cpulimit = int(round(
(self.utility['CPULIMIT'] * percent_of_resource) *
CONF.ovz_cpulimit_overcommit_multiplier))
if cpulimit > self.utility['CPULIMIT']:
cpulimit = self.utility['CPULIMIT']
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--cpulimit', cpulimit, run_as_root=True)
def _set_cpus(self, instance, vcpus):
"""
The number of logical cpus that are made available to the container.
Default to showing 2 cpus to each container at a minimum.
Run the command:
vzctl set <ctid> --save --cpus <num_cpus>
If this fails to run an exception is raised because this limits the
number of cores that are presented to each container and, if this fails
to set, *ALL* cores will be presented to every container, which would
be bad.
"""
vcpus = int(vcpus)
LOG.debug(_('VCPUs: %s') % vcpus)
utility_cpus = self.utility['CPULIMIT'] / 100
if vcpus > utility_cpus:
LOG.debug(
_('OpenVZ thinks vcpus "%(vcpus)s" '
'is greater than "%(utility_cpus)s"') % locals())
# We can't set cpus higher than the number of actual logical cores
# on the system so set a cap here
vcpus = self.utility['CPULIMIT'] / 100
LOG.debug(_('VCPUs: %s') % vcpus)
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save', '--cpus',
vcpus, run_as_root=True)
def _set_ioprio(self, instance, memory_mb):
"""
Set the IO priority setting for a given container. This is represented
by an integer between 0 and 7.
Run the command:
vzctl set <ctid> --save --ioprio <iopriority>
If this fails to run an exception is raised because all containers are
given the same weight by default which will cause bad performance
across all containers when there is input/output contention.
"""
# The old algorithm made it impossible to distinguish between a
# 512MB container and a 2048MB container for IO priority. We will
# for now follow a simple map to create a more non-linear
# relationship between the flavor sizes and their IO priority groups
# The IO priority of a container is grouped in 1 of 8 groups ranging
# from 0 to 7. We can calculate an appropriate value by finding out
# how many ovz_memory_unit_size chunks are in the container's memory
# allocation and then using python's math library to solve for that
# number's logarithm.
num_chunks = int(int(memory_mb) / CONF.ovz_memory_unit_size)
try:
ioprio = int(round(math.log(num_chunks, 2)))
except ValueError:
ioprio = 0
if ioprio > 7:
# ioprio can't be higher than 7 so set a ceiling
ioprio = 7
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--ioprio', ioprio, run_as_root=True)
def _set_diskspace(self, instance, root_gb):
"""
Implement OpenVz disk quotas for local disk space usage.
This method takes a soft and hard limit. This is also the amount
of diskspace that is reported by system tools such as du and df inside
the container. If no argument is given then one will be calculated
based on the values in the instance_types table within the database.
Run the command:
vzctl set <ctid> --save --diskspace <soft_limit:hard_limit>
If this fails to run an exception is raised because this command
limits a container's ability to hijack all available disk space.
"""
soft_limit = int(root_gb)
hard_limit = int(soft_limit * CONF.ovz_disk_space_oversub_percent)
# Now set the increment of the limit. I do this here so that I don't
# have to do this in every line above.
soft_limit = '%s%s' % (soft_limit, CONF.ovz_disk_space_increment)
hard_limit = '%s%s' % (hard_limit, CONF.ovz_disk_space_increment)
ovz_utils.execute('vzctl', 'set', instance['uuid'], '--save',
'--diskspace', '%s:%s' % (soft_limit, hard_limit),
run_as_root=True)
def _setup_networking(self, container, network_info):
"""
Create the vifs for the container's virtual networking. This should
@ -1591,74 +1129,6 @@ class OpenVzDriver(driver.ComputeDriver):
'num_cpu': 0,
'cpu_time': 0}
def _calc_pages(self, instance_memory, block_size=4096):
"""
Returns the number of pages for a given size of storage/memory
"""
return ((int(instance_memory) * 1024) * 1024) / block_size
def _percent_of_resource(self, instance_memory):
"""
In order to evenly distribute resources this method will calculate a
multiplier based on memory consumption for the allocated container and
the overall host memory. This can then be applied to the cpuunits in
self.utility to be passed as an argument to the self._set_cpuunits
method to limit cpu usage of the container to an accurate percentage of
the host. This is only done on self.spawn so that later, should
someone choose to do so, they can adjust the container's cpu usage
up or down.
"""
cont_mem_mb = (
float(instance_memory) / float(ovz_utils.get_memory_mb_total()))
# We shouldn't ever have more than 100% but if for some unforeseen
# reason we do, lets limit it to 1 to make all of the other
# calculations come out clean.
if cont_mem_mb > 1:
LOG.error(_('_percent_of_resource came up with more than 100%'))
return 1.0
else:
return cont_mem_mb
def _get_cpulimit(self):
"""
Fetch the total possible cpu processing limit in percentage to be
divided up across all containers. This is expressed in percentage
being added up by logical processor. If there are 24 logical
processors then the total cpulimit for the host node will be
2400.
"""
self.utility['CPULIMIT'] = ovz_utils.get_vcpu_total() * 100
LOG.debug(_('Updated cpulimit in utility'))
LOG.debug(
_('Current cpulimit in utility: %s') % self.utility['CPULIMIT'])
def _get_cpuunits_usage(self):
"""
Use openvz tools to discover the total used processing power. This is
done using the vzcpucheck -v command.
Run the command:
vzcpucheck -v
If this fails to run an exception should not be raised as this is a
soft error and results only in not knowing the current cpuunit
usage of each container.
"""
out = ovz_utils.execute(
'vzcpucheck', '-v', run_as_root=True, raise_on_error=False)
if out:
for line in out.splitlines():
line = line.split()
if len(line) > 0:
if line[0].isdigit():
LOG.debug(_('Usage for CTID %(id)s: %(usage)s') %
{'id': line[0], 'usage': line[1]})
if int(line[0]) not in self.utility.keys():
self.utility[int(line[0])] = dict()
self.utility[int(line[0])] = int(line[1])
def get_available_resource(self, nodename):
"""Retrieve resource info.
@ -1802,7 +1272,12 @@ class OpenVzDriver(driver.ComputeDriver):
# This is a resize on the same host so it's simple, resize
# in place and then exit the method
LOG.debug(_('Finishing resize-in-place for %s') % instance['uuid'])
self._set_instance_size(instance, network_info, False)
container = OvzContainer.find(uuid=instance['uuid'])
self.resource_manager.configure_container_resources(context,
container, instance['instance_type_id'])
self.resource_manager.configure_container_network(container,
network_info, is_migration=False)
return
if block_device_info:
@ -1824,23 +1299,28 @@ class OpenVzDriver(driver.ComputeDriver):
# volumes.
self._attach_volumes(instance, block_device_info)
container = OvzContainer.find(uuid=instance['uuid'])
if resize_instance:
LOG.debug(_('A resize after migration was requested: %s') %
instance['uuid'])
self._set_instance_size(instance, network_info, True)
self.resource_manager.configure_container_resources(context,
container, instance['instance_type_id'])
self.resource_manager.configure_container_network(container,
network_info, is_migration=True)
LOG.debug(_('Resized instance after migration: %s') %
instance['uuid'])
else:
LOG.debug(_('Regenerating TC rules for instance %s') %
instance['uuid'])
self._generate_tc_rules(instance, network_info, True)
self.resource_manager.configure_container_network(container,
network_info, is_migration=True)
LOG.debug(_('Regenerated TC rules for instance %s') %
instance['uuid'])
if not live_migration:
self._start(instance)
container = OvzContainer.find(uuid=instance['uuid'])
# Some data gets lost in the migration, make sure ovz has current info
container.save_ovz_metadata()
# instance.system_metadata will be saved by nova.compute.manager
@ -1966,13 +1446,17 @@ class OpenVzDriver(driver.ComputeDriver):
LOG.debug(_('Beginning finish_revert_migration'))
meta = ovz_utils.read_instance_metadata(instance['id'])
migration_type = meta.get('migration_type')
container = OvzContainer.find(uuid=instance['uuid'])
if migration_type == 'resize_in_place':
# This is a resize on the same host so it's simple, resize
# in place and then exit the method
LOG.debug(_('Reverting in-place migration for %s') %
instance['id'])
self._set_instance_size(instance, network_info)
self.resource_manager.configure_container_resources(context,
container, instance['instance_type_id'])
self.resource_manager.configure_container_network(container,
network_info)
if ovz_utils.remove_instance_metadata_key(instance['id'],
'migration_type'):
LOG.debug(_('Removed migration_type metadata'))
@ -1982,7 +1466,6 @@ class OpenVzDriver(driver.ComputeDriver):
LOG.debug(_('Failed to remove migration_type metadata'))
return
container = OvzContainer.find(uuid=instance['uuid'])
container.save_ovz_metadata()
if block_device_info:
LOG.debug(_('Instance %s has volumes') % instance['id'])

ovznovadriver/openvz/network_drivers/tc.py

@ -73,6 +73,8 @@ class OVZTcRules(object):
:param vz_iface: interface on the hosts bridge that is associated with
the instance
"""
# TODO(jcru) figure out if there is a case for no instance_id else
# take it out
if not instance_id:
self.instance_type = dict()
self.instance_type['memory_mb'] = 2048
@ -87,11 +89,23 @@ class OVZTcRules(object):
self.address = address
self.vz_iface = vz_iface
# Calculate the bandwidth total by figuring out how many units we have
self.bandwidth = int(
round(self.instance_type['memory_mb'] /
CONF.ovz_memory_unit_size)) * CONF.ovz_tc_mbit_per_unit
# TODO(jcru) Ideally wish to move this to resources.py
# check under instance_type/flavor extra_specs to see if bandwidth has
# been predefined for flavor
extra_specs = self.instance_type.get("extra_specs", {})
self.bandwidth = extra_specs.get("vz_bandwidth", None)
if not self.bandwidth:
LOG.debug(_('No (vz_bandwidth) extra_specs key/value defined for '
'flavor id (%s)') % self.instance_type['flavorid'])
# Calculate the bandwidth total by figuring out how many units we
# have
self.bandwidth = int(round(self.instance_type['memory_mb'] /
CONF.ovz_memory_unit_size)) * CONF.ovz_tc_mbit_per_unit
else:
self.bandwidth = int(self.bandwidth)
LOG.debug(_('Allotted bandwidth: %s') % self.bandwidth)
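# Example (hypothetical, assuming ovz_memory_unit_size=512 and
# ovz_tc_mbit_per_unit=100): a 2048MB flavor with no vz_bandwidth
# extra_spec falls back to int(round(2048 / 512)) * 100 = 400 Mbit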
self.tc_id = self._get_instance_tc_id()
if not self.tc_id:
LOG.debug(_('No preassigned tc_id for %s, getting a new one') %

ovznovadriver/openvz/resources.py (new file)

@ -0,0 +1,405 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from ovznovadriver.openvz import utils as ovz_utils
from ovznovadriver.openvz.file_ext import boot as ovzboot
from ovznovadriver.openvz.file_ext import shutdown as ovzshutdown
from ovznovadriver.openvz.network_drivers import tc as ovztc
from ovznovadriver.localization import _
from oslo.config import cfg
from nova.openstack.common import log as logging
__openvz_resource_opts = [
cfg.BoolOpt('ovz_use_cpuunit',
default=True,
help='Use OpenVz cpuunits for guaranteed minimums'),
cfg.BoolOpt('ovz_use_cpulimit',
default=True,
help='Use OpenVz cpulimit for maximum cpu limits'),
cfg.FloatOpt('ovz_cpulimit_overcommit_multiplier',
default=1.0,
help='Multiplier for cpulimit to facilitate over '
'committing cpu resources'),
cfg.BoolOpt('ovz_use_cpus',
default=True,
help='Use OpenVz cpus for max cpus '
'available to the container'),
cfg.BoolOpt('ovz_use_ioprio',
default=True,
help='Use IO fair scheduling'),
cfg.BoolOpt('ovz_use_ubc',
default=False,
help='Use OpenVz User BeanCounters memory management model '
'instead of vswap'),
cfg.IntOpt('ovz_file_descriptors_per_unit',
default=4096,
help='Max open file descriptors per memory unit'),
cfg.IntOpt('ovz_memory_unit_size',
default=512,
help='Unit size in MB'),
cfg.BoolOpt('ovz_use_disk_quotas',
default=True,
help='Use disk quotas to contain disk usage'),
cfg.StrOpt('ovz_disk_space_increment',
default='G',
help='Disk subscription increment'),
cfg.FloatOpt('ovz_disk_space_oversub_percent',
default=1.10,
help='Local disk over subscription percentage'),
cfg.IntOpt('ovz_kmemsize_percent_of_memory',
default=20,
help='Percent of memory of the container to allow to be used '
'by the kernel'),
cfg.IntOpt('ovz_kmemsize_barrier_differential',
default=10,
help='Difference of kmemsize barrier vs limit'),
cfg.StrOpt('ovz_default_config',
default='basic',
help='Default config file to apply if no config is set for '
'flavor extra_specs'),
]
CONF = cfg.CONF
CONF.register_opts(__openvz_resource_opts)
LOG = logging.getLogger(__name__)
class VZResourceManager(object):
"""Manage OpenVz container resources
The purpose of this class is to decide/calculate vz resource configs
and apply them through the Container class"""
# TODO (jcru) make this a config?
# OpenVz sets the upper limit of cpuunits to 500000
MAX_CPUUNITS = 500000
def __init__(self, virtapi):
"""Requires virtapi (api to conductor) to get flavor info"""
self.virtapi = virtapi
# TODO (jcru) replace dict (self.utility) with self.cpulimit?
self.utility = dict()
def _get_flavor_info(self, context, flavor_id):
"""Get the latest flavor info which contains extra_specs"""
# instance_type refers to the flavor (what you see in flavor list)
return self.virtapi.flavor_get(context, flavor_id)
def _calc_pages(self, instance_memory, block_size=4096):
"""
Returns the number of pages for a given size of storage/memory
"""
return ((int(instance_memory) * 1024) * 1024) / block_size
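# Example: 512MB -> ((512 * 1024) * 1024) / 4096 = 131072 pages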
def _percent_of_resource(self, instance_memory):
"""
In order to evenly distribute resources this method will calculate a
multiplier based on memory consumption for the allocated container and
the overall host memory. This can then be applied to the cpuunits in
self.utility to be passed as an argument to container.set_cpuunits
to limit cpu usage of the container to an accurate percentage of
the host. This is only done on spawn so that later, should
someone choose to do so, they can adjust the container's cpu usage
up or down.
"""
cont_mem_mb = (
float(instance_memory) / float(ovz_utils.get_memory_mb_total()))
# We shouldn't ever have more than 100% but if for some unforeseen
# reason we do, lets limit it to 1 to make all of the other
# calculations come out clean.
if cont_mem_mb > 1:
LOG.error(_('_percent_of_resource came up with more than 100%'))
return 1.0
else:
return cont_mem_mb
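# Example: a 4096MB flavor on a host reporting 65536MB of total memory
# returns 4096.0 / 65536.0 = 0.0625 (6.25% of host resources)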
def get_cpulimit(self):
"""
Fetch the total possible cpu processing limit in percentage to be
divided up across all containers. This is expressed in percentage
being added up by logical processor. If there are 24 logical
processors then the total cpulimit for the host node will be
2400.
"""
self.utility['CPULIMIT'] = ovz_utils.get_vcpu_total() * 100
LOG.debug(_('Updated cpulimit in utility'))
LOG.debug(
_('Current cpulimit in utility: %s') % self.utility['CPULIMIT'])
def get_cpuunits_usage(self):
"""
Use openvz tools to discover the total used processing power. This is
done using the vzcpucheck -v command.
Run the command:
vzcpucheck -v
If this fails to run an exception should not be raised as this is a
soft error and results only in not knowing the current cpuunit
usage of each container.
"""
out = ovz_utils.execute(
'vzcpucheck', '-v', run_as_root=True, raise_on_error=False)
if out:
for line in out.splitlines():
line = line.split()
if len(line) > 0:
if line[0].isdigit():
LOG.debug(_('Usage for CTID %(id)s: %(usage)s') %
{'id': line[0], 'usage': line[1]})
if int(line[0]) not in self.utility.keys():
self.utility[int(line[0])] = dict()
self.utility[int(line[0])] = int(line[1])
def configure_container_resources(self, context, container,
requested_flavor_id):
instance_type = self._get_flavor_info(context, requested_flavor_id)
self._setup_memory(container, instance_type)
self._setup_file_limits(container, instance_type)
self._setup_cpu(container, instance_type)
self._setup_io(container, instance_type)
self._setup_disk_quota(container, instance_type)
# TODO(jcru) normally we would pass a context and requested flavor as
# in configure_container_resources, but we look up instance_type within
# the tc code.  Ideally all of the networking setup would happen here
def configure_container_network(self, container, network_info,
is_migration=False):
self._generate_tc_rules(container, network_info, is_migration)
def apply_config(self, context, container, requested_flavor_id):
"""In order to succesfully apply a config file the file must exist
within /etc/vz/config if just the file name is passed (e.g. samples)
or the full file path must be specified
It is possible to not define a config file which in that case nothing
is applied.
"""
instance_type = self._get_flavor_info(context, requested_flavor_id)
instance_type_extra_specs = instance_type.get('extra_specs', {})
# TODO(jcru) handle default config for both UBC and Vswap
config_file = instance_type_extra_specs.get("vz_config_file",
CONF.ovz_default_config)
if config_file:
container.apply_config(config_file)
def _setup_memory(self, container, instance_type):
"""Decides on what VZ memory model to use.
By default we use Vswap. If User Beancounters (UBC) is desired, enable
through config.
"""
if CONF.ovz_use_ubc:
self._setup_memory_with_ubc(container, instance_type)
return
self._setup_memory_with_vswap(container, instance_type)
def _setup_memory_with_ubc(self, container, instance_type):
instance_memory_mb = int(instance_type.get('memory_mb'))
instance_memory_bytes = ((instance_memory_mb * 1024) * 1024)
instance_memory_pages = self._calc_pages(instance_memory_mb)
# Now use the configuration CONF to calculate the appropriate
# values for both barrier and limit.
kmem_limit = int(instance_memory_bytes * (
float(CONF.ovz_kmemsize_percent_of_memory) / 100.0))
kmem_barrier = int(kmem_limit * (
float(CONF.ovz_kmemsize_barrier_differential) / 100.0))
container.set_vmguarpages(instance_memory_pages)
container.set_privvmpages(instance_memory_pages)
container.set_kmemsize(kmem_barrier, kmem_limit)
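# Example (defaults: 20% of memory, 10% differential): a 512MB flavor
# gives kmem_limit = int(536870912 * 0.20) = 107374182 bytes and
# kmem_barrier = int(107374182 * 0.10) = 10737418 bytes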
def _setup_memory_with_vswap(self, container, instance_type):
memory = int(instance_type.get('memory_mb'))
swap = instance_type.get('swap', 0)
# Memory should be in MB
memory = "%sM" % memory
# Swap should be in GB
swap = "%sG" % swap
container.set_vswap(memory, swap)
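# Example: memory_mb=2048 and swap=1 call set_vswap('2048M', '1G'),
# i.e. vzctl set <ctid> --save --ram 2048M --swap 1G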
def _setup_file_limits(self, container, instance_type):
instance_memory_mb = int(instance_type.get('memory_mb'))
memory_unit_size = int(CONF.ovz_memory_unit_size)
max_fd_per_unit = int(CONF.ovz_file_descriptors_per_unit)
max_fd = int(instance_memory_mb / memory_unit_size) * max_fd_per_unit
container.set_numfiles(max_fd)
container.set_numflock(max_fd)
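# Example (defaults: 512MB units, 4096 fds per unit): a 2048MB flavor
# gets int(2048 / 512) * 4096 = 16384 numfiles and numflock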
# TODO(jcru) override calculated values?
def _setup_cpu(self, container, instance_type):
"""
"""
instance_memory_mb = instance_type.get('memory_mb')
instance_type_extra_specs = instance_type.get('extra_specs', {})
percent_of_resource = self._percent_of_resource(instance_memory_mb)
if CONF.ovz_use_cpuunit:
LOG.debug(_('Reported cpuunits %s') % self.MAX_CPUUNITS)
LOG.debug(_('Reported percent of resource: %s') % percent_of_resource)
units = int(round(self.MAX_CPUUNITS * percent_of_resource))
if units > self.MAX_CPUUNITS:
units = self.MAX_CPUUNITS
container.set_cpuunits(units)
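# Example: percent_of_resource=0.0625 yields
# int(round(500000 * 0.0625)) = 31250 cpuunits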
if CONF.ovz_use_cpulimit:
# Check if cpulimit for flavor is predefined in flavors extra_specs
cpulimit = instance_type_extra_specs.get('vz_cpulimit', None)
if not cpulimit:
cpulimit = int(round(
(self.utility['CPULIMIT'] * percent_of_resource) *
CONF.ovz_cpulimit_overcommit_multiplier))
else:
cpulimit = int(cpulimit)
if cpulimit > self.utility['CPULIMIT']:
LOG.warning(_("The cpulimit that was calculated or predefined "
"(%s) is to high based on the CPULIMIT (%s)") %
(cpulimit, self.utility['CPULIMIT']))
LOG.warning(_("Using CPULIMIT instead."))
cpulimit = self.utility['CPULIMIT']
container.set_cpulimit(cpulimit)
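# Example: percent_of_resource=0.0625 on a host with CPULIMIT=2400 and
# the default 1.0 multiplier yields int(round(2400 * 0.0625)) = 150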
if CONF.ovz_use_cpus:
vcpus = int(instance_type.get('vcpus'))
LOG.debug(_('VCPUs: %s') % vcpus)
utility_cpus = self.utility['CPULIMIT'] / 100
if vcpus > utility_cpus:
LOG.debug(
_('OpenVZ thinks vcpus "%(vcpus)s" '
'is greater than "%(utility_cpus)s"') % locals())
# We can't set cpus higher than the number of actual logical cores
# on the system so set a cap here
vcpus = self.utility['CPULIMIT'] / 100
LOG.debug(_('VCPUs: %s') % vcpus)
container.set_cpus(vcpus)
def _setup_io(self, container, instance_type):
# The old algorithm made it impossible to distinguish between a
# 512MB container and a 2048MB container for IO priority. We will
# for now follow a simple map to create a more non-linear
# relationship between the flavor sizes and their IO priority groups
# The IO priority of a container is grouped in 1 of 8 groups ranging
# from 0 to 7. We can calculate an appropriate value by finding out
# how many ovz_memory_unit_size chunks are in the container's memory
# allocation and then using python's math library to solve for that
# number's logarithm.
if CONF.ovz_use_ioprio:
instance_memory_mb = instance_type.get('memory_mb')
num_chunks = int(int(instance_memory_mb) / CONF.ovz_memory_unit_size)
try:
ioprio = int(round(math.log(num_chunks, 2)))
except ValueError:
ioprio = 0
if ioprio > 7:
# ioprio can't be higher than 7 so set a ceiling
ioprio = 7
container.set_ioprio(ioprio)
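# Example (default 512MB unit size): a 2048MB flavor has 4 chunks, so
# ioprio = int(round(math.log(4, 2))) = 2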
def _setup_disk_quota(self, container, instance_type):
if CONF.ovz_use_disk_quotas:
instance_root_gb = instance_type.get('root_gb')
soft_limit = int(instance_root_gb)
hard_limit = int(soft_limit * CONF.ovz_disk_space_oversub_percent)
# Now set the increment of the limit. I do this here so that I don't
# have to do this in every line above.
soft_limit = '%s%s' % (soft_limit, CONF.ovz_disk_space_increment)
hard_limit = '%s%s' % (hard_limit, CONF.ovz_disk_space_increment)
container.set_diskspace(soft_limit, hard_limit)
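# Example (defaults: 'G' increment, 1.10 oversub): root_gb=20 yields
# set_diskspace('20G', '22G')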
# TODO(jcru) move more of tc logic into here ?
def _generate_tc_rules(self, container, network_info, is_migration=False):
"""
Utility method to generate tc info for instances that have been
resized and/or migrated
"""
LOG.debug(_('Setting network sizing'))
boot_file = ovzboot.OVZBootFile(container.ovz_id, 755)
shutdown_file = ovzshutdown.OVZShutdownFile(container.ovz_id, 755)
if not is_migration:
with shutdown_file:
LOG.debug(_('Cleaning TC rules for %s') % container.nova_id)
shutdown_file.read()
shutdown_file.run_contents(raise_on_error=False)
# On resize we throw away existing tc_id and make a new one
# because the resize *could* have taken place on a different host
# where the tc_id is already in use.
meta = ovz_utils.read_instance_metadata(container.nova_id)
tc_id = meta.get('tc_id', None)
if tc_id:
ovz_utils.remove_instance_metadata_key(container.nova_id, 'tc_id')
with shutdown_file:
shutdown_file.set_contents(list())
with boot_file:
boot_file.set_contents(list())
LOG.debug(_('Getting network dict for: %s') % container.uuid)
interfaces = ovz_utils.generate_network_dict(container,
network_info)
for net_dev in interfaces:
LOG.debug(_('Adding tc rules for: %s') %
net_dev['vz_host_if'])
tc = ovztc.OVZTcRules()
tc.instance_info(container.nova_id, net_dev['address'],
net_dev['vz_host_if'])
with boot_file:
boot_file.append(tc.container_start())
with shutdown_file:
shutdown_file.append(tc.container_stop())
with boot_file:
if not is_migration:
# during migration, the instance isn't yet running, so applying
# these rules before then would just spew errors
LOG.debug(_('Running TC rules for: %s') % container.ovz_id)
boot_file.run_contents()
LOG.debug(_('Saving TC rules for: %s') % container.ovz_id)
boot_file.write()
with shutdown_file:
shutdown_file.write()