Improve base compute and utils helper organization

Move helpers that do not actually need a test class out of the base
compute class, and create a new hardware.py file to hold the
hardware-related helpers.

Change-Id: I7a8ae901dce68f4d858aa2062820fd7405c87f45

parent a3ff33907a
commit 4491e3db8a
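The net effect is that the CPU-spec helpers become plain functions in an importable module. A minimal sketch of how the relocated helpers behave, using illustrative values (the function bodies appear in the hardware.py diff below):

    from whitebox_tempest_plugin import hardware

    # parse_cpu_spec expands ranges and applies '^' exclusions:
    # '1-4,^3,6' yields {1, 2, 4, 6}
    pinned = hardware.parse_cpu_spec('1-4,^3,6')

    # format_cpu_spec is the reverse direction; note it joins ids verbatim
    # and does not re-collapse consecutive ids into ranges
    assert hardware.format_cpu_spec(sorted(pinned)) == '1,2,4,6'

    # get_pci_address just assembles the four address components
    assert hardware.get_pci_address('0000', '81', '00', '1') == '0000:81:00.1'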
@@ -23,6 +23,7 @@ from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 
+from whitebox_tempest_plugin import hardware
 from whitebox_tempest_plugin.services import clients
 from whitebox_tempest_plugin import utils as whitebox_utils
 
@@ -142,29 +143,14 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
         self.assertEqual(target_host, self.get_host_for_server(server_id),
                          msg)
 
-    def get_all_cpus(self):
-        """Aggregate the dictionary values of [whitebox]/cpu_topology from
-        tempest.conf into a list of pCPU ids.
-        """
-        topology_dict = CONF.whitebox_hardware.cpu_topology
-        cpus = []
-        [cpus.extend(c) for c in topology_dict.values()]
-        return cpus
-
     def get_pinning_as_set(self, server_id):
         pinset = set()
         root = self.get_server_xml(server_id)
         vcpupins = root.findall('./cputune/vcpupin')
         for pin in vcpupins:
-            pinset |= whitebox_utils.parse_cpu_spec(pin.get('cpuset'))
+            pinset |= hardware.parse_cpu_spec(pin.get('cpuset'))
         return pinset
 
-    def _get_cpu_spec(self, cpu_list):
-        """Returns a libvirt-style CPU spec from the provided list of integers. For
-        example, given [0, 2, 3], returns "0,2,3".
-        """
-        return ','.join(map(str, cpu_list))
-
     # TODO(lyarwood): Refactor all of this into a common module between
     # tempest.api.{compute,volume} and tempest.scenario.manager where this
    # has been copied from to avoid mixing api and scenario classes.
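After this change, get_pinning_as_set keeps only the XML walk and defers spec parsing to the new module. A standalone sketch of the same walk, run against a hypothetical <cputune> fragment rather than a live guest's domain XML:

    import xml.etree.ElementTree as ET

    from whitebox_tempest_plugin import hardware

    # Hypothetical libvirt domain XML for a guest pinned to pCPUs 2, 3, 5, 6
    xml = '''<domain>
      <cputune>
        <vcpupin vcpu="0" cpuset="2"/>
        <vcpupin vcpu="1" cpuset="3,5-6"/>
      </cputune>
    </domain>'''

    root = ET.fromstring(xml)
    pinset = set()
    for pin in root.findall('./cputune/vcpupin'):
        pinset |= hardware.parse_cpu_spec(pin.get('cpuset'))
    assert pinset == {2, 3, 5, 6}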
@@ -37,6 +37,7 @@ from tempest.exceptions import BuildErrorException
 from tempest.lib import decorators
 
 from whitebox_tempest_plugin.api.compute import base
+from whitebox_tempest_plugin import hardware
 from whitebox_tempest_plugin.services import clients
 from whitebox_tempest_plugin import utils as whitebox_utils
 
@@ -65,7 +66,7 @@ class BasePinningTest(base.BaseWhiteboxComputeTest):
         cell_pins = {}
         for memnode in memnodes:
             cell_pins[int(memnode.get('cellid'))] = \
-                whitebox_utils.parse_cpu_spec(memnode.get('nodeset'))
+                hardware.parse_cpu_spec(memnode.get('nodeset'))
 
         return cell_pins
 
@@ -82,7 +83,7 @@ class BasePinningTest(base.BaseWhiteboxComputeTest):
         emulator_threads = set()
         for pin in emulatorpins:
             emulator_threads |= \
-                whitebox_utils.parse_cpu_spec(pin.get('cpuset'))
+                hardware.parse_cpu_spec(pin.get('cpuset'))
 
         return emulator_threads
 
@@ -413,7 +414,7 @@ class EmulatorExtraCPUTest(BasePinningTest):
         emulatorpins = root.findall('./cputune/emulatorpin')
         emulator_threads = set()
         for pin in emulatorpins:
-            emulator_threads |= whitebox_utils.parse_cpu_spec(
+            emulator_threads |= hardware.parse_cpu_spec(
                 pin.get('cpuset'))
 
         return emulator_threads
@@ -438,10 +439,10 @@ class EmulatorExtraCPUTest(BasePinningTest):
         if len(CONF.whitebox_hardware.cpu_topology[self.numa_to_use]) < 3:
             raise self.skipException('Test requires NUMA Node with 3 or more '
                                      'CPUs to run')
-        dedicated_set = self._get_cpu_spec(
+        dedicated_set = hardware.format_cpu_spec(
             CONF.whitebox_hardware.cpu_topology[self.numa_to_use][:2])
 
-        cpu_shared_set_str = self._get_cpu_spec(
+        cpu_shared_set_str = hardware.format_cpu_spec(
             CONF.whitebox_hardware.cpu_topology[self.numa_to_use][2:])
 
         hostname = self.list_compute_hosts()[0]
@@ -477,7 +478,7 @@ class EmulatorExtraCPUTest(BasePinningTest):
 
         # Confirm the emulator threads from server's A and B are both equal
         # to cpu_shared_set
-        cpu_shared_set = whitebox_utils.parse_cpu_spec(cpu_shared_set_str)
+        cpu_shared_set = hardware.parse_cpu_spec(cpu_shared_set_str)
         self.assertEqual(
             emulator_threads_a, cpu_shared_set,
             'Emulator threads for server A %s are not the same as CPU set '
@@ -504,7 +505,7 @@ class EmulatorExtraCPUTest(BasePinningTest):
             raise self.skipException('Test requires NUMA Node with 2 or more '
                                      'CPUs to run')
 
-        dedicated_set = self._get_cpu_spec(
+        dedicated_set = hardware.format_cpu_spec(
             CONF.whitebox_hardware.cpu_topology[self.numa_to_use][:2])
         hostname = self.list_compute_hosts()[0]
         host_sm = clients.NovaServiceManager(
@@ -575,7 +576,7 @@ class EmulatorExtraCPUTest(BasePinningTest):
                                      'CPUs to run')
         dedicated_set = \
             set(CONF.whitebox_hardware.cpu_topology[self.numa_to_use])
-        dedicated_set_str = self._get_cpu_spec(dedicated_set)
+        dedicated_set_str = hardware.format_cpu_spec(dedicated_set)
 
         hostname = self.list_compute_hosts()[0]
         host_sm = clients.NovaServiceManager(
@@ -652,7 +653,7 @@ class EmulatorExtraCPUTest(BasePinningTest):
         if len(CONF.whitebox_hardware.cpu_topology[self.numa_to_use]) < 2:
             raise self.skipException('Test requires NUMA Node with 2 or more '
                                      'CPUs to run')
-        dedicated_set = self._get_cpu_spec(
+        dedicated_set = hardware.format_cpu_spec(
             CONF.whitebox_hardware.cpu_topology[self.numa_to_use][:2])
 
         hostname = self.list_compute_hosts()[0]
@@ -805,7 +806,7 @@ class NUMALiveMigrationBase(BasePinningTest):
         """
         root = self.get_server_xml(server_id)
         cpuset = root.find('./vcpu').attrib.get('cpuset', None)
-        return whitebox_utils.parse_cpu_spec(cpuset)
+        return hardware.parse_cpu_spec(cpuset)
 
     def _get_hugepage_xml_element(self, server_id):
         """Gather and return all instances of the page element from XML element
@@ -872,9 +873,9 @@ class NUMALiveMigrationTest(NUMALiveMigrationBase):
             self.os_admin.services_client)
         with whitebox_utils.multicontext(
             host1_sm.config_options(('DEFAULT', 'vcpu_pin_set',
-                                     self._get_cpu_spec(topo_1[0]))),
+                                     hardware.format_cpu_spec(topo_1[0]))),
             host2_sm.config_options(('DEFAULT', 'vcpu_pin_set',
-                                     self._get_cpu_spec(topo_2[0])))
+                                     hardware.format_cpu_spec(topo_2[0])))
         ):
             # Boot 2 servers such that their vCPUs "fill" a NUMA node.
             specs = {'hw:cpu_policy': 'dedicated'}
@@ -920,7 +921,7 @@ class NUMALiveMigrationTest(NUMALiveMigrationBase):
         topo_a = numaclient_a.get_host_topology()
         with host_a_sm.config_options(
             ('DEFAULT', 'vcpu_pin_set',
-             self._get_cpu_spec(topo_a[0] + topo_a[1]))
+             hardware.format_cpu_spec(topo_a[0] + topo_a[1]))
         ):
             self.live_migrate(server_b['id'], host_a, 'ACTIVE')
 
@@ -1141,7 +1142,7 @@ class NUMACPUDedicatedLiveMigrationTest(NUMALiveMigrationBase):
             raise cls.skipException(msg)
 
     def test_collocation_migration(self):
-        cpu_list = self.get_all_cpus()
+        cpu_list = hardware.get_all_cpus()
         if len(cpu_list) < 4:
             raise self.skipException('Requires at least 4 pCPUs to run')
 
@@ -1165,15 +1166,17 @@ class NUMACPUDedicatedLiveMigrationTest(NUMALiveMigrationBase):
             self.os_admin.services_client)
 
         with whitebox_utils.multicontext(
-            host1_sm.config_options(('compute', 'cpu_dedicated_set',
-                                     self._get_cpu_spec(host1_dedicated_set)),
+            host1_sm.config_options(
+                ('compute', 'cpu_dedicated_set',
+                 hardware.format_cpu_spec(host1_dedicated_set)),
                 ('compute', 'cpu_shared_set',
-                 self._get_cpu_spec(host1_shared_set))
+                 hardware.format_cpu_spec(host1_shared_set))
             ),
-            host2_sm.config_options(('compute', 'cpu_dedicated_set',
-                                     self._get_cpu_spec(host2_dedicated_set)),
+            host2_sm.config_options(
+                ('compute', 'cpu_dedicated_set',
+                 hardware.format_cpu_spec(host2_dedicated_set)),
                 ('compute', 'cpu_shared_set',
-                 self._get_cpu_spec(host2_shared_set))
+                 hardware.format_cpu_spec(host2_shared_set))
             )
         ):
             # Create a total of four instances, with each compute host holding
@@ -22,6 +22,7 @@ from tempest import config
 from tempest.lib import decorators
 
 from whitebox_tempest_plugin.api.compute import base
+from whitebox_tempest_plugin import hardware
 from whitebox_tempest_plugin.services import clients
 from whitebox_tempest_plugin import utils as whitebox_utils
 
@@ -107,7 +108,7 @@ class LiveMigrationAndReboot(LiveMigrationBase):
 
     def _migrate_and_reboot_instance(self, section, cpu_set_parameter):
         flavor_vcpu_size = 2
-        cpu_list = self.get_all_cpus()
+        cpu_list = hardware.get_all_cpus()
         if len(cpu_list) < 4:
             raise self.skipException('Requires 4 or more pCPUs to execute '
                                      'the test')
@@ -130,10 +131,12 @@ class LiveMigrationAndReboot(LiveMigrationBase):
             self.os_admin.services_client)
 
         with whitebox_utils.multicontext(
-            host1_sm.config_options((section, cpu_set_parameter,
-                                     self._get_cpu_spec(host1_dedicated_set))),
-            host2_sm.config_options((section, cpu_set_parameter,
-                                     self._get_cpu_spec(host2_dedicated_set)))
+            host1_sm.config_options(
+                (section, cpu_set_parameter,
+                 hardware.format_cpu_spec(host1_dedicated_set))),
+            host2_sm.config_options(
+                (section, cpu_set_parameter,
+                 hardware.format_cpu_spec(host2_dedicated_set)))
         ):
             # Create a server with a dedicated cpu policy
             server = self.create_test_server(
@@ -18,6 +18,7 @@ from tempest import exceptions as tempest_exc
 from tempest.lib.common.utils import data_utils
 
 from whitebox_tempest_plugin.api.compute import base
+from whitebox_tempest_plugin import hardware
 from whitebox_tempest_plugin.services import clients
 
 from oslo_log import log as logging
@@ -287,7 +288,7 @@ class SRIOVNumaAffinity(SRIOVBase):
         cpu_dedicated_set = \
             CONF.whitebox_hardware.cpu_topology[affinity_node] + \
             CONF.whitebox_hardware.cpu_topology[second_node]
-        cpu_dedicated_str = self._get_cpu_spec(cpu_dedicated_set)
+        cpu_dedicated_str = hardware.format_cpu_spec(cpu_dedicated_set)
 
         host_sm = clients.NovaServiceManager(host,
                                              'nova-compute',
@@ -376,7 +377,7 @@ class SRIOVNumaAffinity(SRIOVBase):
         # Node
         cpu_dedicated_set = CONF.whitebox_hardware.cpu_topology[
             str(CONF.whitebox_hardware.physnet_numa_affinity)]
-        cpu_dedicated_str = self._get_cpu_spec(cpu_dedicated_set)
+        cpu_dedicated_str = hardware.format_cpu_spec(cpu_dedicated_set)
         host_sm = clients.NovaServiceManager(host, 'nova-compute',
                                              self.os_admin.services_client)
 
whitebox_tempest_plugin/hardware.py (new file, 110 lines)
@@ -0,0 +1,110 @@
+# Copyright 2020 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from whitebox_tempest_plugin import exceptions
+
+
+CONF = config.CONF
+
+
+def get_all_cpus():
+    """Aggregate the dictionary values of [whitebox]/cpu_topology from
+    tempest.conf into a list of pCPU ids.
+    """
+    topology_dict = CONF.whitebox_hardware.cpu_topology
+    cpus = []
+    [cpus.extend(c) for c in topology_dict.values()]
+    return cpus
+
+
+def parse_cpu_spec(spec):
+    """Parse a CPU set specification.
+
+    NOTE(artom): This has been lifted from Nova with minor
+    exceptions-related adjustments.
+
+    Each element in the list is either a single CPU number, a range of
+    CPU numbers, or a caret followed by a CPU number to be excluded
+    from a previous range.
+
+    :param spec: cpu set string eg "1-4,^3,6"
+
+    :returns: a set of CPU indexes
+    """
+    cpuset_ids = set()
+    cpuset_reject_ids = set()
+    for rule in spec.split(','):
+        rule = rule.strip()
+        # Handle multi ','
+        if len(rule) < 1:
+            continue
+        # Note the count limit in the .split() call
+        range_parts = rule.split('-', 1)
+        if len(range_parts) > 1:
+            reject = False
+            if range_parts[0] and range_parts[0][0] == '^':
+                reject = True
+                range_parts[0] = str(range_parts[0][1:])
+
+            # So, this was a range; start by converting the parts to ints
+            try:
+                start, end = [int(p.strip()) for p in range_parts]
+            except ValueError:
+                raise exceptions.InvalidCPUSpec(spec=spec)
+            # Make sure it's a valid range
+            if start > end:
+                raise exceptions.InvalidCPUSpec(spec=spec)
+            # Add available CPU ids to set
+            if not reject:
+                cpuset_ids |= set(range(start, end + 1))
+            else:
+                cpuset_reject_ids |= set(range(start, end + 1))
+        elif rule[0] == '^':
+            # Not a range, the rule is an exclusion rule; convert to int
+            try:
+                cpuset_reject_ids.add(int(rule[1:].strip()))
+            except ValueError:
+                raise exceptions.InvalidCPUSpec(spec=spec)
+        else:
+            # OK, a single CPU to include; convert to int
+            try:
+                cpuset_ids.add(int(rule))
+            except ValueError:
+                raise exceptions.InvalidCPUSpec(spec=spec)
+
+    # Use sets to handle the exclusion rules for us
+    cpuset_ids -= cpuset_reject_ids
+
+    return cpuset_ids
+
+
+def format_cpu_spec(cpu_list):
+    """Returns a libvirt-style CPU spec from the provided list of integers. For
+    example, given [0, 2, 3], returns "0,2,3".
+    """
+    return ','.join(map(str, cpu_list))
+
+
+def get_pci_address(domain, bus, slot, func):
+    """Assembles PCI address components into a fully-specified PCI address.
+
+    NOTE(jparker): This has been lifted from nova.pci.utils with no
+    adjustments
+
+    Does not validate that the components are valid hex or wildcard values.
+    :param domain, bus, slot, func: Hex or wildcard strings.
+    :return: A string of the form "<domain>:<bus>:<slot>.<function>".
+    """
+    return '%s:%s:%s.%s' % (domain, bus, slot, func)
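Malformed specs surface as the plugin's own exceptions.InvalidCPUSpec rather than a bare ValueError, so callers only need to catch one exception type. A minimal sketch:

    from whitebox_tempest_plugin import exceptions, hardware

    try:
        hardware.parse_cpu_spec('4-2')  # inverted range: start > end
    except exceptions.InvalidCPUSpec:
        pass  # also raised for non-integer tokens such as '1-a' or '^x'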
@@ -70,64 +70,3 @@ def get_ctlplane_address(compute_hostname):
         return CONF.whitebox.ctlplane_addresses[compute_hostname]
 
     raise exceptions.CtrlplaneAddressResolutionError(host=compute_hostname)
-
-
-def parse_cpu_spec(spec):
-    """Parse a CPU set specification.
-
-    NOTE(artom): This has been lifted from Nova with minor
-    exceptions-related adjustments.
-
-    Each element in the list is either a single CPU number, a range of
-    CPU numbers, or a caret followed by a CPU number to be excluded
-    from a previous range.
-
-    :param spec: cpu set string eg "1-4,^3,6"
-
-    :returns: a set of CPU indexes
-    """
-    cpuset_ids = set()
-    cpuset_reject_ids = set()
-    for rule in spec.split(','):
-        rule = rule.strip()
-        # Handle multi ','
-        if len(rule) < 1:
-            continue
-        # Note the count limit in the .split() call
-        range_parts = rule.split('-', 1)
-        if len(range_parts) > 1:
-            reject = False
-            if range_parts[0] and range_parts[0][0] == '^':
-                reject = True
-                range_parts[0] = str(range_parts[0][1:])
-
-            # So, this was a range; start by converting the parts to ints
-            try:
-                start, end = [int(p.strip()) for p in range_parts]
-            except ValueError:
-                raise exceptions.InvalidCPUSpec(spec=spec)
-            # Make sure it's a valid range
-            if start > end:
-                raise exceptions.InvalidCPUSpec(spec=spec)
-            # Add available CPU ids to set
-            if not reject:
-                cpuset_ids |= set(range(start, end + 1))
-            else:
-                cpuset_reject_ids |= set(range(start, end + 1))
-        elif rule[0] == '^':
-            # Not a range, the rule is an exclusion rule; convert to int
-            try:
-                cpuset_reject_ids.add(int(rule[1:].strip()))
-            except ValueError:
-                raise exceptions.InvalidCPUSpec(spec=spec)
-        else:
-            # OK, a single CPU to include; convert to int
-            try:
-                cpuset_ids.add(int(rule))
-            except ValueError:
-                raise exceptions.InvalidCPUSpec(spec=spec)
-
-    # Use sets to handle the exclusion rules for us
-    cpuset_ids -= cpuset_reject_ids
-
-    return cpuset_ids