DRYD21 - YAPF formatting for 375626
- Run YAPF for the changes in Gerrit 375626

Change-Id: I0237af779610acb8b565d419b8382a63749e4194
parent dc9fc231da
commit 32be590a53
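The hunks below are the result of running YAPF over the files touched by Gerrit change 375626. As a rough illustration of how such a pass can be reproduced locally, the sketch below uses yapf's FormatFile API; the file paths and the pep8 base style are assumptions for illustration only, since the exact file list and style settings used for this commit are not recorded in the message.

# Illustrative sketch only -- not the exact command used for this commit.
# Assumes the yapf package is installed; paths and style are placeholders.
from yapf.yapflib.yapf_api import FormatFile

# Hypothetical list; in practice it would come from `git diff --name-only`.
changed_files = [
    'drydock_provisioner/control/base.py',
    'drydock_provisioner/drivers/node/maasdriver/driver.py',
]

for path in changed_files:
    # FormatFile returns (reformatted_text_or_None, encoding, changed);
    # in_place=True rewrites the file on disk instead of returning the text.
    _, _, changed = FormatFile(path, style_config='pep8', in_place=True)
    print('%s: %s' % (path, 'reformatted' if changed else 'unchanged'))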
@@ -52,8 +52,11 @@ class BaseResource(object):
                 json_body = json.loads(raw_body.decode('utf-8'))
                 return json_body
             except json.JSONDecodeError as jex:
-                print("Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
-                self.error(req.context, "Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
+                print(
+                    "Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
+                self.error(
+                    req.context,
+                    "Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
                 raise errors.InvalidFormat("%s: Invalid JSON in body: %s" %
                                            (req.path, jex))
             else:
@@ -93,30 +93,38 @@ class MaasNodeDriver(NodeDriver):
                     result_detail['detail'].append("Able to connect to MaaS.")
                     if maas_client.test_authentication():
                         self.logger.info("Able to authenitcate with MaaS API.")
-                        result_detail['detail'].append("Able to authenticate with MaaS API.")
+                        result_detail['detail'].append(
+                            "Able to authenticate with MaaS API.")

                         boot_res = maas_boot_res.BootResources(maas_client)
                         boot_res.refresh()

                         if boot_res.is_importing():
-                            self.logger.info("MaaS still importing boot resources.")
-                            result_detail['detail'].append("MaaS still importing boot resources.")
+                            self.logger.info(
+                                "MaaS still importing boot resources.")
+                            result_detail['detail'].append(
+                                "MaaS still importing boot resources.")
                             result = hd_fields.ActionResult.Failure
                         else:
                             if boot_res.len() > 0:
-                                self.logger.info("MaaS has synced boot resources.")
-                                result_detail['detail'].append("MaaS has synced boot resources.")
+                                self.logger.info(
+                                    "MaaS has synced boot resources.")
+                                result_detail['detail'].append(
+                                    "MaaS has synced boot resources.")
                             else:
                                 self.logger.info("MaaS has no boot resources.")
-                                result_detail['detail'].append("MaaS has no boot resources.")
+                                result_detail['detail'].append(
+                                    "MaaS has no boot resources.")
                                 result = hd_fields.ActionResult.Failure

                         rack_ctlrs = maas_rack.RackControllers(maas_client)
                         rack_ctlrs.refresh()

                         if rack_ctlrs.len() == 0:
-                            self.logger.info("No rack controllers registered in MaaS")
-                            result_detail['detail'].append("No rack controllers registered in MaaS")
+                            self.logger.info(
+                                "No rack controllers registered in MaaS")
+                            result_detail['detail'].append(
+                                "No rack controllers registered in MaaS")
                             result = hd_fields.ActionResult.Failure
                         else:
                             for r in rack_ctlrs:
@@ -125,10 +133,14 @@ class MaasNodeDriver(NodeDriver):

                                 for s in rack_svc:
                                     if s in maas_rack.RackController.REQUIRED_SERVICES:
-                                        self.logger.info("Service %s on rackd %s is %s" % (s, rack_name, rack_svc[s]))
+                                        self.logger.info(
+                                            "Service %s on rackd %s is %s" %
+                                            (s, rack_name, rack_svc[s]))
                                         result_detail['detail'].append(
-                                            "Service %s on rackd %s is %s" % (s, rack_name, rack_svc[s]))
-                                        if rack_svc[s] not in ("running", "off"):
+                                            "Service %s on rackd %s is %s" %
+                                            (s, rack_name, rack_svc[s]))
+                                        if rack_svc[s] not in ("running",
+                                                               "off"):
                                             result = hd_fields.ActionResult.Failure
             except errors.TransientDriverError as ex:
                 result_detail['retry'] = True
@@ -1405,9 +1417,9 @@ class MaasTaskRunner(drivers.DriverTaskRunner):
                     60 // cfg.CONF.maasdriver.poll_interval)

                 while (
-                    attempts < max_attempts and
-                    (machine.status_name != 'Ready' and
-                     not machine.status_name.startswith('Failed'))
+                        attempts < max_attempts and
+                        (machine.status_name != 'Ready' and
+                         not machine.status_name.startswith('Failed'))
                 ):
                     attempts = attempts + 1
                     time.sleep(cfg.CONF.maasdriver.poll_interval)
@@ -21,10 +21,17 @@ class BootResource(model_base.ResourceBase):

     resource_url = 'boot-resources/{resource_id}/'
     fields = [
-        'resource_id', 'name', 'type', 'subarches', 'architecture',
+        'resource_id',
+        'name',
+        'type',
+        'subarches',
+        'architecture',
     ]
     json_fields = [
-        'name', 'type', 'subarches', 'architecture',
+        'name',
+        'type',
+        'subarches',
+        'architecture',
     ]

     def __init__(self, api_client, **kwargs):
@@ -43,8 +50,7 @@ class BootResources(model_base.ResourceCollectionBase):
         """Check if boot resources are importing."""
         url = self.interpolate_url()

-        self.logger.debug(
-            "Checking if boot resources are importing.")
+        self.logger.debug("Checking if boot resources are importing.")
         resp = self.api_client.get(url, op='is_importing')

         if resp.status_code == 200:
@@ -52,6 +58,7 @@ class BootResources(model_base.ResourceCollectionBase):
             self.logger.debug("Boot resource importing status: %s" % resp_json)
             return resp_json
         else:
-            msg = "Error checking import status of boot resources: %s - %s" % (resp.status_code, resp.text)
+            msg = "Error checking import status of boot resources: %s - %s" % (
+                resp.status_code, resp.text)
             self.logger.error(msg)
             raise errors.DriverError(msg)
@@ -24,13 +24,22 @@ class RackController(model_base.ResourceBase):

     # These are the services that must be 'running'
     # to consider a rack controller healthy
-    REQUIRED_SERVICES = ['http', 'tgt', 'dhcpd', 'ntp_rack', 'rackd',
-                         'tftp']
+    REQUIRED_SERVICES = ['http', 'tgt', 'dhcpd', 'ntp_rack', 'rackd', 'tftp']
     resource_url = 'rackcontrollers/{resource_id}/'
     fields = [
-        'resource_id', 'hostname', 'power_type', 'power_state',
-        'power_parameters', 'interfaces', 'boot_interface', 'memory',
-        'cpu_count', 'tag_names', 'status_name', 'boot_mac', 'owner_data',
+        'resource_id',
+        'hostname',
+        'power_type',
+        'power_state',
+        'power_parameters',
+        'interfaces',
+        'boot_interface',
+        'memory',
+        'cpu_count',
+        'tag_names',
+        'status_name',
+        'boot_mac',
+        'owner_data',
         'service_set',
     ]
     json_fields = ['hostname', 'power_type']
@@ -67,7 +76,8 @@ class RackController(model_base.ResourceBase):

         svc_status = {svc: None for svc in RackController.REQUIRED_SERVICES}

-        self.logger.debug("Checking service status on rack controller %s" % (self.resource_id))
+        self.logger.debug("Checking service status on rack controller %s" %
+                          (self.resource_id))

         for s in getattr(self, 'service_set', []):
             svc = s.get('name')
@@ -166,7 +166,8 @@ class DrydockClient(object):
             'node_filter': node_filter,
         }

-        self.logger.debug("drydock_client is calling %s API: body is %s" % (endpoint, str(task_dict)))
+        self.logger.debug("drydock_client is calling %s API: body is %s" %
+                          (endpoint, str(task_dict)))

         resp = self.session.post(endpoint, data=task_dict)

@@ -14,6 +14,7 @@
 import requests
+import logging


 class DrydockSession(object):
     """
     A session to the Drydock API maintaining credentials and API options
@@ -67,7 +67,6 @@ class ClientError(ApiError):
         super().__init__(msg)


-
 class ClientUnauthorizedError(ClientError):
     def __init__(self, msg):
         super().__init__(msg, code=401)
@@ -31,7 +31,6 @@ class YamlIngester(IngesterPlugin):
     def get_name(self):
         return "yaml"

-
     def ingest_data(self, **kwargs):
         """Parse and save design data.

@@ -62,7 +61,6 @@ class YamlIngester(IngesterPlugin):

         return models

-
     def parse_docs(self, yaml_string):
         """Translate a YAML string into the internal Drydock model."""
         models = []
@@ -106,8 +104,10 @@ class YamlIngester(IngesterPlugin):
                             tag_model.definition = t.get('definition', '')

                             if tag_model.type not in ['lshw_xpath']:
-                                raise ValueError('Unknown definition type in '
-                                                 'NodeTagDefinition: %s' % (t.definition_type))
+                                raise ValueError(
+                                    'Unknown definition type in '
+                                    'NodeTagDefinition: %s' %
+                                    (t.definition_type))
                             model.tag_definitions.append(tag_model)

                         auth_keys = spec.get('authorized_keys', [])
@@ -145,7 +145,9 @@ class YamlIngester(IngesterPlugin):
                         for k, v in location.items():
                             model.location[k] = v

-                        model.local_networks = [n for n in spec.get('local_networks', [])]
+                        model.local_networks = [
+                            n for n in spec.get('local_networks', [])
+                        ]

                         models.append(model)
                     else:
@@ -260,8 +262,10 @@ class YamlIngester(IngesterPlugin):

                         dhcp_relay = spec.get('dhcp_relay', None)
                         if dhcp_relay is not None:
-                            model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
-                            model.dhcp_relay_upstream_target = dhcp_relay.get('upstream_target', None)
+                            model.dhcp_relay_self_ip = dhcp_relay.get(
+                                'self_ip', None)
+                            model.dhcp_relay_upstream_target = dhcp_relay.get(
+                                'upstream_target', None)

                         models.append(model)
                     elif kind == 'HardwareProfile':
@@ -421,8 +425,9 @@ class YamlIngester(IngesterPlugin):
                         addresses = spec.get('addressing', [])

                         if len(addresses) == 0:
-                            raise ValueError('BaremetalNode needs at least'
-                                             ' 1 assigned address')
+                            raise ValueError(
+                                'BaremetalNode needs at least'
+                                ' 1 assigned address')

                         model.addressing = objects.IpAddressAssignmentList(
                         )
@@ -453,8 +458,7 @@ class YamlIngester(IngesterPlugin):
                             'Unknown API version %s of Kind HostProfile' %
                             (api_version))
                 else:
-                    self.log.error(
-                        "Error processing document, no kind field")
+                    self.log.error("Error processing document, no kind field")
                     continue
             elif api.startswith('promenade/'):
                 (foo, api_version) = api.split('/')
@@ -59,12 +59,9 @@ class TorSwitch(base.DrydockObject):
     VERSION = '1.0'

     fields = {
-        'switch_name':
-        obj_fields.StringField(),
-        'mgmt_ip':
-        obj_fields.StringField(nullable=True),
-        'sdn_api_uri':
-        obj_fields.StringField(nullable=True),
+        'switch_name': obj_fields.StringField(),
+        'mgmt_ip': obj_fields.StringField(nullable=True),
+        'sdn_api_uri': obj_fields.StringField(nullable=True),
     }

     def __init__(self, **kwargs):
@@ -184,7 +184,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
             if n.get_id() == network_key:
                 return n

-        raise errors.DesignError("Network %s not found in design state" % network_key)
+        raise errors.DesignError(
+            "Network %s not found in design state" % network_key)

     def add_network_link(self, new_network_link):
         if new_network_link is None:
@@ -51,8 +51,7 @@ class DrydockPolicy(object):
                                         'GET'
                                     }]),
         policy.DocumentedRuleDefault('physical_provisioner:create_task',
-                                     'role:admin',
-                                     'Create a task', [{
+                                     'role:admin', 'Create a task', [{
                                          'path':
                                          '/api/v1.0/tasks',
                                          'method':