Changed ansible directory structure
Moved roles to the parent directory so that different adapters don't have to duplicate roles in their respective paths. Patch set 2: fix some code to follow new PEP 8 standards. Change-Id: I748c1730be8045c1cb83f91eaa9f0f551cd20a6f
Parent: ea07256545
Commit: 3d4e5d0af6
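Most of the hunks below are mechanical style changes rather than behavior changes: multi-line docstrings gain a one-line summary followed by a blank line, and comments are written as "# comment" (single '#', one space) instead of "#comment" or "## comment". A minimal sketch of the target style (illustrative only; the method name and body are made up, not taken from the Compass source):

    def deploy_os(self):
        """Deploy OS to the hosts in the cluster that need it.

        Return the deployed OS config as a dict.
        """
        # block and inline comments use a single '#' followed by one space
        return {}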
@@ -205,9 +205,10 @@ class _APIClient(Client):
         return self._put(url, data=data)

     def execute(self, cluster_data, hosts_data, resp_results):
-        """The process including create or update a cluster and the cluster
-        configuration, add or update a host in the cluster, and deploy
-        the updated hosts.
+        """The process includes creating or updating a cluster.
+
+        The cluster configuration, add or update a host in the cluster,
+        and deploy the updated hosts.

         :param cluster_data: the dictionary of cluster data
         """
@@ -24,8 +24,10 @@ from compass.apiclient.restful import Client


 class AddSwitch(object):
-    """A utility class that handles adding a switch and retrieving
-    corresponding machines associated with the switch.
+    """A utility class.
+
+    Handles adding a switch and retrieving corresponding machines
+    associated with the switch.
     """

     def __init__(self, server_url):
@@ -33,8 +35,7 @@ class AddSwitch(object):
         self._client = Client(server_url)

     def add_switch(self, queue, ip, snmp_community):
-        """Add a switch with SNMP credentials and retrieve attached
-        server machines.
+        """Add a switch with SNMP credentials.

         :param queue: The result holder for the machine details.
         :type queue: A Queue object(thread-safe).
@@ -158,7 +158,9 @@ class ServerPowerMgmt(object):

 class HostPowerMgmt(object):
     """Power management for hosts installed OS by OS installer. OS installer
-    will poweron/poweroff/reset host.
+
+    will poweron/poweroff/reset host.
+
     """
     @staticmethod
     def poweron(host_id, user):
@@ -77,7 +77,6 @@ class HdsCheck(base.BaseCheck):

     def check_apt_snmp(self, pkg_module):
         """do apt health check."""
-        ## TODO(xicheng): add ubuntu package check here
         return None

     def check_snmp_mibs(self):
@@ -63,6 +63,7 @@ class ActionHelper(object):
     @staticmethod
     def get_adapter_info(adapter_id, cluster_id, user):
         """Get adapter information. Return a dictionary as below,
+
         {
             "id": 1,
             "name": "xxx",
@@ -93,6 +94,7 @@ class ActionHelper(object):
             ...
         }
         To view a complete output, please refer to backend doc.
+
         """
         adapter_info = adapter_db.get_adapter(adapter_id, user=user)
         metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
@@ -111,6 +113,7 @@ class ActionHelper(object):
     @staticmethod
     def get_cluster_info(cluster_id, user):
         """Get cluster information.Return a dictionary as below,
+
         {
             "id": 1,
             "adapter_id": 1,
@@ -149,33 +152,34 @@ class ActionHelper(object):
     @staticmethod
     def get_hosts_info(cluster_id, hosts_id_list, user):
         """Get hosts information. Return a dictionary as below,
+
         {
             "hosts": {
                 1($host_id): {
                     "reinstall_os": True,
                     "mac": "xxx",
                     "name": "xxx",
                     "roles": [xxx, yyy]
                 },
                 "networks": {
                     "eth0": {
                         "ip": "192.168.1.1",
                         "netmask": "255.255.255.0",
                         "is_mgmt": True,
                         "is_promiscuous": False,
                         "subnet": "192.168.1.0/24"
                     },
                     "eth1": {...}
                 },
                 "os_config": {},
                 "package_config": {},
                 "deployed_os_config": {},
                 "deployed_package_config": {}
             },
             2: {...},
             ....
         }
         }
         """
         hosts_info = {}
         for host_id in hosts_id_list:
@@ -38,8 +38,9 @@ class ItemNotFound(HTTPException):


 class BadRequest(HTTPException):
-    """Define the exception for invalid/missing parameters or a user makes
-    a request in invalid state and cannot be processed at this moment.
+    """Define the exception for invalid/missing parameters.
+
+    User making a request in invalid state cannot be processed.
     """
     def __init__(self, message):
         super(BadRequest, self).__init__(message, 400)
@@ -52,23 +53,19 @@ class Unauthorized(HTTPException):


 class UserDisabled(HTTPException):
-    """Define the exception that a disabled user tries to do some operations.
-    """
+    """Define the exception for disabled users."""
     def __init__(self, message):
         super(UserDisabled, self).__init__(message, 403)


 class Forbidden(HTTPException):
-    """Define the exception that a user tries to do some operations without
-    valid permissions.
-    """
+    """Define the exception for invalid permissions."""
     def __init__(self, message):
         super(Forbidden, self).__init__(message, 403)


 class BadMethod(HTTPException):
-    """Define the exception for invoking unsupprted or unimplemented methods.
-    """
+    """Define the exception for invoking unsupported methods."""
     def __init__(self, message):
         super(BadMethod, self).__init__(message, 405)

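For context, each of these API exception classes simply pairs an error message with an HTTP status code through the parent constructor, so callers raise them directly from request handlers. A hedged usage sketch (the handler and message are hypothetical, only the raise pattern is taken from the code above):

    # hypothetical caller: a missing payload surfaces as an HTTP 400 response
    def update_cluster_config(cluster_id, config):
        if not config:
            raise BadRequest('config must not be empty for cluster %s' % cluster_id)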
@@ -286,7 +286,7 @@ class Client(object):
         return self._delete('/switches/%s/machines/%s' %
                             (switch_id, machine_id))

-    ## test these
+    # test these
     def poll_switch(self, switch_id):
         data = {}
         data['find_machines'] = None
@@ -306,7 +306,7 @@ class Client(object):
         data = {}
         data['set_machines'] = group_machines
         return self._post('/switches/%s/action' % switch_id, data=data)
-    ## end
+    # end

     def list_switchmachines(self, switch_ip_int=None, port=None, vlans=None,
                             mac=None, tag=None, location=None):
@@ -621,7 +621,7 @@ class Client(object):
     def delete_cluster_config(self, cluster_id):
         return self._delete('/clusters/%s/config' % cluster_id)

-    ## test these
+    # test these
     def add_hosts_to_cluster(self, cluster_id, hosts):
         data = {}
         data['add_hosts'] = hosts
@@ -28,7 +28,7 @@ COMPASS_SERVER_URL = 'http://127.0.0.1/api'
 SWITCH_IP = '10.145.81.220'
 SWITCH_SNMP_VERSION = 'v2c'
 SWITCH_SNMP_COMMUNITY = 'public'
-#MACHINES_TO_ADD = ['00:11:20:30:40:01']
+# MACHINES_TO_ADD = ['00:11:20:30:40:01']
 CLUSTER_NAME = 'cluster2'
 HOST_NAME_PREFIX = 'host'
 SERVER_USERNAME = 'root'
@@ -38,11 +38,11 @@ SERVICE_PASSWORD = 'service'
 CONSOLE_USERNAME = 'console'
 CONSOLE_PASSWORD = 'console'
 HA_VIP = ''
-#NAMESERVERS = '192.168.10.6'
+# NAMESERVERS = '192.168.10.6'
 SEARCH_PATH = 'ods.com'
-#GATEWAY = '192.168.10.6'
-#PROXY = 'http://192.168.10.6:3128'
-#NTP_SERVER = '192.168.10.6'
+# GATEWAY = '192.168.10.6'
+# PROXY = 'http://192.168.10.6:3128'
+# NTP_SERVER = '192.168.10.6'
 MANAGEMENT_IP_START = '192.168.10.130'
 MANAGEMENT_IP_END = '192.168.10.254'
 MANAGEMENT_IP_GATEWAY = '192.168.10.1'
@@ -70,7 +70,7 @@ STORAGE_PROMISC = 0
 HOME_PERCENTAGE = 5
 TMP_PERCENTAGE = 5
 VAR_PERCENTAGE = 10
-#ROLES_LIST = [['os-dashboard']]
+# ROLES_LIST = [['os-dashboard']]

 PRESET_VALUES = {
     'NAMESERVERS': '192.168.10.1',
|
@ -275,8 +275,7 @@ class Client(object):
|
|||||||
return self._get('/machines/%s' % machine_id)
|
return self._get('/machines/%s' % machine_id)
|
||||||
|
|
||||||
def get_clusters(self):
|
def get_clusters(self):
|
||||||
"""Lists the details for all clusters.
|
"""Lists the details for all clusters."""
|
||||||
"""
|
|
||||||
return self._get('/clusters')
|
return self._get('/clusters')
|
||||||
|
|
||||||
def get_cluster(self, cluster_id):
|
def get_cluster(self, cluster_id):
|
||||||
|
@@ -1772,8 +1772,7 @@ def update_cluster_host_state_internal(
         for clusterhost_in_cluster in cluster.clusterhosts:
             if (
                 clusterhost_in_cluster.clusterhost_id
-                ==
-                clusterhost.clusterhost_id
+                == clusterhost.clusterhost_id
             ):
                 continue
             if not clusterhost_in_cluster.state.ready:
@@ -1876,8 +1875,7 @@ def update_clusterhost_state_internal(
         for clusterhost_in_cluster in cluster.clusterhosts:
             if (
                 clusterhost_in_cluster.clusterhost_id
-                ==
-                clusterhost.clusterhost_id
+                == clusterhost.clusterhost_id
             ):
                 continue
             if not clusterhost_in_cluster.state.ready:
@@ -33,9 +33,11 @@ MAPPER = {


 def validate_config(session, config, id_name, id_value, patch=True):
-    """Validates the given config value according to the config
-    metadata of the asscoiated os_id or adapter_id. Returns
-    a tuple (status, message).
+    """Validates config.
+
+    Validates the given config value according to the config
+    metadata of the asscoiated os_id or adapter_id. Returns
+    a tuple (status, message).
     """
     if id_name not in MAPPER.keys():
         return (False, "Invalid id type %s" % id_name)
@@ -49,6 +49,7 @@ class Unauthorized(DatabaseException):

 class UserDisabled(DatabaseException):
     """Define the exception that a disabled user tries to do some operations.
+
     """
     def __init__(self, message):
         super(UserDisabled, self).__init__(message)
@@ -57,7 +58,9 @@ class UserDisabled(DatabaseException):

 class Forbidden(DatabaseException):
     """Define the exception that a user is trying to make some action
-    without the right permission.
+
+    without the right permission.
+
     """
     def __init__(self, message):
         super(Forbidden, self).__init__(message)
@@ -73,6 +76,7 @@ class NotAcceptable(DatabaseException):

 class InvalidParameter(DatabaseException):
     """Define the exception that the request has invalid or missing parameters.
+
     """
     def __init__(self, message):
         super(InvalidParameter, self).__init__(message)
@@ -81,6 +85,7 @@ class InvalidParameter(DatabaseException):

 class InvalidResponse(DatabaseException):
     """Define the exception that the response is invalid.
+
     """
     def __init__(self, message):
         super(InvalidResponse, self).__init__(message)
|
@ -541,6 +541,7 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
|
|||||||
|
|
||||||
class ClusterHostLogHistory(BASE, LogHistoryMixin):
|
class ClusterHostLogHistory(BASE, LogHistoryMixin):
|
||||||
"""clusterhost installing log history for each file.
|
"""clusterhost installing log history for each file.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
__tablename__ = 'clusterhost_log_history'
|
__tablename__ = 'clusterhost_log_history'
|
||||||
|
|
||||||
@@ -577,6 +578,7 @@ class ClusterHostLogHistory(BASE, LogHistoryMixin):

 class HostLogHistory(BASE, LogHistoryMixin):
     """host installing log history for each file.
+
     """
     __tablename__ = 'host_log_history'

|
@ -32,12 +32,12 @@ from flask.ext.login import UserMixin
|
|||||||
from itsdangerous import URLSafeTimedSerializer
|
from itsdangerous import URLSafeTimedSerializer
|
||||||
|
|
||||||
BASE = declarative_base()
|
BASE = declarative_base()
|
||||||
#TODO(grace) SECRET_KEY should be generated when installing compass
|
# TODO(grace) SECRET_KEY should be generated when installing compass
|
||||||
#and save to a config file or DB
|
# and save to a config file or DB
|
||||||
SECRET_KEY = "abcd"
|
SECRET_KEY = "abcd"
|
||||||
|
|
||||||
#This is used for generating a token by user's ID and
|
# This is used for generating a token by user's ID and
|
||||||
#decode the ID from this token
|
# decode the ID from this token
|
||||||
login_serializer = URLSafeTimedSerializer(SECRET_KEY)
|
login_serializer = URLSafeTimedSerializer(SECRET_KEY)
|
||||||
|
|
||||||
|
|
||||||
|
@ -92,8 +92,9 @@ class DeployManager(object):
|
|||||||
self.clean_progress()
|
self.clean_progress()
|
||||||
|
|
||||||
def deploy_os(self):
|
def deploy_os(self):
|
||||||
"""Deploy OS to hosts which need to in the cluster. Return OS deployed
|
"""Deploy OS to hosts which need to in the cluster.
|
||||||
config.
|
|
||||||
|
Return OS deployed config.
|
||||||
"""
|
"""
|
||||||
if not self.os_installer:
|
if not self.os_installer:
|
||||||
return {}
|
return {}
|
||||||
@@ -113,8 +114,9 @@ class DeployManager(object):
         return self.os_installer.deploy()

     def deploy_target_system(self):
-        """Deploy target system to all hosts in the cluster. Return package
-        deployed config.
+        """Deploy target system to all hosts in the cluster.
+
+        Return package deployed config.
         """
         if not self.pk_installer:
             return {}
@@ -41,8 +41,9 @@ class BaseConfigManager(object):
         return self.__get_cluster_item(const.OS_VERSION)

     def get_cluster_baseinfo(self):
-        """Get cluster base information, including cluster_id, os_version,
-        and cluster_name.
+        """Get cluster base information.
+
+        Including cluster_id, os_version and cluster_name.
         """
         attr_names = [const.ID, const.NAME, const.OS_VERSION]

@@ -351,21 +352,22 @@ class BaseConfigManager(object):

     def _get_cluster_roles_mapping_helper(self):
         """The ouput format will be as below, for example:
+
         {
             "controller": [{
                 "hostname": "xxx",
                 "management": {
                     "interface": "eth0",
                     "ip": "192.168.1.10",
                     "netmask": "255.255.255.0",
                     "subnet": "192.168.1.0/24",
                     "is_mgmt": True,
                     "is_promiscuous": False
                 },
                 ...
             }],
             ...
         }
         """
         mapping = {}
         hosts_id_list = self.get_host_id_list()
@@ -60,8 +60,9 @@ class BaseInstaller(object):

     def get_tmpl_vars_from_metadata(self, metadata, config):
         """Get variables dictionary for rendering templates from metadata.
+
         :param dict metadata: The metadata dictionary.
         :param dict config: The
         """
         template_vars = {}
         self._get_tmpl_vars_helper(metadata, config, template_vars)
@@ -69,18 +70,19 @@ class BaseInstaller(object):
         return template_vars

     def _get_key_mapping(self, metadata, key, is_regular_key):
-        """Get the keyword which the input key maps to. This keyword will be
-        added to dictionary used to render templates.
+        """Get the keyword which the input key maps to.
+
+        This keyword will be added to dictionary used to render templates.

         If the key in metadata has a mapping to another keyword which is
         used for templates, then return this keyword. If the key is started
         with '$', which is a variable in metadata, return the key itself as
         the mapping keyword. If the key has no mapping, return None.

         :param dict metadata: metadata/submetadata dictionary.
         :param str key: The keyword defined in metadata.
         :param bool is_regular_key: False when the key defined in metadata
                                     is a variable(starting with '$').
         """
         mapping_to = key
         if is_regular_key:
@@ -92,12 +94,14 @@ class BaseInstaller(object):
         return mapping_to

     def _get_submeta_by_key(self, metadata, key):
-        """Get submetadata dictionary based on current metadata key. And
-        determines the input key is a regular string keyword or a variable
-        keyword defined in metadata, which starts with '$'.
+        """Get submetadata dictionary.
+
+        Based on current metadata key. And
+        determines the input key is a regular string keyword or a variable
+        keyword defined in metadata, which starts with '$'.

         :param dict metadata: The metadata dictionary.
         :param str key: The keyword defined in the metadata.
         """
         if key in metadata:
             return (True, metadata[key])
@@ -89,10 +89,11 @@ class CobblerInstaller(OSInstaller):

     def get_supported_oses(self):
         """get supported os versions.
-        .. note::
+
+        note::
            In cobbler, we treat profile name as the indicator
            of os version. It is just a simple indicator
            and not accurate.
         """
         profiles = self.remote.get_profiles()
         oses = []
@@ -102,20 +103,21 @@ class CobblerInstaller(OSInstaller):

     def deploy(self):
         """Sync cobbler to catch up the latest update config and start to
+
         install OS. Return both cluster and hosts deploy configs. The return
         format:
         {
             "cluster": {
                 "id": 1,
                 "deployed_os_config": {},
             },
             "hosts": {
                 1($clusterhost_id): {
                     "deployed_os_config": {...},
                 },
                 ....
             }
         }
         """
         host_ids = self.config_manager.get_hosts_id_list_for_os_installation()
         if not host_ids:
@@ -183,13 +185,14 @@ class CobblerInstaller(OSInstaller):

     def set_package_installer_config(self, package_configs):
         """Cobbler can install and configure package installer right after
+
         OS installation compelets by setting package_config info provided
         by package installer.

         :param dict package_configs: The dict of config generated by package
                                      installer for each clusterhost. The IDs
                                      of clusterhosts are the keys of
                                      package_configs.
         """
         self.pk_installer_config = package_configs

@@ -309,7 +312,8 @@ class CobblerInstaller(OSInstaller):

     def delete_single_host(self, host_id):
         """Delete the host from cobbler server and clean up the installation
+
         progress.
         """
         hostname = self.config_manager.get_hostname(host_id)
         try:
@@ -321,8 +325,7 @@ class CobblerInstaller(OSInstaller):
             logging.exception(ex)

     def _get_host_tmpl_vars_dict(self, host_id, global_vars_dict, **kwargs):
-        """Generate template variables dictionary.
-        """
+        """Generate template variables dictionary."""
         vars_dict = {}
         if global_vars_dict:
             # Set cluster template vars_dict from cluster os_config.
@@ -95,15 +95,15 @@ class AnsibleInstaller(PKInstaller):
             self.__class__.__name__, self.NAME, self.installer_url)

     def generate_installer_config(self):
-        """Render ansible config file by OS installing right after
-        OS is installed successfully.
+        """Render ansible config file by OS installing.
+
         The output format:
         {
             '1'($host_id/clusterhost_id):{
                 'tool': 'ansible',
             },
             .....
         }
         """
         host_ids = self.config_manager.get_host_id_list()
         os_installer_configs = {}
@@ -119,21 +119,23 @@ class AnsibleInstaller(PKInstaller):
         return "-".join((dist_sys_name, cluster_name))

     def _get_cluster_tmpl_vars(self):
-        """Generate template variables dict based on cluster level config.
+        """Generate template variables dict
+
+        Generates based on cluster level config.
         The vars_dict will be:
         {
             "baseinfo": {
                 "id":1,
                 "name": "cluster01",
                 ...
             },
             "package_config": {
                 .... //mapped from original package config based on metadata
             },
             "role_mapping": {
                 ....
             }
         }
         """
         cluster_vars_dict = {}
         # set cluster basic information to vars_dict
@@ -242,7 +244,7 @@ class AnsibleInstaller(PKInstaller):
         files = self.runner_files
         for dir in dirs:
             shutil.copytree(
-                os.path.join(self.adapter_dir, dir),
+                os.path.join(self.ansible_dir, dir),
                 os.path.join(
                     ansible_run_destination,
                     dir
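This hunk is the functional core of the commit: runner directories (the shared roles among them) are now copied from the common ansible directory rather than from each adapter's own subdirectory. A rough sketch of the assumed layout (paths and attribute names here are illustrative, not verified against the Compass tree):

    import os

    ANSIBLE_DIR = '/var/ansible'                                 # shared parent, assumed path
    ADAPTER_DIR = os.path.join(ANSIBLE_DIR, 'openstack_juno')    # hypothetical per-adapter subdir

    # before: every adapter carried its own copy of the roles tree
    # roles_src = os.path.join(ADAPTER_DIR, 'roles')
    # after: a single roles tree sits beside the adapters and is shared
    roles_src = os.path.join(ANSIBLE_DIR, 'roles')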
@@ -289,24 +291,26 @@ class AnsibleInstaller(PKInstaller):
         self.serialize_config(cfg_config, cfg_destination)

     def deploy(self):
-        """Start to deploy a distributed system. Return both cluster and hosts
-        deployed configs. The return format:
+        """Start to deploy a distributed system.
+
+        Return both cluster and hosts deployed configs.
+        The return format:
         {
             "cluster": {
                 "id": 1,
                 "deployed_package_config": {
                     "roles_mapping": {...},
                     "service_credentials": {...},
                     ....
                 }
             },
             "hosts": {
                 1($clusterhost_id): {
                     "deployed_package_config": {...}
                 },
                 ....
             }
         }
         """
         host_list = self.config_manager.get_host_id_list()
         if not host_list:
@@ -103,8 +103,9 @@ class ChefInstaller(PKInstaller):
         return "-".join((dist_sys_name, cluster_name))

     def get_create_databag(self, databag_name):
-        """Get databag object from chef server. Create the databag if it
-        does not exist.
+        """Get databag object from chef server.
+
+        Creates the databag if it does not exist.
         """
         import chef
         databag = None
@@ -116,11 +117,13 @@ class ChefInstaller(PKInstaller):
         return databag

     def get_create_node(self, node_name, env_name=None):
-        """Get chef node if existing, otherwise create one and set its
-        environment.
+        """Get chef node
+
+        Gets the node if existing, otherwise create one and set its
+        environment.

         :param str node_name: The name for this node.
         :param str env_name: The environment name for this node.
         """
         import chef
         if not self.chef_api:
@@ -194,8 +197,9 @@ class ChefInstaller(PKInstaller):

     def add_roles(self, node, roles):
         """Add roles to the node.
+
         :param object node: The node object.
         :param list roles: The list of roles for this node.
         """
         if node is None:
             raise Exception("Node is None!")
@@ -217,13 +221,15 @@ class ChefInstaller(PKInstaller):
         logging.debug('Runlist for node %s is %s', node.name, node.run_list)

     def _generate_node_attributes(self, roles, host_vars_dict):
-        """Generate node attributes from templates according to its roles. The
-        templates are named by roles without '-'. Return the dictionary
-        of attributes defined in the templates.
+        """Generate node attributes.
+
+        Generates from templates according to its roles. The
+        templates are named by roles without '-'. Return the dictionary
+        of attributes defined in the templates.

         :param list roles: The roles for this node, used to load the
                            specific template.
         :param dict host_vars_dict: The dict used in cheetah searchList to
                                     render attributes from templates.
         """
         if not roles:
@@ -296,12 +302,11 @@ class ChefInstaller(PKInstaller):
         env.save()

     def upload_environment(self, env_name, global_vars_dict):
-        """Generate environment attributes based on the template file and
-        upload it to chef server.
+        """Generate environment attributes

         :param str env_name: The environment name.
         :param dict vars_dict: The dictionary used in cheetah searchList to
                                render attributes from templates.
         """
         env_config = self._generate_env_attributes(global_vars_dict)
         env = self.get_create_environment(env_name)
@@ -341,14 +346,15 @@ class ChefInstaller(PKInstaller):
         databagitem.save()

     def _get_host_tmpl_vars(self, host_id, global_vars_dict):
-        """Generate templates variables dictionary for cheetah searchList based
-        on host package config.
+        """Generate templates variables dictionary.
+
+        For cheetah searchList based on host package config.

         :param int host_id: The host ID.
         :param dict global_vars_dict: The vars_dict got from cluster level
                                       package_config.

         The output format is the same as cluster_vars_dict.
         """
         host_vars_dict = {}
         if global_vars_dict:
@@ -378,20 +384,21 @@ class ChefInstaller(PKInstaller):

     def _get_cluster_tmpl_vars(self):
         """Generate template variables dict based on cluster level config.
+
         The vars_dict will be:
         {
             "baseinfo": {
                 "id":1,
                 "name": "cluster01",
                 ...
             },
             "package_config": {
                 .... //mapped from original package config based on metadata
             },
             "role_mapping": {
                 ....
             }
         }
         """
         cluster_vars_dict = {}
         # set cluster basic information to vars_dict
@@ -422,23 +429,24 @@ class ChefInstaller(PKInstaller):

     def deploy(self):
         """Start to deploy a distributed system. Return both cluster and hosts
+
         deployed configs. The return format:
         {
             "cluster": {
                 "id": 1,
                 "deployed_package_config": {
                     "roles_mapping": {...},
                     "service_credentials": {...},
                     ....
                 }
             },
             "hosts": {
                 1($clusterhost_id): {
                     "deployed_package_config": {...}
                 },
                 ....
             }
         }
         """
         host_list = self.config_manager.get_host_id_list()
         if not host_list:
@@ -498,19 +506,20 @@ class ChefInstaller(PKInstaller):

     def generate_installer_config(self):
         """Render chef config file (client.rb) by OS installing right after
+
         OS is installed successfully.
         The output format:
         {
             '1'($host_id/clusterhost_id):{
                 'tool': 'chef',
                 'chef_url': 'https://xxx',
                 'chef_client_name': '$host_name',
                 'chef_node_name': '$host_name',
                 'chef_server_ip': 'xxx',(op)
                 'chef_server_dns': 'xxx' (op)
             },
             .....
         }
         """
         host_ids = self.config_manager.get_host_id_list()
         os_installer_configs = {}
@@ -552,7 +561,8 @@ class ChefInstaller(PKInstaller):

     def get_supported_dist_systems(self):
         """get target systems from chef. All target_systems for compass will
+
         be stored in the databag called "compass".
         """
         databag = self.__get_compass_databag()
         target_systems = {}
|
@ -31,7 +31,8 @@ class BaseVendor(object):
|
|||||||
|
|
||||||
def is_this_vendor(self, sys_info, **kwargs):
|
def is_this_vendor(self, sys_info, **kwargs):
|
||||||
"""Determine if the host is associated with this vendor.
|
"""Determine if the host is associated with this vendor.
|
||||||
This function must be implemented by vendor itself
|
|
||||||
|
This function must be implemented by vendor itself
|
||||||
"""
|
"""
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
@@ -48,10 +49,11 @@ class BaseSnmpVendor(BaseVendor):
         self._matched_names = matched_names

     def is_this_vendor(self, sys_info, **kwargs):
-        """Determine if the switch belongs to this vendor by matching the
-        system information retrieved from the switch.
+        """Determine if the switch belongs to this vendor.
+
+        Matching the system information retrieved from the switch.
         :param str sys_info: the system information retrieved from a switch
         Return True
         """
         if sys_info:
             for name in self._matched_names:
@@ -62,14 +64,16 @@ class BaseSnmpVendor(BaseVendor):


 class BasePlugin(object):
-    """Extended by vendor's plugin, which processes request and
-    retrieve info directly from the switch.
+    """Extended by vendor's plugin.
+
+    This plugin processes request and retrieve info directly from the switch.
     """
     __metaclass__ = ABCMeta

     def process_data(self, oper='SCAN', **kwargs):
         """Each vendors will have some plugins to do some operations.
-        Plugin will process request data and return expected result.
+
+        Plugin will process request data and return expected result.

         :param oper: operation function name.
         :param kwargs: key-value pairs of arguments
@@ -39,8 +39,9 @@ class HDManager(object):
         self.snmp_sysdescr = 'sysDescr.0'

     def learn(self, host, credential, vendor, req_obj, oper="SCAN", **kwargs):
-        """Insert/update record of switch_info. Get expected results from
-        switch according to sepcific operation.
+        """Insert/update record of switch_info.
+
+        Get expected results from switch according to sepcific operation.

         :param req_obj: the object of a machine
         :param host: switch IP address
@@ -278,7 +278,8 @@ def snmpwalk_by_cl(host, credential, oid, timeout=5, retries=3):

 def exec_command(command):
     """Execute command.
+
     Return a tuple: returncode, output and error message(None if no error).
     """
     sub_p = subprocess.Popen(command,
                              shell=True,
@@ -17,7 +17,7 @@
 from compass.hdsdiscovery import base


-#Vendor_loader will load vendor instance by CLASS_NAME
+# Vendor_loader will load vendor instance by CLASS_NAME
 CLASS_NAME = 'Appliance'


@@ -28,8 +28,8 @@ class Mac(base.BaseSnmpMacPlugin):

     def __init__(self, host, credential):
         self.host = host
-        #self.credential = credential
-        #return
+        # self.credential = credential
+        # return

     def scan(self):
         """Implemnets the scan method in BasePlugin class.
@@ -16,7 +16,7 @@
 from compass.hdsdiscovery import base


-#Vendor_loader will load vendor instance by CLASS_NAME
+# Vendor_loader will load vendor instance by CLASS_NAME
 CLASS_NAME = 'Arista'


compass/hdsdiscovery/vendors/hp/hp.py (vendored)
@@ -16,7 +16,7 @@
 from compass.hdsdiscovery import base


-#Vendor_loader will load vendor instance by CLASS_NAME
+# Vendor_loader will load vendor instance by CLASS_NAME
 CLASS_NAME = 'Hp'


@@ -16,7 +16,7 @@
 from compass.hdsdiscovery import base


-#Vendor_loader will load vendor instance by CLASS_NAME
+# Vendor_loader will load vendor instance by CLASS_NAME
 CLASS_NAME = "Huawei"


@@ -20,7 +20,7 @@ from compass.hdsdiscovery import base
 from compass.hdsdiscovery import utils


-#Vendor_loader will load vendor instance by CLASS_NAME
+# Vendor_loader will load vendor instance by CLASS_NAME
 CLASS_NAME = "OVSwitch"


compass/hdsdiscovery/vendors/pica8/pica8.py (vendored)
@@ -16,7 +16,7 @@
 from compass.hdsdiscovery import base


-#Vendor_loader will load vendor instance by CLASS_NAME
+# Vendor_loader will load vendor instance by CLASS_NAME
 CLASS_NAME = 'Pica8'


|
@ -211,43 +211,37 @@ def clean_package_installer(
|
|||||||
|
|
||||||
@celery.task(name='compass.tasks.poweron_host')
|
@celery.task(name='compass.tasks.poweron_host')
|
||||||
def poweron_host(host_id):
|
def poweron_host(host_id):
|
||||||
"""Deploy the given cluster.
|
"""Deploy the given cluster."""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@celery.task(name='compass.tasks.poweroff_host')
|
@celery.task(name='compass.tasks.poweroff_host')
|
||||||
def poweroff_host(host_id):
|
def poweroff_host(host_id):
|
||||||
"""Deploy the given cluster.
|
"""Deploy the given cluster."""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@celery.task(name='compass.tasks.reset_host')
|
@celery.task(name='compass.tasks.reset_host')
|
||||||
def reset_host(host_id):
|
def reset_host(host_id):
|
||||||
"""Deploy the given cluster.
|
"""Deploy the given cluster."""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@celery.task(name='compass.tasks.poweron_machine')
|
@celery.task(name='compass.tasks.poweron_machine')
|
||||||
def poweron_machine(machine_id):
|
def poweron_machine(machine_id):
|
||||||
"""Deploy the given cluster.
|
"""Deploy the given cluster."""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@celery.task(name='compass.tasks.poweroff_machine')
|
@celery.task(name='compass.tasks.poweroff_machine')
|
||||||
def poweroff_machine(machine_id):
|
def poweroff_machine(machine_id):
|
||||||
"""Deploy the given cluster.
|
"""Deploy the given cluster."""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@celery.task(name='compass.tasks.reset_machine')
|
@celery.task(name='compass.tasks.reset_machine')
|
||||||
def reset_machine(machine_id):
|
def reset_machine(machine_id):
|
||||||
"""Deploy the given cluster.
|
"""Deploy the given cluster."""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@@ -256,8 +250,7 @@ def os_installed(
         host_id, clusterhosts_ready,
         clusters_os_ready
 ):
-    """callback when os is installed.
-    """
+    """callback when os is installed."""
     try:
         install_callback.os_installed(
             host_id, clusterhosts_ready,
@@ -271,8 +264,7 @@ def os_installed(
 def package_installed(
         cluster_id, host_id, cluster_ready, host_ready
 ):
-    """callback when package is installed.
-    """
+    """callback when package is installed."""
     try:
         install_callback.package_installed(
             cluster_id, host_id, cluster_ready, host_ready
@@ -285,8 +277,7 @@ def package_installed(
 def cluster_installed(
         cluster_id, clusterhosts_ready
 ):
-    """callback when package is installed.
-    """
+    """callback when package is installed."""
     try:
         install_callback.cluster_installed(
             cluster_id, clusterhosts_ready
@@ -297,8 +288,7 @@ def cluster_installed(

 @celery.task(name='compass.tasks.update_progress')
 def update_clusters_progress():
-    """Calculate the installing progress of the given cluster.
-    """
+    """Calculate the installing progress of the given cluster."""
     logging.info('update_clusters_progress')
     try:
         update_progress.update_progress()
|
@ -123,7 +123,7 @@ class TestProgressCalculator(unittest2.TestCase):
|
|||||||
if not self.flavor_id:
|
if not self.flavor_id:
|
||||||
raise Exception('flavor id not found')
|
raise Exception('flavor id not found')
|
||||||
|
|
||||||
#add cluster
|
# add cluster
|
||||||
cluster.add_cluster(
|
cluster.add_cluster(
|
||||||
adapter_id=self.adapter_id,
|
adapter_id=self.adapter_id,
|
||||||
os_id=self.os_id,
|
os_id=self.os_id,
|
||||||
@@ -139,7 +139,7 @@ class TestProgressCalculator(unittest2.TestCase):
         for list_cluster in list_clusters:
             self.cluster_id = list_cluster['id']

-        #add switch
+        # add switch
         switch.add_switch(
             ip=SWITCH_IP,
             user=self.user_object,
@@ -154,12 +154,12 @@ class TestProgressCalculator(unittest2.TestCase):
             port='1'
         )

-        #get machine information
+        # get machine information
         list_machines = machine.list_machines(user=self.user_object)
         for list_machine in list_machines:
             self.machine_id = list_machine['id']

-        #add cluster host
+        # add cluster host
         cluster.add_cluster_host(
             self.cluster_id,
             user=self.user_object,
@@ -171,7 +171,7 @@ class TestProgressCalculator(unittest2.TestCase):
             self.host_id = list_clusterhost['host_id']
             self.clusterhost_id = list_clusterhost['clusterhost_id']

-        #add subnet
+        # add subnet
         network.add_subnet(
             subnet=SUBNET,
             user=self.user_object,
@@ -182,7 +182,7 @@ class TestProgressCalculator(unittest2.TestCase):
         for list_subnet in list_subnets:
             self.subnet_id = list_subnet['id']

-        #add host network
+        # add host network
         host.add_host_network(
             self.host_id,
             user=self.user_object,
@@ -192,14 +192,14 @@ class TestProgressCalculator(unittest2.TestCase):
             is_mgmt=True
         )

-        #get clusterhost
+        # get clusterhost
         list_clusterhosts = cluster.list_clusterhosts(
             user=self.user_object
         )
         for list_clusterhost in list_clusterhosts:
             self.clusterhost_id = list_clusterhost['id']

-        #update host state
+        # update host state
         self.list_hosts = host.list_hosts(user=self.user_object)
         for list_host in self.list_hosts:
             self.host_id = list_host['id']
@@ -209,14 +209,14 @@ class TestProgressCalculator(unittest2.TestCase):
             state='INSTALLING'
         )

-        #update cluster state
+        # update cluster state
         cluster.update_cluster_state(
             self.cluster_id,
             user=self.user_object,
             state='INSTALLING'
         )

-        #update clusterhost state
+        # update clusterhost state
         cluster.update_clusterhost_state(
             self.clusterhost_id,
             user=self.user_object,
|
@ -85,7 +85,7 @@ class ApiTestCase(unittest2.TestCase):
|
|||||||
resp = json.loads(resp)
|
resp = json.loads(resp)
|
||||||
self.token = resp['token']
|
self.token = resp['token']
|
||||||
|
|
||||||
#create a cluster
|
# create a cluster
|
||||||
adapter_name, adapter_id, os_id, flavor_id = (
|
adapter_name, adapter_id, os_id, flavor_id = (
|
||||||
self._get_adapter_info()
|
self._get_adapter_info()
|
||||||
)
|
)
|
||||||
@@ -105,7 +105,7 @@ class ApiTestCase(unittest2.TestCase):
         data['flavor_id'] = flavor_id
         self.post(url, data)

-        #create a switch
+        # create a switch
         url = '/switches'
         datas = [
             {
||||||
@ -537,7 +537,7 @@ class TestSubnetAPI(ApiTestCase):
|
|||||||
self.assertEqual(return_value.status_code, 200)
|
self.assertEqual(return_value.status_code, 200)
|
||||||
self.assertTrue(item in data.items() for item in resp.items())
|
self.assertTrue(item in data.items() for item in resp.items())
|
||||||
|
|
||||||
# give a non-existed id
|
# give a non-existed id
|
||||||
url = '/subnets/99'
|
url = '/subnets/99'
|
||||||
data = {
|
data = {
|
||||||
'subnet': '192.168.100.0/24',
|
'subnet': '192.168.100.0/24',
|
||||||
@@ -813,7 +813,7 @@ class TestHostAPI(ApiTestCase):
         self.assertEqual([], resp)

     def test_show_host(self):
-        #show a host successfully
+        # show a host successfully
         url = '/hosts/1'
         return_value = self.get(url)
         resp = json.loads(return_value.get_data())
@@ -408,7 +408,7 @@ class TestUpdateCluster(ClusterTestCase):
             name='cluster_editable'
         )

-        #reinstall
+        # reinstall
         self.assertRaises(
             exception.Forbidden,
             cluster.update_cluster,
@ -441,7 +441,7 @@ class TestDelCluster(ClusterTestCase):
|
|||||||
self.assertNotEqual(1, del_cluster['id'])
|
self.assertNotEqual(1, del_cluster['id'])
|
||||||
|
|
||||||
def test_is_cluster_editable(self):
|
def test_is_cluster_editable(self):
|
||||||
#state is INSTALLING
|
# state is INSTALLING
|
||||||
cluster.update_cluster_state(
|
cluster.update_cluster_state(
|
||||||
self.cluster_id,
|
self.cluster_id,
|
||||||
user=self.user_object,
|
user=self.user_object,
|
||||||
@ -1596,7 +1596,7 @@ class TestUpdateClusterHosts(ClusterTestCase):
|
|||||||
result = item
|
result = item
|
||||||
self.assertNotIn(self.host_id[0], result)
|
self.assertNotIn(self.host_id[0], result)
|
||||||
|
|
||||||
#add host
|
# add host
|
||||||
cluster.update_cluster_hosts(
|
cluster.update_cluster_hosts(
|
||||||
self.cluster_id,
|
self.cluster_id,
|
||||||
user=self.user_object,
|
user=self.user_object,
|
||||||
|
@@ -55,7 +55,7 @@ class MetadataTestCase(unittest2.TestCase):
 adapter.load_adapters()
 metadata.load_metadatas()

-#Get a os_id and adapter_id
+# Get a os_id and adapter_id
 self.user_object = (
 user_api.get_user_object(
 setting.COMPASS_ADMIN_EMAIL
@@ -203,8 +203,9 @@ class TestPatchSwitch(BaseTest):
 )
 expected = {
 'credentials': {
 'version': '2c',
-'community': 'public'}
+'community': 'public'
+}
 }
 self.assertTrue(
 all(item in patch_switch.items() for item in expected.items())
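The only change above is moving the closing brace of the nested credentials dict onto its own line; the subset-style assertion is unchanged. For readers skimming the hunk, a small self-contained sketch of that pattern with made-up values:

patch_switch = {
    'ip': '192.0.2.1',            # made-up value
    'credentials': {'version': '2c', 'community': 'public'}
}
expected = {
    'credentials': {
        'version': '2c',
        'community': 'public'
    }
}
# Every (key, value) pair in expected must also appear in patch_switch.
assert all(item in patch_switch.items() for item in expected.items())
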
@@ -55,12 +55,12 @@ class HuaweiTest(unittest2.TestCase):

 def test_is_this_vendor(self):
 """test device vendor is haiwei."""
-#Incorrect system information
+# Incorrect system information
 incorrect_sys_info = "xxx"
 self.assertFalse(
 self.huawei.is_this_vendor(incorrect_sys_info))

-#Correct vendor
+# Correct vendor
 self.assertTrue(
 self.huawei.is_this_vendor(self.sys_info))

@@ -86,7 +86,7 @@ class HuaweiMacTest(unittest2.TestCase):
 self.assertIsNone(self.mac_plugin.process_data('GET'))

 # SNMP Walk Timeout
-#utils.snmpwalk_by_cl = Mock(return_value=None)
+# utils.snmpwalk_by_cl = Mock(return_value=None)
 mock_snmpwalk.return_value = None
 self.assertIsNone(self.mac_plugin.process_data())

@@ -101,7 +101,7 @@ class HuaweiMacTest(unittest2.TestCase):
 {"mac": "28:6e:d4:64:c7:4a", "port": "2", "vlan": "88"},
 {"mac": "00:0c:29:35:dc:02", "port": "3", "vlan": "88"}
 ]
-#utils.snmpwalk_by_cl = Mock(return_value=mock_snmp_walk_result)
+# utils.snmpwalk_by_cl = Mock(return_value=mock_snmp_walk_result)
 mock_snmpwalk.return_value = mock_snmp_walk_result
 self.mac_plugin.get_port = Mock()
 self.mac_plugin.get_port.side_effect = ["1", "2", "3"]
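These tests drive the MAC plugin entirely through mocks: the patched snmpwalk helper hands back one canned walk result via return_value, while get_port.side_effect serves a different port string on each call. A short standalone sketch of those two Mock behaviours (standard mock semantics, hypothetical data):

from mock import Mock   # the test modules use the standalone mock library

# return_value: every call yields the same canned SNMP walk result.
snmpwalk = Mock(return_value=[
    {"mac": "00:0c:29:35:dc:02", "port": "3", "vlan": "88"},
])
assert snmpwalk('oid-a') == snmpwalk('oid-b')

# side_effect with a list: each call consumes the next item in order.
get_port = Mock(side_effect=["1", "2", "3"])
assert [get_port(), get_port(), get_port()] == ["1", "2", "3"]
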
@@ -196,14 +196,14 @@ class HDManagerTest(unittest2.TestCase):
 @patch('compass.hdsdiscovery.hdmanager.HDManager.get_sys_info')
 def test_is_valid_vendor(self, sys_info_mock):
 """test is_valid_vendor."""
-#non-exsiting vendor under vendors directory
+# non-exsiting vendor under vendors directory
 self.assertFalse(
 self.manager.is_valid_vendor(self.correct_host,
 self.correct_credential,
 'xxxx')
 )

-#No system description retrieved
+# No system description retrieved
 sys_info_mock.return_value = (None, 'TIMEOUT')
 self.assertFalse(
 self.manager.is_valid_vendor(self.correct_host,
@@ -211,7 +211,7 @@ class HDManagerTest(unittest2.TestCase):
 'pica8')
 )

-#Incorrect vendor name
+# Incorrect vendor name
 sys_info = 'Pica8 XorPlus Platform Software'
 sys_info_mock.return_value = (sys_info, '')
 self.assertFalse(
@@ -220,7 +220,7 @@ class HDManagerTest(unittest2.TestCase):
 'huawei')
 )

-#Correct vendor name
+# Correct vendor name
 self.assertTrue(
 self.manager.is_valid_vendor(self.correct_host,
 self.correct_credential,
@@ -229,12 +229,12 @@ class HDManagerTest(unittest2.TestCase):

 def test_learn(self):
 """test learn."""
-#non-exsiting plugin
+# non-exsiting plugin
 self.assertIsNone(self.manager.learn(self.correct_host,
 self.correct_credential,
 'huawei', 'xxx'))

-#non-existing vendor
+# non-existing vendor
 self.assertIsNone(self.manager.learn(self.correct_host,
 self.correct_credential,
 'xxxx', 'mac'))
@@ -61,8 +61,7 @@ OPTIONS = Flags()


 def init():
-"""Init flag parsing.
-"""
+"""Init flag parsing."""
 OPTIONS.parse_args()


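This docstring edit, like the ones in the next hunks, follows the docstring conventions the new hacking checks enforce: a one-sentence docstring keeps its closing quotes on the same line, and a multi-line docstring opens with a short summary ending in a period, separated from the rest by a blank line. A hypothetical sketch of both forms:

def init():
    """Init flag parsing."""    # single line: quotes open and close together


def extract_all(path="."):
    """Extract all members.

    The longer description starts after a blank line and can run
    over as many lines as needed before the closing quotes.
    """
    return path
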
@@ -263,10 +263,12 @@ def get_switch_machines_from_file(filename):
 def execute_cli_by_ssh(cmd, host, username, password=None,
 keyfile='/root/.ssh/id_rsa', nowait=False):
 """SSH to execute script on remote machine
+
 :param host: ip of the remote machine
 :param username: username to access the remote machine
 :param password: password to access the remote machine
 :param cmd: command to execute
+
 """
 if not cmd:
 logging.error("No command found!")
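For orientation, a hedged usage sketch of the helper whose docstring gains the blank lines above; only the signature comes from the hunk, and the import path and argument values are assumptions:

from compass.utils import util   # assumed module path, not shown in the diff

# Run a command on a remote machine over SSH using key-based auth.
util.execute_cli_by_ssh(
    'chef-client',                # cmd: command to execute
    '192.0.2.10',                 # host: ip of the remote machine (example)
    'root',                       # username to access the remote machine
    keyfile='/root/.ssh/id_rsa',
    nowait=False,
)
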
@@ -70,7 +70,7 @@ EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
 FLOATING_IP_START: 203.0.113.101
 FLOATING_IP_END: 203.0.113.200

-build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
 build_in_image_name: cirros-0.3.3-x86_64-disk.img

 physical_device: /dev/sdb
@@ -131,7 +131,7 @@ EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
 FLOATING_IP_START: 203.0.113.101
 FLOATING_IP_END: 203.0.113.200

-build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
 build_in_image_name: cirros-0.3.3-x86_64-disk.img

 physical_device: /dev/sdb
@@ -83,7 +83,7 @@ EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
 FLOATING_IP_START: 203.0.113.101
 FLOATING_IP_END: 203.0.113.200

-build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
 build_in_image_name: cirros-0.3.3-x86_64-disk.img

 physical_device: /dev/sdb
ez_setup.py
@@ -208,11 +208,13 @@ def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,


 def _extractall(self, path=".", members=None):
-"""Extract all members from the archive to the current working
+"""Extract all members.
+
+From the archive to the current working
 directory and set owner, modification time and permissions on
 directories afterwards. `path' specifies a different directory
 to extract to. `members' is optional and must be a subset of the
 list returned by getmembers().
 """
 import copy
 import operator
@@ -5,5 +5,5 @@ testtools>=0.9.32
 testrepository>=0.0.17
 mimeparse
 coverage>=3.6
-hacking>=0.8.0,<0.9
+hacking
 pycrypto<=2.0.1
tox.ini
@@ -31,7 +31,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}'
 downloadcache = ~/cache/pip

 [flake8]
-ignore = H302,H304,H233,H803,F401
+ignore = H302,H304,H233,H803,F401,H104,H236,H237,H238
 show-source = true
 builtins = _
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build
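The widened ignore list adds four hacking codes; as a rough reading (not stated in the diff), H104 covers files containing only comments, H236 and H237 are Python 3 compatibility checks (__metaclass__ and removed modules), and H238 flags old-style classes. A tiny hypothetical example of code that the extra ignores stop flagging:

# With H238 ignored, flake8 no longer warns about an old-style
# (non-object-derived) class declaration like this one.
class LegacyAdapter:
    pass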